author		Denys Vlasenko	2022-02-10 15:38:10 +0100
committer	Denys Vlasenko	2022-02-10 15:38:10 +0100
commit		6f56fa17131b3cbb84e887c6c5fb202f2492169e (patch)
tree		3d0bbb352f7e5d56cfb7e529922a77d7176b2e42
parent		6a6c1c0ea91edeeb18736190feb5a7278d3d1141 (diff)
libbb/sha: improve comments
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
-rw-r--r--	libbb/hash_md5_sha256_x86-32_shaNI.S	18
-rw-r--r--	libbb/hash_md5_sha256_x86-64_shaNI.S	19
-rw-r--r--	libbb/hash_md5_sha_x86-32_shaNI.S	2
-rw-r--r--	libbb/hash_md5_sha_x86-64_shaNI.S	2
4 files changed, 20 insertions, 21 deletions
diff --git a/libbb/hash_md5_sha256_x86-32_shaNI.S b/libbb/hash_md5_sha256_x86-32_shaNI.S
index 413e2df..4b33449 100644
--- a/libbb/hash_md5_sha256_x86-32_shaNI.S
+++ b/libbb/hash_md5_sha256_x86-32_shaNI.S
@@ -4,7 +4,7 @@
 // We use shorter insns, even though they are for "wrong"
 // data type (fp, not int).
 // For Intel, there is no penalty for doing it at all
-// (CPUs which do have such penalty do not support SHA1 insns).
+// (CPUs which do have such penalty do not support SHA insns).
 // For AMD, the penalty is one extra cycle
 // (allegedly: I failed to find measurable difference).
@@ -39,12 +39,13 @@
 .balign	8	# allow decoders to fetch at least 2 first insns
 sha256_process_block64_shaNI:
-	movu128	76+0*16(%eax), XMMTMP /* DCBA (msb-to-lsb: 3,2,1,0) */
-	movu128	76+1*16(%eax), STATE1 /* HGFE */
+	movu128	76+0*16(%eax), XMMTMP /* ABCD (little-endian dword order) */
+	movu128	76+1*16(%eax), STATE1 /* EFGH */
 /* shufps takes dwords 0,1 from *2nd* operand, and dwords 2,3 from 1st one */
 	mova128	STATE1, STATE0
-	shufps	SHUF(1,0,1,0), XMMTMP, STATE0 /* ABEF */
-	shufps	SHUF(3,2,3,2), XMMTMP, STATE1 /* CDGH */
+	/*     --- -------------- ABCD -- EFGH */
+	shufps	SHUF(1,0,1,0), XMMTMP, STATE0 /* FEBA */
+	shufps	SHUF(3,2,3,2), XMMTMP, STATE1 /* HGDC */
 /* XMMTMP holds flip mask from here... */
 	mova128	PSHUFFLE_BSWAP32_FLIP_MASK, XMMTMP
@@ -232,12 +233,11 @@ sha256_process_block64_shaNI:
 	sha256rnds2 STATE1, STATE0
 
 	/* Write hash values back in the correct order */
-	/* STATE0: ABEF (msb-to-lsb: 3,2,1,0) */
-	/* STATE1: CDGH */
 	mova128	STATE0, XMMTMP
 /* shufps takes dwords 0,1 from *2nd* operand, and dwords 2,3 from 1st one */
-	shufps	SHUF(3,2,3,2), STATE1, STATE0 /* DCBA */
-	shufps	SHUF(1,0,1,0), STATE1, XMMTMP /* HGFE */
+	/*     --- -------------- HGDC -- FEBA */
+	shufps	SHUF(3,2,3,2), STATE1, STATE0 /* ABCD */
+	shufps	SHUF(1,0,1,0), STATE1, XMMTMP /* EFGH */
 	/* add current hash values to previous ones */
 	movu128	76+1*16(%eax), STATE1
 	paddd	XMMTMP, STATE1
diff --git a/libbb/hash_md5_sha256_x86-64_shaNI.S b/libbb/hash_md5_sha256_x86-64_shaNI.S
index c246762..5ed80c2 100644
--- a/libbb/hash_md5_sha256_x86-64_shaNI.S
+++ b/libbb/hash_md5_sha256_x86-64_shaNI.S
@@ -4,7 +4,7 @@
 // We use shorter insns, even though they are for "wrong"
 // data type (fp, not int).
 // For Intel, there is no penalty for doing it at all
-// (CPUs which do have such penalty do not support SHA1 insns).
+// (CPUs which do have such penalty do not support SHA insns).
 // For AMD, the penalty is one extra cycle
 // (allegedly: I failed to find measurable difference).
@@ -42,12 +42,13 @@
 .balign	8	# allow decoders to fetch at least 2 first insns
 sha256_process_block64_shaNI:
-	movu128	80+0*16(%rdi), XMMTMP /* DCBA (msb-to-lsb: 3,2,1,0) */
-	movu128	80+1*16(%rdi), STATE1 /* HGFE */
+	movu128	80+0*16(%rdi), XMMTMP /* ABCD (little-endian dword order) */
+	movu128	80+1*16(%rdi), STATE1 /* EFGH */
 /* shufps takes dwords 0,1 from *2nd* operand, and dwords 2,3 from 1st one */
 	mova128	STATE1, STATE0
-	shufps	SHUF(1,0,1,0), XMMTMP, STATE0 /* ABEF */
-	shufps	SHUF(3,2,3,2), XMMTMP, STATE1 /* CDGH */
+	/*     --- -------------- ABCD -- EFGH */
+	shufps	SHUF(1,0,1,0), XMMTMP, STATE0 /* FEBA */
+	shufps	SHUF(3,2,3,2), XMMTMP, STATE1 /* HGDC */
 /* XMMTMP holds flip mask from here... */
 	mova128	PSHUFFLE_BSWAP32_FLIP_MASK(%rip), XMMTMP
@@ -243,13 +244,11 @@ sha256_process_block64_shaNI:
 	paddd	CDGH_SAVE, STATE1
 
 	/* Write hash values back in the correct order */
-	/* STATE0: ABEF (msb-to-lsb: 3,2,1,0) */
-	/* STATE1: CDGH */
 	mova128	STATE0, XMMTMP
 /* shufps takes dwords 0,1 from *2nd* operand, and dwords 2,3 from 1st one */
-	shufps	SHUF(3,2,3,2), STATE1, STATE0 /* DCBA */
-	shufps	SHUF(1,0,1,0), STATE1, XMMTMP /* HGFE */
-
+	/*     --- -------------- HGDC -- FEBA */
+	shufps	SHUF(3,2,3,2), STATE1, STATE0 /* ABCD */
+	shufps	SHUF(1,0,1,0), STATE1, XMMTMP /* EFGH */
 	movu128	STATE0, 80+0*16(%rdi)
 	movu128	XMMTMP, 80+1*16(%rdi)
diff --git a/libbb/hash_md5_sha_x86-32_shaNI.S b/libbb/hash_md5_sha_x86-32_shaNI.S
index afca98a..c7fb243 100644
--- a/libbb/hash_md5_sha_x86-32_shaNI.S
+++ b/libbb/hash_md5_sha_x86-32_shaNI.S
@@ -4,7 +4,7 @@
 // We use shorter insns, even though they are for "wrong"
 // data type (fp, not int).
 // For Intel, there is no penalty for doing it at all
-// (CPUs which do have such penalty do not support SHA1 insns).
+// (CPUs which do have such penalty do not support SHA insns).
 // For AMD, the penalty is one extra cycle
 // (allegedly: I failed to find measurable difference).
diff --git a/libbb/hash_md5_sha_x86-64_shaNI.S b/libbb/hash_md5_sha_x86-64_shaNI.S
index 54d1227..c13cdec 100644
--- a/libbb/hash_md5_sha_x86-64_shaNI.S
+++ b/libbb/hash_md5_sha_x86-64_shaNI.S
@@ -4,7 +4,7 @@
 // We use shorter insns, even though they are for "wrong"
 // data type (fp, not int).
 // For Intel, there is no penalty for doing it at all
-// (CPUs which do have such penalty do not support SHA1 insns).
+// (CPUs which do have such penalty do not support SHA insns).
 // For AMD, the penalty is one extra cycle
 // (allegedly: I failed to find measurable difference).
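
For reference, the reworded comments describe each xmm register's contents in little-endian dword order: the hash words sit in memory as A..H, the SHA-NI rounds want the state split across two registers in the ABEF/CDGH arrangement, so the code shuffles ABCD/EFGH into FEBA/HGDC on entry and reverses the shuffles before writing the state back. A minimal C intrinsics sketch of those two shuffle pairs follows; the helper names are hypothetical and only illustrate the data movement, not busybox's actual code:

#include <stdint.h>
#include <immintrin.h>

/* Sketch only. hash[0..7] = A..H as stored in memory (little-endian dword order).
 * The 0x11 and 0xbb immediates select the same dwords as the assembly's
 * SHUF(1,0,1,0) and SHUF(3,2,3,2) selectors. */
static void load_state_shaNI(const uint32_t hash[8], __m128i *abef, __m128i *cdgh)
{
	/* casts to float vectors mirror the "fp insn on int data" note in the file headers */
	__m128 abcd = _mm_castsi128_ps(_mm_loadu_si128((const __m128i *)&hash[0])); /* dwords A,B,C,D */
	__m128 efgh = _mm_castsi128_ps(_mm_loadu_si128((const __m128i *)&hash[4])); /* dwords E,F,G,H */

	/* _mm_shuffle_ps: result dwords 0,1 come from the 1st operand, dwords 2,3 from the 2nd */
	*abef = _mm_castps_si128(_mm_shuffle_ps(efgh, abcd, 0x11)); /* dwords F,E,B,A */
	*cdgh = _mm_castps_si128(_mm_shuffle_ps(efgh, abcd, 0xbb)); /* dwords H,G,D,C */
}

static void store_state_shaNI(uint32_t hash[8], __m128i abef, __m128i cdgh)
{
	__m128 lo = _mm_castsi128_ps(abef); /* dwords F,E,B,A */
	__m128 hi = _mm_castsi128_ps(cdgh); /* dwords H,G,D,C */

	/* reverse shuffles restore the in-memory A..H layout */
	_mm_storeu_si128((__m128i *)&hash[0],
		_mm_castps_si128(_mm_shuffle_ps(lo, hi, 0xbb))); /* dwords A,B,C,D */
	_mm_storeu_si128((__m128i *)&hash[4],
		_mm_castps_si128(_mm_shuffle_ps(lo, hi, 0x11))); /* dwords E,F,G,H */
}

Only SSE2 is needed for this fragment; the SHA rounds themselves (sha256rnds2 / _mm_sha256rnds2_epu32) are not shown.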