-rw-r--r--  libbb/hash_md5_sha_x86-64.S     |  8
-rwxr-xr-x  libbb/hash_md5_sha_x86-64.S.sh  | 14
2 files changed, 11 insertions, 11 deletions
diff --git a/libbb/hash_md5_sha_x86-64.S b/libbb/hash_md5_sha_x86-64.S
index f0daa30..1d55b91 100644
--- a/libbb/hash_md5_sha_x86-64.S
+++ b/libbb/hash_md5_sha_x86-64.S
@@ -71,8 +71,8 @@ sha1_process_block64:
 	movq	4*10(%rdi), %r12
 	bswapq	%r11
 	bswapq	%r12
-	rolq	$32, %r11	# r11 = W[9]:W[8]
-	rolq	$32, %r12	# r12 = W[11]:W[10]
+	rolq	$32, %r11		# r11 = W[9]:W[8]
+	rolq	$32, %r12		# r12 = W[11]:W[10]
 	movq	%r11, %xmm2
 	movq	%r12, %xmm4
 	punpcklqdq %xmm4, %xmm2	# xmm2 = r12:r11 = (W[8],W[9],W[10],W[11])
@@ -81,8 +81,8 @@ sha1_process_block64:
 	movq	4*14(%rdi), %r14
 	bswapq	%r13
 	bswapq	%r14
-	rolq	$32, %r13	# r13 = W[13]:W[12]
-	rolq	$32, %r14	# r14 = W[15]:W[14]
+	rolq	$32, %r13		# r13 = W[13]:W[12]
+	rolq	$32, %r14		# r14 = W[15]:W[14]
 	movq	%r13, %xmm3
 	movq	%r14, %xmm4
 	punpcklqdq %xmm4, %xmm3	# xmm3 = r14:r13 = (W[12],W[13],W[14],W[15])
diff --git a/libbb/hash_md5_sha_x86-64.S.sh b/libbb/hash_md5_sha_x86-64.S.sh
index 57e77b1..40c979d 100755
--- a/libbb/hash_md5_sha_x86-64.S.sh
+++ b/libbb/hash_md5_sha_x86-64.S.sh
@@ -99,7 +99,7 @@ INTERLEAVE() {
 )
 }
 
-# movaps bswap32_mask(%rip), $xmmT1
+# movaps	bswap32_mask(%rip), $xmmT1
 # Load W[] to xmm0..3, byteswapping on the fly.
 # For iterations 0..15, we pass RCONST+W[] in rsi,r8..r14
 # for use in RD1As instead of spilling them to stack.
@@ -110,8 +110,8 @@ INTERLEAVE() {
 # movaps	%xmm0, $xmmT2
 # paddd	$xmmRCONST, $xmmT2
 # movq	$xmmT2, %rsi
-# #pextrq \$1, $xmmT2, %r8	#SSE4.1 insn
-# #movhpd $xmmT2, %r8	#can only move to mem, not to reg
+# #pextrq	\$1, $xmmT2, %r8	#SSE4.1 insn
+# #movhpd	$xmmT2, %r8		#can only move to mem, not to reg
 # shufps	\$0x0e, $xmmT2, $xmmT2	# have to use two-insn sequence
 # movq	$xmmT2, %r8		# instead
 # ...
@@ -197,8 +197,8 @@ sha1_process_block64:
 	movq	4*10(%rdi), %r12
 	bswapq	%r11
 	bswapq	%r12
-	rolq	\$32, %r11	# r11 = W[9]:W[8]
-	rolq	\$32, %r12	# r12 = W[11]:W[10]
+	rolq	\$32, %r11		# r11 = W[9]:W[8]
+	rolq	\$32, %r12		# r12 = W[11]:W[10]
 	movq	%r11, %xmm2
 	movq	%r12, $xmmT1
 	punpcklqdq $xmmT1, %xmm2	# xmm2 = r12:r11 = (W[8],W[9],W[10],W[11])
@@ -207,8 +207,8 @@ sha1_process_block64:
 	movq	4*14(%rdi), %r14
 	bswapq	%r13
 	bswapq	%r14
-	rolq	\$32, %r13	# r13 = W[13]:W[12]
-	rolq	\$32, %r14	# r14 = W[15]:W[14]
+	rolq	\$32, %r13		# r13 = W[13]:W[12]
+	rolq	\$32, %r14		# r14 = W[15]:W[14]
 	movq	%r13, %xmm3
 	movq	%r14, $xmmT1
 	punpcklqdq $xmmT1, %xmm3	# xmm3 = r14:r13 = (W[12],W[13],W[14],W[15])
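
For readers following the rolq comments above: a minimal C sketch of the load trick those lines implement. Two big-endian 32-bit W[] words are fetched with a single 64-bit load, byteswapped as a whole, then rotated 32 bits so the lower-addressed word lands in the low half -- the movq/bswapq/rolq sequence in the diff. This assumes a little-endian host and the GCC/Clang __builtin_bswap64 builtin; the function names are illustrative, not from busybox.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One 64-bit load + bswap + 32-bit rotate, mirroring the assembly.
 * After the bswap the register holds W[i]:W[i+1]; the rotate swaps
 * the halves so the low dword is W[i], matching the
 * "# r11 = W[9]:W[8]" comments. */
static void load_w_pair_bswap(const uint8_t *p, uint32_t w[2])
{
	uint64_t r;
	memcpy(&r, p, 8);		/* movq 4*8(%rdi), %r11 (little-endian load) */
	r = __builtin_bswap64(r);	/* bswapq: r = W[i]:W[i+1] */
	r = (r << 32) | (r >> 32);	/* rolq $32: r = W[i+1]:W[i] */
	w[0] = (uint32_t)r;
	w[1] = (uint32_t)(r >> 32);
}

/* Reference: the same two words assembled byte by byte. */
static void load_w_pair_naive(const uint8_t *p, uint32_t w[2])
{
	w[0] = ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
	w[1] = ((uint32_t)p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7];
}

int main(void)
{
	const uint8_t block[8] = { 1, 2, 3, 4, 0xAA, 0xBB, 0xCC, 0xDD };
	uint32_t a[2], b[2];
	load_w_pair_bswap(block, a);
	load_w_pair_naive(block, b);
	/* both lines print: 01020304 aabbccdd */
	printf("%08x %08x\n", a[0], a[1]);
	printf("%08x %08x\n", b[0], b[1]);
	return 0;
}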
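
The pextrq/movhpd/shufps comments in the generator script weigh ways to move the high 64 bits of an xmm register into a general-purpose register: pextrq does it in one instruction but is SSE4.1, and movhpd can only store to memory. Below is a hedged intrinsics sketch of the SSE2-only two-instruction fallback (shufps, then movq) that the script settles on; high64_sse2 is a made-up name for illustration.

#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>	/* SSE2 intrinsics */

/* High 64 bits of an xmm register into a GPR without SSE4.1:
 * shufps $0x0e moves the high qword down into the low qword,
 * then movq copies the low qword out. */
static uint64_t high64_sse2(__m128i v)
{
	__m128 f = _mm_castsi128_ps(v);
	/* imm 0x0e selects dword elements 2,3 (the high qword) into slots 0,1 */
	__m128i t = _mm_castps_si128(_mm_shuffle_ps(f, f, 0x0e));
	return (uint64_t)_mm_cvtsi128_si64(t);	/* movq %xmm, %reg */
}

int main(void)
{
	__m128i v = _mm_set_epi64x(0x1122334455667788LL, 0);
	/* expect 1122334455667788, the high qword of v */
	printf("%016llx\n", (unsigned long long)high64_sse2(v));
	return 0;
}

On SSE4.1 hardware the single-instruction equivalent is _mm_extract_epi64(v, 1), which compiles to the pextrq the comments mention.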