author     Denys Vlasenko    2022-01-03 01:57:29 +0100
committer  Denys Vlasenko    2022-01-03 12:57:36 +0100
commit     05fd13ebec869fc5e6f226481a2405a2685e8db1 (patch)
tree       bd112a629c547af9bb2a974d1b47fd5f193c3813 /libbb
parent     5c0c5582319a5123635c9fd62f8e99ef01cceb3f (diff)
download   busybox-05fd13ebec869fc5e6f226481a2405a2685e8db1.zip
           busybox-05fd13ebec869fc5e6f226481a2405a2685e8db1.tar.gz
libbb/sha1: x86_64 version: move to a separate .S file, no code changes
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Diffstat (limited to 'libbb')
-rw-r--r--  libbb/Kbuild.src               1
-rw-r--r--  libbb/hash_md5_sha.c         392
-rw-r--r--  libbb/hash_md5_sha_x86-64.S 1349
3 files changed, 1353 insertions, 389 deletions
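
On the C side the commit leaves only a prototype plus a compile-time guard that the hard-coded offset of ctx->hash (80 bytes) assumed by the assembly still matches the struct layout (see the hash_md5_sha.c hunk below). A minimal stand-alone sketch of that negative-array-size trick, using an illustrative context layout rather than busybox's real one:

#include <stddef.h>

/* Hypothetical context layout, for illustration only. */
struct ctx {
	unsigned char buf[64];          /* offset  0 */
	void *process_block;            /* offset 64 on LP64 */
	unsigned long long total64;     /* offset 72 */
	unsigned hash[5];               /* offset 80 - what the asm assumes */
};

/* t[1] if the offset is 80, t[-1] (a hard compile error) otherwise. */
struct asm_expects_80 {
	char t[1 - 2 * (offsetof(struct ctx, hash) != 80)];
};

/* The body now lives in hash_md5_sha_x86-64.S. */
void sha1_process_block64(struct ctx *ctx);
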
diff --git a/libbb/Kbuild.src b/libbb/Kbuild.src
index 2fa2398..19b8aad 100644
--- a/libbb/Kbuild.src
+++ b/libbb/Kbuild.src
@@ -56,6 +56,7 @@ lib-y += login.o
lib-y += make_directory.o
lib-y += makedev.o
lib-y += hash_md5_sha.o
+lib-y += hash_md5_sha_x86-64.o
# Alternative (disabled) MD5 implementation
#lib-y += hash_md5prime.o
lib-y += messages.o
diff --git a/libbb/hash_md5_sha.c b/libbb/hash_md5_sha.c
index 7eca3de..ee19c1c 100644
--- a/libbb/hash_md5_sha.c
+++ b/libbb/hash_md5_sha.c
@@ -696,397 +696,11 @@ static void FAST_FUNC sha1_process_block64(sha1_ctx_t *ctx UNUSED_PARAM)
#undef RCONST
}
# elif defined(__GNUC__) && defined(__x86_64__)
-static void FAST_FUNC sha1_process_block64(sha1_ctx_t *ctx UNUSED_PARAM)
-{
- BUILD_BUG_ON(offsetof(sha1_ctx_t, hash) != 80);
- asm(
-"\n\
- pushq %r15 # \n\
- pushq %r14 # \n\
- pushq %r13 # \n\
- pushq %r12 # \n\
- pushq %rbp # \n\
- pushq %rbx # \n\
- pushq %rdi # we need ctx at the end \n\
- \n\
-#Register and stack use: \n\
-# eax..edx: a..d \n\
-# ebp: e \n\
-# esi,edi: temps \n\
-# -32+4*n(%rsp),r8...r15: W[0..7,8..15] \n\
- .macro loadW n,r \n\
- .if \\n == 0 \n\
- movl -32+4*0(%rsp),\\r \n\
- .endif \n\
- .if \\n == 1 \n\
- movl -32+4*1(%rsp),\\r \n\
- .endif \n\
- .if \\n == 2 \n\
- movl -32+4*2(%rsp),\\r \n\
- .endif \n\
- .if \\n == 3 \n\
- movl -32+4*3(%rsp),\\r \n\
- .endif \n\
- .if \\n == 4 \n\
- movl -32+4*4(%rsp),\\r \n\
- .endif \n\
- .if \\n == 5 \n\
- movl -32+4*5(%rsp),\\r \n\
- .endif \n\
- .if \\n == 6 \n\
- movl -32+4*6(%rsp),\\r \n\
- .endif \n\
- .if \\n == 7 \n\
- movl -32+4*7(%rsp),\\r \n\
- .endif \n\
- .if \\n == 8 \n\
- movl %r8d,\\r \n\
- .endif \n\
- .if \\n == 9 \n\
- movl %r9d,\\r \n\
- .endif \n\
- .if \\n == 10 \n\
- movl %r10d,\\r \n\
- .endif \n\
- .if \\n == 11 \n\
- movl %r11d,\\r \n\
- .endif \n\
- .if \\n == 12 \n\
- movl %r12d,\\r \n\
- .endif \n\
- .if \\n == 13 \n\
- movl %r13d,\\r \n\
- .endif \n\
- .if \\n == 14 \n\
- movl %r14d,\\r \n\
- .endif \n\
- .if \\n == 15 \n\
- movl %r15d,\\r \n\
- .endif \n\
- .endm \n\
- \n\
- .macro storeW r,n \n\
- .if \\n == 0 \n\
- movl \\r,-32+4*0(%rsp) \n\
- .endif \n\
- .if \\n == 1 \n\
- movl \\r,-32+4*1(%rsp) \n\
- .endif \n\
- .if \\n == 2 \n\
- movl \\r,-32+4*2(%rsp) \n\
- .endif \n\
- .if \\n == 3 \n\
- movl \\r,-32+4*3(%rsp) \n\
- .endif \n\
- .if \\n == 4 \n\
- movl \\r,-32+4*4(%rsp) \n\
- .endif \n\
- .if \\n == 5 \n\
- movl \\r,-32+4*5(%rsp) \n\
- .endif \n\
- .if \\n == 6 \n\
- movl \\r,-32+4*6(%rsp) \n\
- .endif \n\
- .if \\n == 7 \n\
- movl \\r,-32+4*7(%rsp) \n\
- .endif \n\
- .if \\n == 8 \n\
- movl \\r,%r8d \n\
- .endif \n\
- .if \\n == 9 \n\
- movl \\r,%r9d \n\
- .endif \n\
- .if \\n == 10 \n\
- movl \\r,%r10d \n\
- .endif \n\
- .if \\n == 11 \n\
- movl \\r,%r11d \n\
- .endif \n\
- .if \\n == 12 \n\
- movl \\r,%r12d \n\
- .endif \n\
- .if \\n == 13 \n\
- movl \\r,%r13d \n\
- .endif \n\
- .if \\n == 14 \n\
- movl \\r,%r14d \n\
- .endif \n\
- .if \\n == 15 \n\
- movl \\r,%r15d \n\
- .endif \n\
- .endm \n\
- \n\
- .macro xorW n,r \n\
- .if \\n == 0 \n\
- xorl -32+4*0(%rsp),\\r \n\
- .endif \n\
- .if \\n == 1 \n\
- xorl -32+4*1(%rsp),\\r \n\
- .endif \n\
- .if \\n == 2 \n\
- xorl -32+4*2(%rsp),\\r \n\
- .endif \n\
- .if \\n == 3 \n\
- xorl -32+4*3(%rsp),\\r \n\
- .endif \n\
- .if \\n == 4 \n\
- xorl -32+4*4(%rsp),\\r \n\
- .endif \n\
- .if \\n == 5 \n\
- xorl -32+4*5(%rsp),\\r \n\
- .endif \n\
- .if \\n == 6 \n\
- xorl -32+4*6(%rsp),\\r \n\
- .endif \n\
- .if \\n == 7 \n\
- xorl -32+4*7(%rsp),\\r \n\
- .endif \n\
- .if \\n == 8 \n\
- xorl %r8d,\\r \n\
- .endif \n\
- .if \\n == 9 \n\
- xorl %r9d,\\r \n\
- .endif \n\
- .if \\n == 10 \n\
- xorl %r10d,\\r \n\
- .endif \n\
- .if \\n == 11 \n\
- xorl %r11d,\\r \n\
- .endif \n\
- .if \\n == 12 \n\
- xorl %r12d,\\r \n\
- .endif \n\
- .if \\n == 13 \n\
- xorl %r13d,\\r \n\
- .endif \n\
- .if \\n == 14 \n\
- xorl %r14d,\\r \n\
- .endif \n\
- .if \\n == 15 \n\
- xorl %r15d,\\r \n\
- .endif \n\
- .endm \n\
- \n\
- movq 4*8(%rdi), %r8 \n\
- bswap %r8 \n\
- movl %r8d, %r9d \n\
- shrq $32, %r8 \n\
- movq 4*10(%rdi), %r10 \n\
- bswap %r10 \n\
- movl %r10d, %r11d \n\
- shrq $32, %r10 \n\
- movq 4*12(%rdi), %r12 \n\
- bswap %r12 \n\
- movl %r12d, %r13d \n\
- shrq $32, %r12 \n\
- movq 4*14(%rdi), %r14 \n\
- bswap %r14 \n\
- movl %r14d, %r15d \n\
- shrq $32, %r14 \n\
- \n\
- movl $3, %eax \n\
-1: \n\
- movq (%rdi,%rax,8), %rsi \n\
- bswap %rsi \n\
- rolq $32, %rsi \n\
- movq %rsi, -32(%rsp,%rax,8) \n\
- decl %eax \n\
- jns 1b \n\
- movl 80(%rdi), %eax # a = ctx->hash[0] \n\
- movl 84(%rdi), %ebx # b = ctx->hash[1] \n\
- movl 88(%rdi), %ecx # c = ctx->hash[2] \n\
- movl 92(%rdi), %edx # d = ctx->hash[3] \n\
- movl 96(%rdi), %ebp # e = ctx->hash[4] \n\
-"
-#define RD1As(a,b,c,d,e, n, RCONST) \
-"\n\
- ##loadW "n", %esi # n=0, W[0] already in %esi \n\
- movl %e"c", %edi # c \n\
- xorl %e"d", %edi # ^d \n\
- andl %e"b", %edi # &b \n\
- xorl %e"d", %edi # (((c ^ d) & b) ^ d) \n\
- leal "RCONST"(%r"e",%rsi), %e"e" # e += RCONST + W[n] \n\
- addl %edi, %e"e" # e += (((c ^ d) & b) ^ d) \n\
- movl %e"a", %esi # \n\
- roll $5, %esi # rotl32(a,5) \n\
- addl %esi, %e"e" # e += rotl32(a,5) \n\
- rorl $2, %e"b" # b = rotl32(b,30) \n\
-"
-#define RD1Bs(a,b,c,d,e, n, RCONST) \
-"\n\
- loadW "n", %esi # W[n] \n\
- movl %e"c", %edi # c \n\
- xorl %e"d", %edi # ^d \n\
- andl %e"b", %edi # &b \n\
- xorl %e"d", %edi # (((c ^ d) & b) ^ d) \n\
- leal "RCONST"(%r"e",%rsi), %e"e" # e += RCONST + W[n] \n\
- addl %edi, %e"e" # e += (((c ^ d) & b) ^ d) \n\
- movl %e"a", %esi # \n\
- roll $5, %esi # rotl32(a,5) \n\
- addl %esi, %e"e" # e += rotl32(a,5) \n\
- rorl $2, %e"b" # b = rotl32(b,30) \n\
-"
-#define RD1Cs(a,b,c,d,e, n, RCONST) \
-"\n\
- movl %e"c", %edi # c \n\
- xorl %e"d", %edi # ^d \n\
- andl %e"b", %edi # &b \n\
- xorl %e"d", %edi # (((c ^ d) & b) ^ d) \n\
- leal "RCONST"(%r"e",%r"n"), %e"e" # e += RCONST + W[n] \n\
- addl %edi, %e"e" # e += (((c ^ d) & b) ^ d) \n\
- movl %e"a", %esi # \n\
- roll $5, %esi # rotl32(a,5) \n\
- addl %esi, %e"e" # e += rotl32(a,5) \n\
- rorl $2, %e"b" # b = rotl32(b,30) \n\
-"
-#define RD1Ds(a,b,c,d,e, n13,n8,n2,n, RCONST) \
-"\n\
- loadW "n13", %esi # W[(n+13) & 15] \n\
- xorW "n8", %esi # ^W[(n+8) & 15] \n\
- xorW "n2", %esi # ^W[(n+2) & 15] \n\
- xorW "n", %esi # ^W[n & 15] \n\
- roll %esi # \n\
- storeW %esi, "n" # store to W[n & 15] \n\
- movl %e"c", %edi # c \n\
- xorl %e"d", %edi # ^d \n\
- andl %e"b", %edi # &b \n\
- xorl %e"d", %edi # (((c ^ d) & b) ^ d) \n\
- leal "RCONST"(%r"e",%rsi), %e"e" # e += RCONST + mixed_W \n\
- addl %edi, %e"e" # e += (((c ^ d) & b) ^ d) \n\
- movl %e"a", %esi # \n\
- roll $5, %esi # rotl32(a,5) \n\
- addl %esi, %e"e" # e += rotl32(a,5) \n\
- rorl $2, %e"b" # b = rotl32(b,30) \n\
-"
-#define RD1A(a,b,c,d,e, n) RD1As(STR(a),STR(b),STR(c),STR(d),STR(e), STR(n), STR(RCONST))
-#define RD1B(a,b,c,d,e, n) RD1Bs(STR(a),STR(b),STR(c),STR(d),STR(e), STR(n), STR(RCONST))
-#define RD1C(a,b,c,d,e, n) RD1Cs(STR(a),STR(b),STR(c),STR(d),STR(e), STR(n), STR(RCONST))
-#define RD1D(a,b,c,d,e, n) RD1Ds(STR(a),STR(b),STR(c),STR(d),STR(e), STR(((n+13)&15)), STR(((n+8)&15)), STR(((n+2)&15)), STR(((n)&15)), STR(RCONST))
-#undef RCONST
-#define RCONST 0x5A827999
- RD1A(ax,bx,cx,dx,bp, 0) RD1B(bp,ax,bx,cx,dx, 1) RD1B(dx,bp,ax,bx,cx, 2) RD1B(cx,dx,bp,ax,bx, 3) RD1B(bx,cx,dx,bp,ax, 4)
- RD1B(ax,bx,cx,dx,bp, 5) RD1B(bp,ax,bx,cx,dx, 6) RD1B(dx,bp,ax,bx,cx, 7) RD1C(cx,dx,bp,ax,bx, 8) RD1C(bx,cx,dx,bp,ax, 9)
- RD1C(ax,bx,cx,dx,bp,10) RD1C(bp,ax,bx,cx,dx,11) RD1C(dx,bp,ax,bx,cx,12) RD1C(cx,dx,bp,ax,bx,13) RD1C(bx,cx,dx,bp,ax,14)
- RD1C(ax,bx,cx,dx,bp,15) RD1D(bp,ax,bx,cx,dx,16) RD1D(dx,bp,ax,bx,cx,17) RD1D(cx,dx,bp,ax,bx,18) RD1D(bx,cx,dx,bp,ax,19)
-#define RD2s(a,b,c,d,e, n13,n8,n2,n, RCONST) \
-"\n\
- loadW "n13", %esi # W[(n+13) & 15] \n\
- xorW "n8", %esi # ^W[(n+8) & 15] \n\
- xorW "n2", %esi # ^W[(n+2) & 15] \n\
- xorW "n", %esi # ^W[n & 15] \n\
- roll %esi # \n\
- storeW %esi, "n" # store to W[n & 15] \n\
- movl %e"c", %edi # c \n\
- xorl %e"d", %edi # ^d \n\
- xorl %e"b", %edi # ^b \n\
- leal "RCONST"(%r"e",%rsi), %e"e" # e += RCONST + mixed_W \n\
- addl %edi, %e"e" # e += (c ^ d ^ b) \n\
- movl %e"a", %esi # \n\
- roll $5, %esi # rotl32(a,5) \n\
- addl %esi, %e"e" # e += rotl32(a,5) \n\
- rorl $2, %e"b" # b = rotl32(b,30) \n\
-"
-#define RD2(a,b,c,d,e, n) RD2s(STR(a),STR(b),STR(c),STR(d),STR(e), STR(((20+n+13)&15)), STR(((20+n+8)&15)), STR(((20+n+2)&15)), STR(((20+n)&15)), STR(RCONST))
-#undef RCONST
-#define RCONST 0x6ED9EBA1
- RD2(ax,bx,cx,dx,bp, 0) RD2(bp,ax,bx,cx,dx, 1) RD2(dx,bp,ax,bx,cx, 2) RD2(cx,dx,bp,ax,bx, 3) RD2(bx,cx,dx,bp,ax, 4)
- RD2(ax,bx,cx,dx,bp, 5) RD2(bp,ax,bx,cx,dx, 6) RD2(dx,bp,ax,bx,cx, 7) RD2(cx,dx,bp,ax,bx, 8) RD2(bx,cx,dx,bp,ax, 9)
- RD2(ax,bx,cx,dx,bp,10) RD2(bp,ax,bx,cx,dx,11) RD2(dx,bp,ax,bx,cx,12) RD2(cx,dx,bp,ax,bx,13) RD2(bx,cx,dx,bp,ax,14)
- RD2(ax,bx,cx,dx,bp,15) RD2(bp,ax,bx,cx,dx,16) RD2(dx,bp,ax,bx,cx,17) RD2(cx,dx,bp,ax,bx,18) RD2(bx,cx,dx,bp,ax,19)
-
-#define RD3s(a,b,c,d,e, n13,n8,n2,n, RCONST) \
-"\n\
- movl %e"b", %edi # di: b \n\
- movl %e"b", %esi # si: b \n\
- orl %e"c", %edi # di: b | c \n\
- andl %e"c", %esi # si: b & c \n\
- andl %e"d", %edi # di: (b | c) & d \n\
- orl %esi, %edi # ((b | c) & d) | (b & c) \n\
- loadW "n13", %esi # W[(n+13) & 15] \n\
- xorW "n8", %esi # ^W[(n+8) & 15] \n\
- xorW "n2", %esi # ^W[(n+2) & 15] \n\
- xorW "n", %esi # ^W[n & 15] \n\
- roll %esi # \n\
- storeW %esi, "n" # store to W[n & 15] \n\
- addl %edi, %e"e" # += ((b | c) & d) | (b & c)\n\
- leal "RCONST"(%r"e",%rsi), %e"e" # e += RCONST + mixed_W \n\
- movl %e"a", %esi # \n\
- roll $5, %esi # rotl32(a,5) \n\
- addl %esi, %e"e" # e += rotl32(a,5) \n\
- rorl $2, %e"b" # b = rotl32(b,30) \n\
-"
-#define RD3(a,b,c,d,e, n) RD3s(STR(a),STR(b),STR(c),STR(d),STR(e), STR(((40+n+13)&15)), STR(((40+n+8)&15)), STR(((40+n+2)&15)), STR(((40+n)&15)), STR(RCONST))
-#undef RCONST
-//#define RCONST 0x8F1BBCDC "out of range for signed 32bit displacement"
-#define RCONST -0x70e44324
- RD3(ax,bx,cx,dx,bp, 0) RD3(bp,ax,bx,cx,dx, 1) RD3(dx,bp,ax,bx,cx, 2) RD3(cx,dx,bp,ax,bx, 3) RD3(bx,cx,dx,bp,ax, 4)
- RD3(ax,bx,cx,dx,bp, 5) RD3(bp,ax,bx,cx,dx, 6) RD3(dx,bp,ax,bx,cx, 7) RD3(cx,dx,bp,ax,bx, 8) RD3(bx,cx,dx,bp,ax, 9)
- RD3(ax,bx,cx,dx,bp,10) RD3(bp,ax,bx,cx,dx,11) RD3(dx,bp,ax,bx,cx,12) RD3(cx,dx,bp,ax,bx,13) RD3(bx,cx,dx,bp,ax,14)
- RD3(ax,bx,cx,dx,bp,15) RD3(bp,ax,bx,cx,dx,16) RD3(dx,bp,ax,bx,cx,17) RD3(cx,dx,bp,ax,bx,18) RD3(bx,cx,dx,bp,ax,19)
-#define RD4As(a,b,c,d,e, n13,n8,n2,n, RCONST) \
-"\n\
- loadW "n13", %esi # W[(n+13) & 15] \n\
- xorW "n8", %esi # ^W[(n+8) & 15] \n\
- xorW "n2", %esi # ^W[(n+2) & 15] \n\
- xorW "n", %esi # ^W[n & 15] \n\
- roll %esi # \n\
- storeW %esi, "n" # store to W[n & 15] \n\
- movl %e"c", %edi # c \n\
- xorl %e"d", %edi # ^d \n\
- xorl %e"b", %edi # ^b \n\
- leal "RCONST"(%r"e",%rsi), %e"e" # e += RCONST + mixed_W \n\
- addl %edi, %e"e" # e += (c ^ d ^ b) \n\
- movl %e"a", %esi # \n\
- roll $5, %esi # rotl32(a,5) \n\
- addl %esi, %e"e" # e += rotl32(a,5) \n\
- rorl $2, %e"b" # b = rotl32(b,30) \n\
-"
-#define RD4Bs(a,b,c,d,e, n13,n8,n2,n, RCONST) \
-"\n\
- loadW "n13", %esi # W[(n+13) & 15] \n\
- xorW "n8", %esi # ^W[(n+8) & 15] \n\
- xorW "n2", %esi # ^W[(n+2) & 15] \n\
- xorW "n", %esi # ^W[n & 15] \n\
- roll %esi # \n\
- #storeW %esi, "n" # store to W[n & 15] elided \n\
- movl %e"c", %edi # c \n\
- xorl %e"d", %edi # ^d \n\
- xorl %e"b", %edi # ^b \n\
- leal "RCONST"(%r"e",%rsi), %e"e" # e += RCONST + mixed_W \n\
- addl %edi, %e"e" # e += (c ^ d ^ b) \n\
- movl %e"a", %esi # \n\
- roll $5, %esi # rotl32(a,5) \n\
- addl %esi, %e"e" # e += rotl32(a,5) \n\
- rorl $2, %e"b" # b = rotl32(b,30) \n\
-"
-#define RD4A(a,b,c,d,e, n) RD4As(STR(a),STR(b),STR(c),STR(d),STR(e), STR(((60+n+13)&15)), STR(((60+n+8)&15)), STR(((60+n+2)&15)), STR(((60+n)&15)), STR(RCONST))
-#define RD4B(a,b,c,d,e, n) RD4Bs(STR(a),STR(b),STR(c),STR(d),STR(e), STR(((60+n+13)&15)), STR(((60+n+8)&15)), STR(((60+n+2)&15)), STR(((60+n)&15)), STR(RCONST))
-#undef RCONST
-//#define RCONST 0xCA62C1D6 "out of range for signed 32bit displacement"
-#define RCONST -0x359d3e2a
- RD4A(ax,bx,cx,dx,bp, 0) RD4A(bp,ax,bx,cx,dx, 1) RD4A(dx,bp,ax,bx,cx, 2) RD4A(cx,dx,bp,ax,bx, 3) RD4A(bx,cx,dx,bp,ax, 4)
- RD4A(ax,bx,cx,dx,bp, 5) RD4A(bp,ax,bx,cx,dx, 6) RD4A(dx,bp,ax,bx,cx, 7) RD4A(cx,dx,bp,ax,bx, 8) RD4A(bx,cx,dx,bp,ax, 9)
- RD4A(ax,bx,cx,dx,bp,10) RD4A(bp,ax,bx,cx,dx,11) RD4A(dx,bp,ax,bx,cx,12) RD4A(cx,dx,bp,ax,bx,13) RD4A(bx,cx,dx,bp,ax,14)
- RD4A(ax,bx,cx,dx,bp,15) RD4A(bp,ax,bx,cx,dx,16) RD4B(dx,bp,ax,bx,cx,17) RD4B(cx,dx,bp,ax,bx,18) RD4B(bx,cx,dx,bp,ax,19)
+/* in hash_md5_sha_x86-64.S */
+struct ASM_expects_80 { char t[1 - 2*(offsetof(sha1_ctx_t, hash) != 80)]; };
+void FAST_FUNC sha1_process_block64(sha1_ctx_t *ctx UNUSED_PARAM);
-"\n\
- popq %rdi # \n\
- addl %eax, 80(%rdi) # ctx->hash[0] += a \n\
- addl %ebx, 84(%rdi) # ctx->hash[1] += b \n\
- addl %ecx, 88(%rdi) # ctx->hash[2] += c \n\
- addl %edx, 92(%rdi) # ctx->hash[3] += d \n\
- addl %ebp, 96(%rdi) # ctx->hash[4] += e \n\
- popq %rbx # \n\
- popq %rbp # \n\
- popq %r12 # \n\
- popq %r13 # \n\
- popq %r14 # \n\
- popq %r15 # \n\
-"
- ); /* asm */
-#undef RCONST
-}
# else
/* Fast, fully-unrolled SHA1. +3800 bytes of code on x86.
* It seems further speedup can be achieved by handling more than
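
One detail carried over from the removed inline version into the generated file: the round-3 and round-4 constants 0x8F1BBCDC and 0xCA62C1D6 do not fit leal's sign-extended 32-bit displacement, so they are emitted as the negative aliases -0x70e44324 and -0x359d3e2a. Only the low 32 bits of e survive, so the aliases are exact; a quick C check of that equivalence (illustrative, not part of the commit):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* leal sign-extends its 32-bit displacement, but the destination is a
	 * 32-bit register, so the two's-complement alias yields the same e. */
	assert((uint32_t)-0x70e44324 == 0x8F1BBCDCu);
	assert((uint32_t)-0x359d3e2a == 0xCA62C1D6u);
	return 0;
}
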
diff --git a/libbb/hash_md5_sha_x86-64.S b/libbb/hash_md5_sha_x86-64.S
new file mode 100644
index 0000000..466cd9a
--- /dev/null
+++ b/libbb/hash_md5_sha_x86-64.S
@@ -0,0 +1,1349 @@
+### Generated by hash_md5_sha_x86-64.S.sh ###
+#if defined(__GNUC__) && defined(__x86_64__)
+ .section .text.sha1_process_block64,"ax",@progbits
+ .globl sha1_process_block64
+ .hidden sha1_process_block64
+ .type sha1_process_block64, @function
+sha1_process_block64:
+ pushq %r15 #
+ pushq %r14 #
+ pushq %r13 #
+ pushq %r12 #
+ pushq %rbp #
+ pushq %rbx #
+ pushq %rdi # we need ctx at the end
+
+#Register and stack use:
+# eax..edx: a..d
+# ebp: e
+# esi,edi: temps
+# -32+4*n(%rsp),r8...r15: W[0..7,8..15]
+
+ movq 4*8(%rdi), %r8
+ bswapq %r8
+ movl %r8d, %r9d
+ shrq $32, %r8
+ movq 4*10(%rdi), %r10
+ bswapq %r10
+ movl %r10d, %r11d
+ shrq $32, %r10
+ movq 4*12(%rdi), %r12
+ bswapq %r12
+ movl %r12d, %r13d
+ shrq $32, %r12
+ movq 4*14(%rdi), %r14
+ bswapq %r14
+ movl %r14d, %r15d
+ shrq $32, %r14
+
+ movl $3, %eax
+1:
+ movq (%rdi,%rax,8), %rsi
+ bswapq %rsi
+ rolq $32, %rsi
+ movq %rsi, -32(%rsp,%rax,8)
+ decl %eax
+ jns 1b
+ movl 80(%rdi), %eax # a = ctx->hash[0]
+ movl 84(%rdi), %ebx # b = ctx->hash[1]
+ movl 88(%rdi), %ecx # c = ctx->hash[2]
+ movl 92(%rdi), %edx # d = ctx->hash[3]
+ movl 96(%rdi), %ebp # e = ctx->hash[4]
+
+# 0
+ # W[0], already in %esi
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ andl %ebx, %edi # &b
+ xorl %edx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbp,%rsi),%ebp # e += RCONST + W[n]
+ addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 1
+ movl -32+4*1(%rsp), %esi # W[n]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ andl %eax, %edi # &b
+ xorl %ecx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
+ addl %edi, %edx # e += (((c ^ d) & b) ^ d)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 2
+ movl -32+4*2(%rsp), %esi # W[n]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ andl %ebp, %edi # &b
+ xorl %ebx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
+ addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 3
+ movl -32+4*3(%rsp), %esi # W[n]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ andl %edx, %edi # &b
+ xorl %eax, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbx,%rsi),%ebx # e += RCONST + W[n]
+ addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 4
+ movl -32+4*4(%rsp), %esi # W[n]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ andl %ecx, %edi # &b
+ xorl %ebp, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rax,%rsi),%eax # e += RCONST + W[n]
+ addl %edi, %eax # e += (((c ^ d) & b) ^ d)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 5
+ movl -32+4*5(%rsp), %esi # W[n]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ andl %ebx, %edi # &b
+ xorl %edx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbp,%rsi),%ebp # e += RCONST + W[n]
+ addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 6
+ movl -32+4*6(%rsp), %esi # W[n]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ andl %eax, %edi # &b
+ xorl %ecx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
+ addl %edi, %edx # e += (((c ^ d) & b) ^ d)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 7
+ movl -32+4*7(%rsp), %esi # W[n]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ andl %ebp, %edi # &b
+ xorl %ebx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
+ addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 8
+ # W[n], in %r8
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ andl %edx, %edi # &b
+ xorl %eax, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbx,%r8),%ebx # e += RCONST + W[n]
+ addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 9
+ # W[n], in %r9
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ andl %ecx, %edi # &b
+ xorl %ebp, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rax,%r9),%eax # e += RCONST + W[n]
+ addl %edi, %eax # e += (((c ^ d) & b) ^ d)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 10
+ # W[n], in %r10
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ andl %ebx, %edi # &b
+ xorl %edx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbp,%r10),%ebp # e += RCONST + W[n]
+ addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 11
+ # W[n], in %r11
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ andl %eax, %edi # &b
+ xorl %ecx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rdx,%r11),%edx # e += RCONST + W[n]
+ addl %edi, %edx # e += (((c ^ d) & b) ^ d)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 12
+ # W[n], in %r12
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ andl %ebp, %edi # &b
+ xorl %ebx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rcx,%r12),%ecx # e += RCONST + W[n]
+ addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 13
+ # W[n], in %r13
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ andl %edx, %edi # &b
+ xorl %eax, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbx,%r13),%ebx # e += RCONST + W[n]
+ addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 14
+ # W[n], in %r14
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ andl %ecx, %edi # &b
+ xorl %ebp, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rax,%r14),%eax # e += RCONST + W[n]
+ addl %edi, %eax # e += (((c ^ d) & b) ^ d)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 15
+ # W[n], in %r15
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ andl %ebx, %edi # &b
+ xorl %edx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbp,%r15),%ebp # e += RCONST + W[n]
+ addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 16
+ movl %r13d, %esi # W[(n+13) & 15]
+ xorl %r8d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*0(%rsp) # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ andl %eax, %edi # &b
+ xorl %ecx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
+ addl %edi, %edx # e += (((c ^ d) & b) ^ d)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 17
+ movl %r14d, %esi # W[(n+13) & 15]
+ xorl %r9d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*1(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ andl %ebp, %edi # &b
+ xorl %ebx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
+ addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 18
+ movl %r15d, %esi # W[(n+13) & 15]
+ xorl %r10d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*2(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ andl %edx, %edi # &b
+ xorl %eax, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbx,%rsi),%ebx # e += RCONST + W[n]
+ addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 19
+ movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
+ xorl %r11d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*3(%rsp) # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ andl %ecx, %edi # &b
+ xorl %ebp, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rax,%rsi),%eax # e += RCONST + W[n]
+ addl %edi, %eax # e += (((c ^ d) & b) ^ d)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 20
+ movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
+ xorl %r12d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*4(%rsp) # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 21
+ movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
+ xorl %r13d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*5(%rsp) # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 22
+ movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
+ xorl %r14d, %esi # ^W[(n+8) & 15]
+ xorl %r8d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*6(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 23
+ movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
+ xorl %r15d, %esi # ^W[(n+8) & 15]
+ xorl %r9d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*7(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 24
+ movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r10d, %esi # ^W[(n+2) & 15]
+ xorl %r8d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r8d # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 25
+ movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r11d, %esi # ^W[(n+2) & 15]
+ xorl %r9d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r9d # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 26
+ movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r12d, %esi # ^W[(n+2) & 15]
+ xorl %r10d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r10d # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 27
+ movl %r8d, %esi # W[(n+13) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r13d, %esi # ^W[(n+2) & 15]
+ xorl %r11d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r11d # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 28
+ movl %r9d, %esi # W[(n+13) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r14d, %esi # ^W[(n+2) & 15]
+ xorl %r12d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r12d # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 29
+ movl %r10d, %esi # W[(n+13) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r15d, %esi # ^W[(n+2) & 15]
+ xorl %r13d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r13d # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 30
+ movl %r11d, %esi # W[(n+13) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r14d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r14d # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 31
+ movl %r12d, %esi # W[(n+13) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r15d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r15d # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 32
+ movl %r13d, %esi # W[(n+13) & 15]
+ xorl %r8d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*0(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 33
+ movl %r14d, %esi # W[(n+13) & 15]
+ xorl %r9d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*1(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 34
+ movl %r15d, %esi # W[(n+13) & 15]
+ xorl %r10d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*2(%rsp) # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 35
+ movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
+ xorl %r11d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*3(%rsp) # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 36
+ movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
+ xorl %r12d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*4(%rsp) # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 37
+ movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
+ xorl %r13d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*5(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 38
+ movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
+ xorl %r14d, %esi # ^W[(n+8) & 15]
+ xorl %r8d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*6(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 39
+ movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
+ xorl %r15d, %esi # ^W[(n+8) & 15]
+ xorl %r9d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*7(%rsp) # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 40
+ movl %ebx, %edi # di: b
+ movl %ebx, %esi # si: b
+ orl %ecx, %edi # di: b | c
+ andl %ecx, %esi # si: b & c
+ andl %edx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r10d, %esi # ^W[(n+2) & 15]
+ xorl %r8d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r8d # store to W[n & 15]
+ addl %edi, %ebp # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 41
+ movl %eax, %edi # di: b
+ movl %eax, %esi # si: b
+ orl %ebx, %edi # di: b | c
+ andl %ebx, %esi # si: b & c
+ andl %ecx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r11d, %esi # ^W[(n+2) & 15]
+ xorl %r9d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r9d # store to W[n & 15]
+ addl %edi, %edx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 42
+ movl %ebp, %edi # di: b
+ movl %ebp, %esi # si: b
+ orl %eax, %edi # di: b | c
+ andl %eax, %esi # si: b & c
+ andl %ebx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r12d, %esi # ^W[(n+2) & 15]
+ xorl %r10d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r10d # store to W[n & 15]
+ addl %edi, %ecx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 43
+ movl %edx, %edi # di: b
+ movl %edx, %esi # si: b
+ orl %ebp, %edi # di: b | c
+ andl %ebp, %esi # si: b & c
+ andl %eax, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r8d, %esi # W[(n+13) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r13d, %esi # ^W[(n+2) & 15]
+ xorl %r11d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r11d # store to W[n & 15]
+ addl %edi, %ebx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 44
+ movl %ecx, %edi # di: b
+ movl %ecx, %esi # si: b
+ orl %edx, %edi # di: b | c
+ andl %edx, %esi # si: b & c
+ andl %ebp, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r9d, %esi # W[(n+13) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r14d, %esi # ^W[(n+2) & 15]
+ xorl %r12d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r12d # store to W[n & 15]
+ addl %edi, %eax # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 45
+ movl %ebx, %edi # di: b
+ movl %ebx, %esi # si: b
+ orl %ecx, %edi # di: b | c
+ andl %ecx, %esi # si: b & c
+ andl %edx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r10d, %esi # W[(n+13) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r15d, %esi # ^W[(n+2) & 15]
+ xorl %r13d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r13d # store to W[n & 15]
+ addl %edi, %ebp # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 46
+ movl %eax, %edi # di: b
+ movl %eax, %esi # si: b
+ orl %ebx, %edi # di: b | c
+ andl %ebx, %esi # si: b & c
+ andl %ecx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r11d, %esi # W[(n+13) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r14d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r14d # store to W[n & 15]
+ addl %edi, %edx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 47
+ movl %ebp, %edi # di: b
+ movl %ebp, %esi # si: b
+ orl %eax, %edi # di: b | c
+ andl %eax, %esi # si: b & c
+ andl %ebx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r12d, %esi # W[(n+13) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r15d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r15d # store to W[n & 15]
+ addl %edi, %ecx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 48
+ movl %edx, %edi # di: b
+ movl %edx, %esi # si: b
+ orl %ebp, %edi # di: b | c
+ andl %ebp, %esi # si: b & c
+ andl %eax, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r13d, %esi # W[(n+13) & 15]
+ xorl %r8d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*0(%rsp) # store to W[n & 15]
+ addl %edi, %ebx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 49
+ movl %ecx, %edi # di: b
+ movl %ecx, %esi # si: b
+ orl %edx, %edi # di: b | c
+ andl %edx, %esi # si: b & c
+ andl %ebp, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r14d, %esi # W[(n+13) & 15]
+ xorl %r9d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*1(%rsp) # store to W[n & 15]
+ addl %edi, %eax # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 50
+ movl %ebx, %edi # di: b
+ movl %ebx, %esi # si: b
+ orl %ecx, %edi # di: b | c
+ andl %ecx, %esi # si: b & c
+ andl %edx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r15d, %esi # W[(n+13) & 15]
+ xorl %r10d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*2(%rsp) # store to W[n & 15]
+ addl %edi, %ebp # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 51
+ movl %eax, %edi # di: b
+ movl %eax, %esi # si: b
+ orl %ebx, %edi # di: b | c
+ andl %ebx, %esi # si: b & c
+ andl %ecx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
+ xorl %r11d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*3(%rsp) # store to W[n & 15]
+ addl %edi, %edx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 52
+ movl %ebp, %edi # di: b
+ movl %ebp, %esi # si: b
+ orl %eax, %edi # di: b | c
+ andl %eax, %esi # si: b & c
+ andl %ebx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
+ xorl %r12d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*4(%rsp) # store to W[n & 15]
+ addl %edi, %ecx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 53
+ movl %edx, %edi # di: b
+ movl %edx, %esi # si: b
+ orl %ebp, %edi # di: b | c
+ andl %ebp, %esi # si: b & c
+ andl %eax, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
+ xorl %r13d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*5(%rsp) # store to W[n & 15]
+ addl %edi, %ebx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 54
+ movl %ecx, %edi # di: b
+ movl %ecx, %esi # si: b
+ orl %edx, %edi # di: b | c
+ andl %edx, %esi # si: b & c
+ andl %ebp, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
+ xorl %r14d, %esi # ^W[(n+8) & 15]
+ xorl %r8d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*6(%rsp) # store to W[n & 15]
+ addl %edi, %eax # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 55
+ movl %ebx, %edi # di: b
+ movl %ebx, %esi # si: b
+ orl %ecx, %edi # di: b | c
+ andl %ecx, %esi # si: b & c
+ andl %edx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
+ xorl %r15d, %esi # ^W[(n+8) & 15]
+ xorl %r9d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*7(%rsp) # store to W[n & 15]
+ addl %edi, %ebp # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 56
+ movl %eax, %edi # di: b
+ movl %eax, %esi # si: b
+ orl %ebx, %edi # di: b | c
+ andl %ebx, %esi # si: b & c
+ andl %ecx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r10d, %esi # ^W[(n+2) & 15]
+ xorl %r8d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r8d # store to W[n & 15]
+ addl %edi, %edx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 57
+ movl %ebp, %edi # di: b
+ movl %ebp, %esi # si: b
+ orl %eax, %edi # di: b | c
+ andl %eax, %esi # si: b & c
+ andl %ebx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r11d, %esi # ^W[(n+2) & 15]
+ xorl %r9d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r9d # store to W[n & 15]
+ addl %edi, %ecx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 58
+ movl %edx, %edi # di: b
+ movl %edx, %esi # si: b
+ orl %ebp, %edi # di: b | c
+ andl %ebp, %esi # si: b & c
+ andl %eax, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r12d, %esi # ^W[(n+2) & 15]
+ xorl %r10d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r10d # store to W[n & 15]
+ addl %edi, %ebx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 59
+ movl %ecx, %edi # di: b
+ movl %ecx, %esi # si: b
+ orl %edx, %edi # di: b | c
+ andl %edx, %esi # si: b & c
+ andl %ebp, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r8d, %esi # W[(n+13) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r13d, %esi # ^W[(n+2) & 15]
+ xorl %r11d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r11d # store to W[n & 15]
+ addl %edi, %eax # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 60
+ movl %r9d, %esi # W[(n+13) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r14d, %esi # ^W[(n+2) & 15]
+ xorl %r12d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r12d # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal -0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 61
+ movl %r10d, %esi # W[(n+13) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r15d, %esi # ^W[(n+2) & 15]
+ xorl %r13d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r13d # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal -0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 62
+ movl %r11d, %esi # W[(n+13) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r14d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r14d # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal -0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 63
+ movl %r12d, %esi # W[(n+13) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r15d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r15d # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal -0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 64
+ movl %r13d, %esi # W[(n+13) & 15]
+ xorl %r8d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*0(%rsp) # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal -0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 65
+ movl %r14d, %esi # W[(n+13) & 15]
+ xorl %r9d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*1(%rsp) # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal -0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 66
+ movl %r15d, %esi # W[(n+13) & 15]
+ xorl %r10d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*2(%rsp) # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal -0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 67
+ movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
+ xorl %r11d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*3(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal -0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 68
+ movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
+ xorl %r12d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*4(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal -0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 69
+ movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
+ xorl %r13d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*5(%rsp) # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal -0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 70
+ movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
+ xorl %r14d, %esi # ^W[(n+8) & 15]
+ xorl %r8d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*6(%rsp) # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal -0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 71
+ movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
+ xorl %r15d, %esi # ^W[(n+8) & 15]
+ xorl %r9d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*7(%rsp) # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal -0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 72
+ movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r10d, %esi # ^W[(n+2) & 15]
+ xorl %r8d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r8d # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal -0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 73
+ movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r11d, %esi # ^W[(n+2) & 15]
+ xorl %r9d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r9d # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal -0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 74
+ movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r12d, %esi # ^W[(n+2) & 15]
+ xorl %r10d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r10d # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal -0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 75
+ movl %r8d, %esi # W[(n+13) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r13d, %esi # ^W[(n+2) & 15]
+ xorl %r11d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r11d # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal -0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 76
+ movl %r9d, %esi # W[(n+13) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r14d, %esi # ^W[(n+2) & 15]
+ xorl %r12d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r12d # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal -0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 77
+ movl %r10d, %esi # W[(n+13) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r15d, %esi # ^W[(n+2) & 15]
+ xorl %r13d, %esi # ^W[n & 15]
+ roll %esi #
+ # store to W[n & 15] - unused, not done
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal -0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 78
+ movl %r11d, %esi # W[(n+13) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r14d, %esi # ^W[n & 15]
+ roll %esi #
+ # store to W[n & 15] - unused, not done
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal -0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 79
+ movl %r12d, %esi # W[(n+13) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r15d, %esi # ^W[n & 15]
+ roll %esi #
+ # store to W[n & 15] - unused, not done
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal -0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+
+ popq %rdi #
+ addl %eax, 80(%rdi) # ctx->hash[0] += a
+ addl %ebx, 84(%rdi) # ctx->hash[1] += b
+ addl %ecx, 88(%rdi) # ctx->hash[2] += c
+ addl %edx, 92(%rdi) # ctx->hash[3] += d
+ addl %ebp, 96(%rdi) # ctx->hash[4] += e
+ popq %rbx #
+ popq %rbp #
+ popq %r12 #
+ popq %r13 #
+ popq %r14 #
+ popq %r15 #
+
+ ret
+ .size sha1_process_block64, .-sha1_process_block64
+#endif
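
For readers tracing the 80 numbered blocks above: each "# N" block is one SHA-1 round, with a..e held in %eax..%edx and %ebp, W[0..7] spilled to -32+4*n(%rsp) and W[8..15] kept in %r8d..%r15d. A plain-C sketch of the computation the unrolled assembly performs (assuming the 16 input words were already byte-swapped, as the bswap prologue does; this is an illustration, not busybox's portable code path):

#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n)
{
	return (x << n) | (x >> (32 - n));
}

static void sha1_rounds(uint32_t hash[5], uint32_t W[16])
{
	uint32_t a = hash[0], b = hash[1], c = hash[2], d = hash[3], e = hash[4];
	int n;
	for (n = 0; n < 80; n++) {
		uint32_t f, K, t;
		if (n < 20)      { f = ((c ^ d) & b) ^ d;       K = 0x5A827999; } /* RD1 */
		else if (n < 40) { f = c ^ d ^ b;               K = 0x6ED9EBA1; } /* RD2 */
		else if (n < 60) { f = ((b | c) & d) | (b & c); K = 0x8F1BBCDC; } /* RD3 */
		else             { f = c ^ d ^ b;               K = 0xCA62C1D6; } /* RD4 */
		if (n >= 16)    /* message schedule kept in a 16-word ring buffer */
			W[n & 15] = rotl32(W[(n+13) & 15] ^ W[(n+8) & 15]
			                 ^ W[(n+2) & 15]  ^ W[n & 15], 1);
		t = rotl32(a, 5) + f + e + K + W[n & 15];
		e = d;
		d = c;
		c = rotl32(b, 30);      /* the asm's "rorl $2" */
		b = a;
		a = t;
	}
	hash[0] += a; hash[1] += b; hash[2] += c; hash[3] += d; hash[4] += e;
}
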