summaryrefslogtreecommitdiff
path: root/libbb/hash_md5_sha_x86-64.S
diff options
context:
space:
mode:
Diffstat (limited to 'libbb/hash_md5_sha_x86-64.S')
-rw-r--r--  libbb/hash_md5_sha_x86-64.S  1349
1 file changed, 1349 insertions, 0 deletions
diff --git a/libbb/hash_md5_sha_x86-64.S b/libbb/hash_md5_sha_x86-64.S
new file mode 100644
index 0000000..466cd9a
--- /dev/null
+++ b/libbb/hash_md5_sha_x86-64.S
@@ -0,0 +1,1349 @@
+### Generated by hash_md5_sha_x86-64.S.sh ###
+#if defined(__GNUC__) && defined(__x86_64__)
+ .section .text.sha1_process_block64,"ax",@progbits
+ .globl sha1_process_block64
+ .hidden sha1_process_block64
+ .type sha1_process_block64, @function
+sha1_process_block64:
+ pushq %r15 #
+ pushq %r14 #
+ pushq %r13 #
+ pushq %r12 #
+ pushq %rbp #
+ pushq %rbx #
+ pushq %rdi # we need ctx at the end
+
+#Register and stack use:
+# eax..edx: a..d
+# ebp: e
+# esi,edi: temps
+# -32+4*n(%rsp),r8...r15: W[0..7,8..15]
+
+ movq 4*8(%rdi), %r8
+ bswapq %r8
+ movl %r8d, %r9d
+ shrq $32, %r8
+ movq 4*10(%rdi), %r10
+ bswapq %r10
+ movl %r10d, %r11d
+ shrq $32, %r10
+ movq 4*12(%rdi), %r12
+ bswapq %r12
+ movl %r12d, %r13d
+ shrq $32, %r12
+ movq 4*14(%rdi), %r14
+ bswapq %r14
+ movl %r14d, %r15d
+ shrq $32, %r14
+
+ movl $3, %eax
+1:
+ movq (%rdi,%rax,8), %rsi
+ bswapq %rsi
+ rolq $32, %rsi
+ movq %rsi, -32(%rsp,%rax,8)
+ decl %eax
+ jns 1b
+ movl 80(%rdi), %eax # a = ctx->hash[0]
+ movl 84(%rdi), %ebx # b = ctx->hash[1]
+ movl 88(%rdi), %ecx # c = ctx->hash[2]
+ movl 92(%rdi), %edx # d = ctx->hash[3]
+ movl 96(%rdi), %ebp # e = ctx->hash[4]
+
+# 0
+ # W[0], already in %esi
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ andl %ebx, %edi # &b
+ xorl %edx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbp,%rsi),%ebp # e += RCONST + W[n]
+ addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 1
+ movl -32+4*1(%rsp), %esi # W[n]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ andl %eax, %edi # &b
+ xorl %ecx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
+ addl %edi, %edx # e += (((c ^ d) & b) ^ d)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 2
+ movl -32+4*2(%rsp), %esi # W[n]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ andl %ebp, %edi # &b
+ xorl %ebx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
+ addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 3
+ movl -32+4*3(%rsp), %esi # W[n]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ andl %edx, %edi # &b
+ xorl %eax, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbx,%rsi),%ebx # e += RCONST + W[n]
+ addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 4
+ movl -32+4*4(%rsp), %esi # W[n]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ andl %ecx, %edi # &b
+ xorl %ebp, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rax,%rsi),%eax # e += RCONST + W[n]
+ addl %edi, %eax # e += (((c ^ d) & b) ^ d)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 5
+ movl -32+4*5(%rsp), %esi # W[n]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ andl %ebx, %edi # &b
+ xorl %edx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbp,%rsi),%ebp # e += RCONST + W[n]
+ addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 6
+ movl -32+4*6(%rsp), %esi # W[n]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ andl %eax, %edi # &b
+ xorl %ecx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
+ addl %edi, %edx # e += (((c ^ d) & b) ^ d)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 7
+ movl -32+4*7(%rsp), %esi # W[n]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ andl %ebp, %edi # &b
+ xorl %ebx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
+ addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 8
+ # W[n], in %r8
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ andl %edx, %edi # &b
+ xorl %eax, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbx,%r8),%ebx # e += RCONST + W[n]
+ addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 9
+ # W[n], in %r9
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ andl %ecx, %edi # &b
+ xorl %ebp, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rax,%r9),%eax # e += RCONST + W[n]
+ addl %edi, %eax # e += (((c ^ d) & b) ^ d)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 10
+ # W[n], in %r10
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ andl %ebx, %edi # &b
+ xorl %edx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbp,%r10),%ebp # e += RCONST + W[n]
+ addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 11
+ # W[n], in %r11
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ andl %eax, %edi # &b
+ xorl %ecx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rdx,%r11),%edx # e += RCONST + W[n]
+ addl %edi, %edx # e += (((c ^ d) & b) ^ d)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 12
+ # W[n], in %r12
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ andl %ebp, %edi # &b
+ xorl %ebx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rcx,%r12),%ecx # e += RCONST + W[n]
+ addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 13
+ # W[n], in %r13
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ andl %edx, %edi # &b
+ xorl %eax, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbx,%r13),%ebx # e += RCONST + W[n]
+ addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 14
+ # W[n], in %r14
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ andl %ecx, %edi # &b
+ xorl %ebp, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rax,%r14),%eax # e += RCONST + W[n]
+ addl %edi, %eax # e += (((c ^ d) & b) ^ d)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 15
+ # W[n], in %r15
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ andl %ebx, %edi # &b
+ xorl %edx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbp,%r15),%ebp # e += RCONST + W[n]
+ addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 16
+ movl %r13d, %esi # W[(n+13) & 15]
+ xorl %r8d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*0(%rsp) # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ andl %eax, %edi # &b
+ xorl %ecx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
+ addl %edi, %edx # e += (((c ^ d) & b) ^ d)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 17
+ movl %r14d, %esi # W[(n+13) & 15]
+ xorl %r9d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*1(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ andl %ebp, %edi # &b
+ xorl %ebx, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
+ addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 18
+ movl %r15d, %esi # W[(n+13) & 15]
+ xorl %r10d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*2(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ andl %edx, %edi # &b
+ xorl %eax, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rbx,%rsi),%ebx # e += RCONST + W[n]
+ addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 19
+ movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
+ xorl %r11d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*3(%rsp) # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ andl %ecx, %edi # &b
+ xorl %ebp, %edi # (((c ^ d) & b) ^ d)
+ leal 0x5A827999(%rax,%rsi),%eax # e += RCONST + W[n]
+ addl %edi, %eax # e += (((c ^ d) & b) ^ d)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 20
+ movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
+ xorl %r12d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*4(%rsp) # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 21
+ movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
+ xorl %r13d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*5(%rsp) # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 22
+ movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
+ xorl %r14d, %esi # ^W[(n+8) & 15]
+ xorl %r8d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*6(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 23
+ movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
+ xorl %r15d, %esi # ^W[(n+8) & 15]
+ xorl %r9d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*7(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 24
+ movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r10d, %esi # ^W[(n+2) & 15]
+ xorl %r8d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r8d # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 25
+ movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r11d, %esi # ^W[(n+2) & 15]
+ xorl %r9d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r9d # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 26
+ movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r12d, %esi # ^W[(n+2) & 15]
+ xorl %r10d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r10d # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 27
+ movl %r8d, %esi # W[(n+13) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r13d, %esi # ^W[(n+2) & 15]
+ xorl %r11d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r11d # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 28
+ movl %r9d, %esi # W[(n+13) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r14d, %esi # ^W[(n+2) & 15]
+ xorl %r12d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r12d # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 29
+ movl %r10d, %esi # W[(n+13) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r15d, %esi # ^W[(n+2) & 15]
+ xorl %r13d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r13d # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 30
+ movl %r11d, %esi # W[(n+13) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r14d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r14d # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 31
+ movl %r12d, %esi # W[(n+13) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r15d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r15d # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 32
+ movl %r13d, %esi # W[(n+13) & 15]
+ xorl %r8d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*0(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 33
+ movl %r14d, %esi # W[(n+13) & 15]
+ xorl %r9d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*1(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 34
+ movl %r15d, %esi # W[(n+13) & 15]
+ xorl %r10d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*2(%rsp) # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 35
+ movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
+ xorl %r11d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*3(%rsp) # store to W[n & 15]
+ movl %ecx, %edi # c
+ xorl %edx, %edi # ^d
+ xorl %ebx, %edi # ^b
+ leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ addl %edi, %ebp # e += (c ^ d ^ b)
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 36
+ movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
+ xorl %r12d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*4(%rsp) # store to W[n & 15]
+ movl %ebx, %edi # c
+ xorl %ecx, %edi # ^d
+ xorl %eax, %edi # ^b
+ leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ addl %edi, %edx # e += (c ^ d ^ b)
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 37
+ movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
+ xorl %r13d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*5(%rsp) # store to W[n & 15]
+ movl %eax, %edi # c
+ xorl %ebx, %edi # ^d
+ xorl %ebp, %edi # ^b
+ leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ addl %edi, %ecx # e += (c ^ d ^ b)
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 38
+ movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
+ xorl %r14d, %esi # ^W[(n+8) & 15]
+ xorl %r8d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*6(%rsp) # store to W[n & 15]
+ movl %ebp, %edi # c
+ xorl %eax, %edi # ^d
+ xorl %edx, %edi # ^b
+ leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ addl %edi, %ebx # e += (c ^ d ^ b)
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 39
+ movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
+ xorl %r15d, %esi # ^W[(n+8) & 15]
+ xorl %r9d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*7(%rsp) # store to W[n & 15]
+ movl %edx, %edi # c
+ xorl %ebp, %edi # ^d
+ xorl %ecx, %edi # ^b
+ leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
+ addl %edi, %eax # e += (c ^ d ^ b)
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 40
+ movl %ebx, %edi # di: b
+ movl %ebx, %esi # si: b
+ orl %ecx, %edi # di: b | c
+ andl %ecx, %esi # si: b & c
+ andl %edx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r10d, %esi # ^W[(n+2) & 15]
+ xorl %r8d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r8d # store to W[n & 15]
+ addl %edi, %ebp # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 41
+ movl %eax, %edi # di: b
+ movl %eax, %esi # si: b
+ orl %ebx, %edi # di: b | c
+ andl %ebx, %esi # si: b & c
+ andl %ecx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r11d, %esi # ^W[(n+2) & 15]
+ xorl %r9d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r9d # store to W[n & 15]
+ addl %edi, %edx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 42
+ movl %ebp, %edi # di: b
+ movl %ebp, %esi # si: b
+ orl %eax, %edi # di: b | c
+ andl %eax, %esi # si: b & c
+ andl %ebx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r12d, %esi # ^W[(n+2) & 15]
+ xorl %r10d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r10d # store to W[n & 15]
+ addl %edi, %ecx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 43
+ movl %edx, %edi # di: b
+ movl %edx, %esi # si: b
+ orl %ebp, %edi # di: b | c
+ andl %ebp, %esi # si: b & c
+ andl %eax, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r8d, %esi # W[(n+13) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r13d, %esi # ^W[(n+2) & 15]
+ xorl %r11d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r11d # store to W[n & 15]
+ addl %edi, %ebx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 44
+ movl %ecx, %edi # di: b
+ movl %ecx, %esi # si: b
+ orl %edx, %edi # di: b | c
+ andl %edx, %esi # si: b & c
+ andl %ebp, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r9d, %esi # W[(n+13) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r14d, %esi # ^W[(n+2) & 15]
+ xorl %r12d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r12d # store to W[n & 15]
+ addl %edi, %eax # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 45
+ movl %ebx, %edi # di: b
+ movl %ebx, %esi # si: b
+ orl %ecx, %edi # di: b | c
+ andl %ecx, %esi # si: b & c
+ andl %edx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r10d, %esi # W[(n+13) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
+ xorl %r15d, %esi # ^W[(n+2) & 15]
+ xorl %r13d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r13d # store to W[n & 15]
+ addl %edi, %ebp # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 46
+ movl %eax, %edi # di: b
+ movl %eax, %esi # si: b
+ orl %ebx, %edi # di: b | c
+ andl %ebx, %esi # si: b & c
+ andl %ecx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r11d, %esi # W[(n+13) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r14d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r14d # store to W[n & 15]
+ addl %edi, %edx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 47
+ movl %ebp, %edi # di: b
+ movl %ebp, %esi # si: b
+ orl %eax, %edi # di: b | c
+ andl %eax, %esi # si: b & c
+ andl %ebx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r12d, %esi # W[(n+13) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
+ xorl %r15d, %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, %r15d # store to W[n & 15]
+ addl %edi, %ecx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 48
+ movl %edx, %edi # di: b
+ movl %edx, %esi # si: b
+ orl %ebp, %edi # di: b | c
+ andl %ebp, %esi # si: b & c
+ andl %eax, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r13d, %esi # W[(n+13) & 15]
+ xorl %r8d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*0(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*0(%rsp) # store to W[n & 15]
+ addl %edi, %ebx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 49
+ movl %ecx, %edi # di: b
+ movl %ecx, %esi # si: b
+ orl %edx, %edi # di: b | c
+ andl %edx, %esi # si: b & c
+ andl %ebp, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r14d, %esi # W[(n+13) & 15]
+ xorl %r9d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*1(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*1(%rsp) # store to W[n & 15]
+ addl %edi, %eax # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 50
+ movl %ebx, %edi # di: b
+ movl %ebx, %esi # si: b
+ orl %ecx, %edi # di: b | c
+ andl %ecx, %esi # si: b & c
+ andl %edx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl %r15d, %esi # W[(n+13) & 15]
+ xorl %r10d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*2(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*2(%rsp) # store to W[n & 15]
+ addl %edi, %ebp # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 51
+ movl %eax, %edi # di: b
+ movl %eax, %esi # si: b
+ orl %ebx, %edi # di: b | c
+ andl %ebx, %esi # si: b & c
+ andl %ecx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
+ xorl %r11d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*3(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*3(%rsp) # store to W[n & 15]
+ addl %edi, %edx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
+ movl %ebp, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %edx # e += rotl32(a,5)
+ rorl $2, %eax # b = rotl32(b,30)
+# 52
+ movl %ebp, %edi # di: b
+ movl %ebp, %esi # si: b
+ orl %eax, %edi # di: b | c
+ andl %eax, %esi # si: b & c
+ andl %ebx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
+ xorl %r12d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*4(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*4(%rsp) # store to W[n & 15]
+ addl %edi, %ecx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+ movl %edx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ecx # e += rotl32(a,5)
+ rorl $2, %ebp # b = rotl32(b,30)
+# 53
+ movl %edx, %edi # di: b
+ movl %edx, %esi # si: b
+ orl %ebp, %edi # di: b | c
+ andl %ebp, %esi # si: b & c
+ andl %eax, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
+ xorl %r13d, %esi # ^W[(n+8) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
+ xorl -32+4*5(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*5(%rsp) # store to W[n & 15]
+ addl %edi, %ebx # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+ movl %ecx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebx # e += rotl32(a,5)
+ rorl $2, %edx # b = rotl32(b,30)
+# 54
+ movl %ecx, %edi # di: b
+ movl %ecx, %esi # si: b
+ orl %edx, %edi # di: b | c
+ andl %edx, %esi # si: b & c
+ andl %ebp, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
+ xorl %r14d, %esi # ^W[(n+8) & 15]
+ xorl %r8d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*6(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*6(%rsp) # store to W[n & 15]
+ addl %edi, %eax # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
+ movl %ebx, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %eax # e += rotl32(a,5)
+ rorl $2, %ecx # b = rotl32(b,30)
+# 55
+ movl %ebx, %edi # di: b
+ movl %ebx, %esi # si: b
+ orl %ecx, %edi # di: b | c
+ andl %ecx, %esi # si: b & c
+ andl %edx, %edi # di: (b | c) & d
+ orl %esi, %edi # ((b | c) & d) | (b & c)
+ movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
+ xorl %r15d, %esi # ^W[(n+8) & 15]
+ xorl %r9d, %esi # ^W[(n+2) & 15]
+ xorl -32+4*7(%rsp), %esi # ^W[n & 15]
+ roll %esi #
+ movl %esi, -32+4*7(%rsp) # store to W[n & 15]
+ addl %edi, %ebp # += ((b | c) & d) | (b & c)
+ leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+ movl %eax, %esi #
+ roll $5, %esi # rotl32(a,5)
+ addl %esi, %ebp # e += rotl32(a,5)
+ rorl $2, %ebx # b = rotl32(b,30)
+# 56
+ movl %eax, %edi # di: b
+ movl %eax, %esi # si: b
+ orl %ebx, %edi # di: b | c
+# --- SHA-1 rounds 56..59 (tail of the 40..59 group) ---
+# F = MAJ(b,c,d) = (b & c) | ((b | c) & d); RCONST = 0x8F1BBCDC,
+# written below as the sign-extended immediate -0x70e44324.
+# Entered mid-round 56: %esi/%edi already hold the partial MAJ
+# temporaries (b and b|c) computed on the lines just above this chunk.
+# The message schedule W[16..79] is kept in a rolling 16-entry window:
+# W[0..7] live at -32+4*n(%rsp), W[8..15] in %r8d..%r15d.
+# "roll %esi" with no count is rotate-left by 1 (the W-schedule rotate).
+	andl	%ebx, %esi        # si: b & c
+	andl	%ecx, %edi        # di: (b | c) & d
+	orl	%esi, %edi        # ((b | c) & d) | (b & c)
+	movl	-32+4*5(%rsp), %esi # W[(n+13) & 15]
+	xorl	-32+4*0(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r10d, %esi       # ^W[(n+2) & 15]
+	xorl	%r8d, %esi        # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r8d        # store to W[n & 15]
+	addl	%edi, %edx        # += ((b | c) & d) | (b & c)
+	leal	-0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
+	movl	%ebp, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %edx        # e += rotl32(a,5)
+	rorl	$2, %eax          # b = rotl32(b,30)
+# 57
+	movl	%ebp, %edi        # di: b
+	movl	%ebp, %esi        # si: b
+	orl	%eax, %edi        # di: b | c
+	andl	%eax, %esi        # si: b & c
+	andl	%ebx, %edi        # di: (b | c) & d
+	orl	%esi, %edi        # ((b | c) & d) | (b & c)
+	movl	-32+4*6(%rsp), %esi # W[(n+13) & 15]
+	xorl	-32+4*1(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r11d, %esi       # ^W[(n+2) & 15]
+	xorl	%r9d, %esi        # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r9d        # store to W[n & 15]
+	addl	%edi, %ecx        # += ((b | c) & d) | (b & c)
+	leal	-0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+	movl	%edx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ecx        # e += rotl32(a,5)
+	rorl	$2, %ebp          # b = rotl32(b,30)
+# 58
+	movl	%edx, %edi        # di: b
+	movl	%edx, %esi        # si: b
+	orl	%ebp, %edi        # di: b | c
+	andl	%ebp, %esi        # si: b & c
+	andl	%eax, %edi        # di: (b | c) & d
+	orl	%esi, %edi        # ((b | c) & d) | (b & c)
+	movl	-32+4*7(%rsp), %esi # W[(n+13) & 15]
+	xorl	-32+4*2(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r12d, %esi       # ^W[(n+2) & 15]
+	xorl	%r10d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r10d       # store to W[n & 15]
+	addl	%edi, %ebx        # += ((b | c) & d) | (b & c)
+	leal	-0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+	movl	%ecx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebx        # e += rotl32(a,5)
+	rorl	$2, %edx          # b = rotl32(b,30)
+# 59
+	movl	%ecx, %edi        # di: b
+	movl	%ecx, %esi        # si: b
+	orl	%edx, %edi        # di: b | c
+	andl	%edx, %esi        # si: b & c
+	andl	%ebp, %edi        # di: (b | c) & d
+	orl	%esi, %edi        # ((b | c) & d) | (b & c)
+	movl	%r8d, %esi        # W[(n+13) & 15]
+	xorl	-32+4*3(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r13d, %esi       # ^W[(n+2) & 15]
+	xorl	%r11d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r11d       # store to W[n & 15]
+	addl	%edi, %eax        # += ((b | c) & d) | (b & c)
+	leal	-0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
+	movl	%ebx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %eax        # e += rotl32(a,5)
+	rorl	$2, %ecx          # b = rotl32(b,30)
+# --- SHA-1 rounds 60..67 (start of the 60..79 group) ---
+# F = parity(b,c,d) = b ^ c ^ d; RCONST = 0xCA62C1D6, written below as
+# the sign-extended immediate -0x359d3e2a.  The a..e roles keep rotating
+# through eax/ebx/ecx/edx/ebp each round (rorl $2 implements the
+# b = rotl32(b,30) step).  The rolling W window now writes rounds
+# 60..63 into %r12d..%r15d and rounds 64..67 into the stack slots.
+# 60
+	movl	%r9d, %esi        # W[(n+13) & 15]
+	xorl	-32+4*4(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r14d, %esi       # ^W[(n+2) & 15]
+	xorl	%r12d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r12d       # store to W[n & 15]
+	movl	%ecx, %edi        # c
+	xorl	%edx, %edi        # ^d
+	xorl	%ebx, %edi        # ^b
+	leal	-0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+	addl	%edi, %ebp        # e += (c ^ d ^ b)
+	movl	%eax, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebp        # e += rotl32(a,5)
+	rorl	$2, %ebx          # b = rotl32(b,30)
+# 61
+	movl	%r10d, %esi       # W[(n+13) & 15]
+	xorl	-32+4*5(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r15d, %esi       # ^W[(n+2) & 15]
+	xorl	%r13d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r13d       # store to W[n & 15]
+	movl	%ebx, %edi        # c
+	xorl	%ecx, %edi        # ^d
+	xorl	%eax, %edi        # ^b
+	leal	-0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
+	addl	%edi, %edx        # e += (c ^ d ^ b)
+	movl	%ebp, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %edx        # e += rotl32(a,5)
+	rorl	$2, %eax          # b = rotl32(b,30)
+# 62
+	movl	%r11d, %esi       # W[(n+13) & 15]
+	xorl	-32+4*6(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	-32+4*0(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	%r14d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r14d       # store to W[n & 15]
+	movl	%eax, %edi        # c
+	xorl	%ebx, %edi        # ^d
+	xorl	%ebp, %edi        # ^b
+	leal	-0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+	addl	%edi, %ecx        # e += (c ^ d ^ b)
+	movl	%edx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ecx        # e += rotl32(a,5)
+	rorl	$2, %ebp          # b = rotl32(b,30)
+# 63
+	movl	%r12d, %esi       # W[(n+13) & 15]
+	xorl	-32+4*7(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	-32+4*1(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	%r15d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r15d       # store to W[n & 15]
+	movl	%ebp, %edi        # c
+	xorl	%eax, %edi        # ^d
+	xorl	%edx, %edi        # ^b
+	leal	-0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+	addl	%edi, %ebx        # e += (c ^ d ^ b)
+	movl	%ecx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebx        # e += rotl32(a,5)
+	rorl	$2, %edx          # b = rotl32(b,30)
+# 64
+	movl	%r13d, %esi       # W[(n+13) & 15]
+	xorl	%r8d, %esi        # ^W[(n+8) & 15]
+	xorl	-32+4*2(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	-32+4*0(%rsp), %esi # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, -32+4*0(%rsp) # store to W[n & 15]
+	movl	%edx, %edi        # c
+	xorl	%ebp, %edi        # ^d
+	xorl	%ecx, %edi        # ^b
+	leal	-0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
+	addl	%edi, %eax        # e += (c ^ d ^ b)
+	movl	%ebx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %eax        # e += rotl32(a,5)
+	rorl	$2, %ecx          # b = rotl32(b,30)
+# 65
+	movl	%r14d, %esi       # W[(n+13) & 15]
+	xorl	%r9d, %esi        # ^W[(n+8) & 15]
+	xorl	-32+4*3(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	-32+4*1(%rsp), %esi # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, -32+4*1(%rsp) # store to W[n & 15]
+	movl	%ecx, %edi        # c
+	xorl	%edx, %edi        # ^d
+	xorl	%ebx, %edi        # ^b
+	leal	-0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+	addl	%edi, %ebp        # e += (c ^ d ^ b)
+	movl	%eax, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebp        # e += rotl32(a,5)
+	rorl	$2, %ebx          # b = rotl32(b,30)
+# 66
+	movl	%r15d, %esi       # W[(n+13) & 15]
+	xorl	%r10d, %esi       # ^W[(n+8) & 15]
+	xorl	-32+4*4(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	-32+4*2(%rsp), %esi # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, -32+4*2(%rsp) # store to W[n & 15]
+	movl	%ebx, %edi        # c
+	xorl	%ecx, %edi        # ^d
+	xorl	%eax, %edi        # ^b
+	leal	-0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
+	addl	%edi, %edx        # e += (c ^ d ^ b)
+	movl	%ebp, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %edx        # e += rotl32(a,5)
+	rorl	$2, %eax          # b = rotl32(b,30)
+# 67
+	movl	-32+4*0(%rsp), %esi # W[(n+13) & 15]
+	xorl	%r11d, %esi       # ^W[(n+8) & 15]
+	xorl	-32+4*5(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	-32+4*3(%rsp), %esi # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, -32+4*3(%rsp) # store to W[n & 15]
+	movl	%eax, %edi        # c
+	xorl	%ebx, %edi        # ^d
+	xorl	%ebp, %edi        # ^b
+	leal	-0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+	addl	%edi, %ecx        # e += (c ^ d ^ b)
+	movl	%edx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ecx        # e += rotl32(a,5)
+	rorl	$2, %ebp          # b = rotl32(b,30)
+# --- SHA-1 rounds 68..75 (middle of the 60..79 parity group) ---
+# Same recurrence as rounds 60..67: F = b ^ c ^ d, RCONST = 0xCA62C1D6
+# (sign-extended -0x359d3e2a).  Rounds 68..71 write the schedule into
+# stack slots -32+4*4..7(%rsp); rounds 72..75 wrap the 16-entry window
+# back around into %r8d..%r11d.
+# 68
+	movl	-32+4*1(%rsp), %esi # W[(n+13) & 15]
+	xorl	%r12d, %esi       # ^W[(n+8) & 15]
+	xorl	-32+4*6(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	-32+4*4(%rsp), %esi # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, -32+4*4(%rsp) # store to W[n & 15]
+	movl	%ebp, %edi        # c
+	xorl	%eax, %edi        # ^d
+	xorl	%edx, %edi        # ^b
+	leal	-0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+	addl	%edi, %ebx        # e += (c ^ d ^ b)
+	movl	%ecx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebx        # e += rotl32(a,5)
+	rorl	$2, %edx          # b = rotl32(b,30)
+# 69
+	movl	-32+4*2(%rsp), %esi # W[(n+13) & 15]
+	xorl	%r13d, %esi       # ^W[(n+8) & 15]
+	xorl	-32+4*7(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	-32+4*5(%rsp), %esi # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, -32+4*5(%rsp) # store to W[n & 15]
+	movl	%edx, %edi        # c
+	xorl	%ebp, %edi        # ^d
+	xorl	%ecx, %edi        # ^b
+	leal	-0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
+	addl	%edi, %eax        # e += (c ^ d ^ b)
+	movl	%ebx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %eax        # e += rotl32(a,5)
+	rorl	$2, %ecx          # b = rotl32(b,30)
+# 70
+	movl	-32+4*3(%rsp), %esi # W[(n+13) & 15]
+	xorl	%r14d, %esi       # ^W[(n+8) & 15]
+	xorl	%r8d, %esi        # ^W[(n+2) & 15]
+	xorl	-32+4*6(%rsp), %esi # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, -32+4*6(%rsp) # store to W[n & 15]
+	movl	%ecx, %edi        # c
+	xorl	%edx, %edi        # ^d
+	xorl	%ebx, %edi        # ^b
+	leal	-0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+	addl	%edi, %ebp        # e += (c ^ d ^ b)
+	movl	%eax, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebp        # e += rotl32(a,5)
+	rorl	$2, %ebx          # b = rotl32(b,30)
+# 71
+	movl	-32+4*4(%rsp), %esi # W[(n+13) & 15]
+	xorl	%r15d, %esi       # ^W[(n+8) & 15]
+	xorl	%r9d, %esi        # ^W[(n+2) & 15]
+	xorl	-32+4*7(%rsp), %esi # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, -32+4*7(%rsp) # store to W[n & 15]
+	movl	%ebx, %edi        # c
+	xorl	%ecx, %edi        # ^d
+	xorl	%eax, %edi        # ^b
+	leal	-0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
+	addl	%edi, %edx        # e += (c ^ d ^ b)
+	movl	%ebp, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %edx        # e += rotl32(a,5)
+	rorl	$2, %eax          # b = rotl32(b,30)
+# 72
+	movl	-32+4*5(%rsp), %esi # W[(n+13) & 15]
+	xorl	-32+4*0(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r10d, %esi       # ^W[(n+2) & 15]
+	xorl	%r8d, %esi        # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r8d        # store to W[n & 15]
+	movl	%eax, %edi        # c
+	xorl	%ebx, %edi        # ^d
+	xorl	%ebp, %edi        # ^b
+	leal	-0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+	addl	%edi, %ecx        # e += (c ^ d ^ b)
+	movl	%edx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ecx        # e += rotl32(a,5)
+	rorl	$2, %ebp          # b = rotl32(b,30)
+# 73
+	movl	-32+4*6(%rsp), %esi # W[(n+13) & 15]
+	xorl	-32+4*1(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r11d, %esi       # ^W[(n+2) & 15]
+	xorl	%r9d, %esi        # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r9d        # store to W[n & 15]
+	movl	%ebp, %edi        # c
+	xorl	%eax, %edi        # ^d
+	xorl	%edx, %edi        # ^b
+	leal	-0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+	addl	%edi, %ebx        # e += (c ^ d ^ b)
+	movl	%ecx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebx        # e += rotl32(a,5)
+	rorl	$2, %edx          # b = rotl32(b,30)
+# 74
+	movl	-32+4*7(%rsp), %esi # W[(n+13) & 15]
+	xorl	-32+4*2(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r12d, %esi       # ^W[(n+2) & 15]
+	xorl	%r10d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r10d       # store to W[n & 15]
+	movl	%edx, %edi        # c
+	xorl	%ebp, %edi        # ^d
+	xorl	%ecx, %edi        # ^b
+	leal	-0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
+	addl	%edi, %eax        # e += (c ^ d ^ b)
+	movl	%ebx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %eax        # e += rotl32(a,5)
+	rorl	$2, %ecx          # b = rotl32(b,30)
+# 75
+	movl	%r8d, %esi        # W[(n+13) & 15]
+	xorl	-32+4*3(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r13d, %esi       # ^W[(n+2) & 15]
+	xorl	%r11d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r11d       # store to W[n & 15]
+	movl	%ecx, %edi        # c
+	xorl	%edx, %edi        # ^d
+	xorl	%ebx, %edi        # ^b
+	leal	-0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
+	addl	%edi, %ebp        # e += (c ^ d ^ b)
+	movl	%eax, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebp        # e += rotl32(a,5)
+	rorl	$2, %ebx          # b = rotl32(b,30)
+# --- SHA-1 rounds 76..79 (end of the 60..79 parity group) ---
+# Same recurrence (F = b ^ c ^ d, RCONST 0xCA62C1D6 = -0x359d3e2a).
+# For rounds 77..79 the freshly mixed W value is deliberately NOT
+# stored: no later round reads it, so the "store to W[n & 15]" step
+# is skipped as a generated-code optimization.
+# 76
+	movl	%r9d, %esi        # W[(n+13) & 15]
+	xorl	-32+4*4(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r14d, %esi       # ^W[(n+2) & 15]
+	xorl	%r12d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	movl	%esi, %r12d       # store to W[n & 15]
+	movl	%ebx, %edi        # c
+	xorl	%ecx, %edi        # ^d
+	xorl	%eax, %edi        # ^b
+	leal	-0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
+	addl	%edi, %edx        # e += (c ^ d ^ b)
+	movl	%ebp, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %edx        # e += rotl32(a,5)
+	rorl	$2, %eax          # b = rotl32(b,30)
+# 77
+	movl	%r10d, %esi       # W[(n+13) & 15]
+	xorl	-32+4*5(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	%r15d, %esi       # ^W[(n+2) & 15]
+	xorl	%r13d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	# store to W[n & 15] - unused, not done
+	movl	%eax, %edi        # c
+	xorl	%ebx, %edi        # ^d
+	xorl	%ebp, %edi        # ^b
+	leal	-0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
+	addl	%edi, %ecx        # e += (c ^ d ^ b)
+	movl	%edx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ecx        # e += rotl32(a,5)
+	rorl	$2, %ebp          # b = rotl32(b,30)
+# 78
+	movl	%r11d, %esi       # W[(n+13) & 15]
+	xorl	-32+4*6(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	-32+4*0(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	%r14d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	# store to W[n & 15] - unused, not done
+	movl	%ebp, %edi        # c
+	xorl	%eax, %edi        # ^d
+	xorl	%edx, %edi        # ^b
+	leal	-0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
+	addl	%edi, %ebx        # e += (c ^ d ^ b)
+	movl	%ecx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %ebx        # e += rotl32(a,5)
+	rorl	$2, %edx          # b = rotl32(b,30)
+# 79
+	movl	%r12d, %esi       # W[(n+13) & 15]
+	xorl	-32+4*7(%rsp), %esi # ^W[(n+8) & 15]
+	xorl	-32+4*1(%rsp), %esi # ^W[(n+2) & 15]
+	xorl	%r15d, %esi       # ^W[n & 15]
+	roll	%esi              # rotl32(mixed_W, 1)
+	# store to W[n & 15] - unused, not done
+	movl	%edx, %edi        # c
+	xorl	%ebp, %edi        # ^d
+	xorl	%ecx, %edi        # ^b
+	leal	-0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
+	addl	%edi, %eax        # e += (c ^ d ^ b)
+	movl	%ebx, %esi        #
+	roll	$5, %esi          # rotl32(a,5)
+	addl	%esi, %eax        # e += rotl32(a,5)
+	rorl	$2, %ecx          # b = rotl32(b,30)
+
+# --- Epilogue: fold the working variables back into the context ---
+# The prologue pushed %rdi (ctx) last, so it is the first pop here.
+# ctx->hash[0..4] live at byte offsets 80..96 (matching the loads in
+# the prologue).  Callee-saved registers are restored in reverse order
+# of the prologue pushes, per the System V AMD64 ABI.
+	popq	%rdi              # restore ctx pointer saved in prologue
+	addl	%eax, 80(%rdi)    # ctx->hash[0] += a
+	addl	%ebx, 84(%rdi)    # ctx->hash[1] += b
+	addl	%ecx, 88(%rdi)    # ctx->hash[2] += c
+	addl	%edx, 92(%rdi)    # ctx->hash[3] += d
+	addl	%ebp, 96(%rdi)    # ctx->hash[4] += e
+	popq	%rbx              # restore callee-saved regs (reverse of pushes)
+	popq	%rbp              #
+	popq	%r12              #
+	popq	%r13              #
+	popq	%r14              #
+	popq	%r15              #
+
+	ret
+	.size	sha1_process_block64, .-sha1_process_block64
+#endif