
Before this patch, a redundant COPY could not be removed in the following case: ``` $R0 = OP ... ... // Read of $R0 $R1 = COPY killed $R0 ``` This patch adds support for tracking the users of the source register during backward propagation, so that the redundant COPY in the above case can be removed and optimized to: ``` $R1 = OP ... ... // All uses of $R0 replaced with $R1 ```
5953 lines
260 KiB
LLVM
5953 lines
260 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X86
|
|
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
|
|
|
|
define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
|
|
; X86-LABEL: test_1024:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: pushl %ebp
|
|
; X86-NEXT: pushl %ebx
|
|
; X86-NEXT: pushl %edi
|
|
; X86-NEXT: pushl %esi
|
|
; X86-NEXT: subl $400, %esp # imm = 0x190
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 60(%eax), %ebp
|
|
; X86-NEXT: movl 56(%eax), %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl (%ebx), %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl 4(%ebx), %ebx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 48(%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl 52(%ecx), %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl %bl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 8(%eax), %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 12(%eax), %ecx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 40(%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl 44(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 32(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl 36(%ecx), %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %ebx, %ebp
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebx, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movl %edi, %edx
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %edi, %eax
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 16(%eax), %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %ecx, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 20(%eax), %ecx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %esi, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 24(%eax), %edi
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 28(%eax), %edi
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl %edx, %eax
|
|
; X86-NEXT: adcl $0, %eax
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebx, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 24(%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl 28(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %ebx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 16(%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl 20(%ecx), %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %ebp, %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ebx, %edi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 8(%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl 12(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl (%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl 4(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %esi, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %esi, %ecx
|
|
; X86-NEXT: movl %ebx, %edx
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %esi, %eax
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebp, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl %ecx, %esi
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl %edx, %eax
|
|
; X86-NEXT: adcl $0, %eax
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl (%esp), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %ebp, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %esi, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: adcl %esi, %ebx
|
|
; X86-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ebx, %esi
|
|
; X86-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: addl %esi, %ebx
|
|
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebx
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl $0, %eax
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
|
|
; X86-NEXT: movl 32(%edi), %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %esi, %ebx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 36(%eax), %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %esi, %edi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ebx, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 40(%eax), %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 44(%eax), %ecx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: adcl %edi, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: adcl %edi, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %esi, %ebx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebx, %esi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %edi, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %esi, %ebx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %edi, %ecx
|
|
; X86-NEXT: movl %ebx, %edx
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %edi, %eax
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 48(%eax), %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebp, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 52(%eax), %ebp
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %ebp, %ebx
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X86-NEXT: movl 56(%esi), %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl 60(%esi), %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %esi, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: addl %ebx, %esi
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %ecx, %eax
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl %esi, %ecx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %eax
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %esi, %ebx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %edi, %ecx
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %edi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, %ebp
|
|
; X86-NEXT: adcl %eax, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edi
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl $0, %eax
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ebp, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl %edi, %esi
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ebx, %edi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %edi, %ecx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ecx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ecx, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ecx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, %esi
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %ecx, %eax
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %edi, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl (%esp), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movzbl %bl, %edi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: adcl %edi, %esi
|
|
; X86-NEXT: movl (%esp), %edx # 4-byte Reload
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %edi, %eax
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ebx, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %edi, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %edi, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %esi, %ecx
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %edi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ecx
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: adcl $0, %eax
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, %esi
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %ebx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: movl %esi, %ebp
|
|
; X86-NEXT: adcl %eax, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebx
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 64(%eax), %ecx
|
|
; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 68(%eax), %edi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edi, %ebp
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ebp, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 72(%eax), %ebx
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 76(%eax), %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %edx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ebp, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ebx, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %esi, %ebx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %ebx, %ecx
|
|
; X86-NEXT: movl %edi, %edx
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %edi, %eax
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 80(%eax), %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %ecx, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 84(%eax), %ecx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebp, %edi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 88(%eax), %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 92(%eax), %esi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl %edx, %eax
|
|
; X86-NEXT: adcl $0, %eax
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %edi, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %edi, %esi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl (%esp), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %edi, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: imull %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: addl %esi, %edx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: imull %ebp, %eax
|
|
; X86-NEXT: addl %edx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: imull %ebx, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %esi, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: imull %edi, %esi
|
|
; X86-NEXT: addl %edx, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X86-NEXT: imull %eax, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: addl %edx, %ebp
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: imull %ebx, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: imull %edi, %ecx
|
|
; X86-NEXT: addl %edx, %ecx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ebp, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ecx, %esi
|
|
; X86-NEXT: adcl %ebx, %ebp
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 104(%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl 108(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X86-NEXT: movl 96(%esi), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl 100(%esi), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ebx, %esi
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %edi, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebx
|
|
; X86-NEXT: addl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X86-NEXT: movl 112(%esi), %edi
|
|
; X86-NEXT: movl %ebp, %ecx
|
|
; X86-NEXT: imull %edi, %ecx
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: movl 116(%esi), %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: imull %eax, %ebx
|
|
; X86-NEXT: addl %edx, %ebx
|
|
; X86-NEXT: movl 120(%esi), %eax
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: imull %esi, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 124(%ecx), %ecx
|
|
; X86-NEXT: imull %ebp, %ecx
|
|
; X86-NEXT: addl %edx, %ecx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ebx, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %esi, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: imull %ebp, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: addl %edx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: imull %ebx, %edi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %edx
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: imull %eax, %ecx
|
|
; X86-NEXT: addl %edx, %ecx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebx, %ecx
|
|
; X86-NEXT: adcl %ebp, %edi
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movzbl %bl, %edi
|
|
; X86-NEXT: adcl %edi, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, %ebx
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 88(%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl 92(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 80(%ecx), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl 84(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %esi, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ebp, (%esp) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edi
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl 72(%ecx), %ebp
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl 76(%ecx), %esi
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X86-NEXT: movl 64(%esi), %ebx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl 68(%esi), %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebp, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %edi, %ebp
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %edi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: adcl %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edi
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl %ebx, %edi
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %ecx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebp
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %ecx, %ebx
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ecx
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ecx, %esi
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebx, %edi
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: adcl %esi, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %esi, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %ecx, %eax
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl %esi, %ecx
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %eax
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl (%esp), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %esi, %ecx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %edi, %ebx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebx
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl (%esp), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl %ebx, %ecx
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edi
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 96(%eax), %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 100(%eax), %esi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %esi, %ebp
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl (%esp), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 104(%eax), %ecx
|
|
; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl 108(%eax), %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %ebp
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: adcl %esi, %edi
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edi
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: imull %eax, %ecx
|
|
; X86-NEXT: movl (%esp), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: addl %edx, %eax
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: imull %ebx, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %esi, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: imull %edi, %esi
|
|
; X86-NEXT: addl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl (%esp), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl %edi, %ebp
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %esi, %edi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: addl %edi, %eax
|
|
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
|
|
; X86-NEXT: movl 120(%ebx), %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: addl %edx, %esi
|
|
; X86-NEXT: movl 124(%ebx), %eax
|
|
; X86-NEXT: imull %ecx, %eax
|
|
; X86-NEXT: addl %eax, %esi
|
|
; X86-NEXT: movl 112(%ebx), %edi
|
|
; X86-NEXT: movl 116(%ebx), %ebp
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: imull %ebp, %ebx
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebx, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: imull %edi, %ecx
|
|
; X86-NEXT: addl %edx, %ecx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %esi, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ebx, %ebp
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %edi
|
|
; X86-NEXT: addl %ebp, %edi
|
|
; X86-NEXT: adcl %esi, %ecx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ecx, %eax
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl (%esp), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ebp
|
|
; X86-NEXT: addl %ecx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %edi, %ebp
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl %cl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: addl %ecx, %ebx
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %edi, %ecx
|
|
; X86-NEXT: setb %bl
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl %ebp, %edi
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %ecx, %ebp
|
|
; X86-NEXT: movzbl %bl, %eax
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ebx, %esi
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ecx
|
|
; X86-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl $0, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %ebp, %eax
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: adcl %edi, %ebx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movl %eax, %ebx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %edx
|
|
; X86-NEXT: addl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X86-NEXT: adcl %ecx, %ebp
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %eax, %ebx
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl $0, %edx
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %edi, %esi
|
|
; X86-NEXT: imull %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %edx
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: addl %edx, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: imull %ebx, %esi
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: addl %esi, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: imull %edi, %esi
|
|
; X86-NEXT: addl %edx, %esi
|
|
; X86-NEXT: addl %ebp, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: movl %edi, %ebp
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebx, %eax
|
|
; X86-NEXT: mull %ecx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %edi, %esi
|
|
; X86-NEXT: adcl $0, %ecx
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: movl %edx, %ebx
|
|
; X86-NEXT: addl %esi, %eax
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ecx, %ebx
|
|
; X86-NEXT: setb %cl
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %edi
|
|
; X86-NEXT: addl %ebx, %eax
|
|
; X86-NEXT: movzbl %cl, %ecx
|
|
; X86-NEXT: adcl %ecx, %edx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: imull %esi, %ecx
|
|
; X86-NEXT: movl %esi, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: addl %edx, %ebx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: imull %edi, %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: mull %ebp
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: imull %ebp, %ecx
|
|
; X86-NEXT: addl %edx, %ecx
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl %ebx, %ecx
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl %edi, %eax
|
|
; X86-NEXT: mull %esi
|
|
; X86-NEXT: movl %edx, %edi
|
|
; X86-NEXT: movl %eax, %esi
|
|
; X86-NEXT: addl %ecx, %esi
|
|
; X86-NEXT: adcl $0, %edi
|
|
; X86-NEXT: movl %ebp, %eax
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %ecx
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: addl %esi, %ebp
|
|
; X86-NEXT: adcl %edi, %ecx
|
|
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: mull %ebx
|
|
; X86-NEXT: movl %edx, %esi
|
|
; X86-NEXT: movl %eax, %edx
|
|
; X86-NEXT: addl %ecx, %edx
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: adcl %ecx, %esi
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, %eax
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, (%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 4(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 8(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 12(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 16(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 20(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 24(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 28(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 32(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 36(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 40(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 44(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 48(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 52(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 56(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X86-NEXT: movl %esi, 60(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 64(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 68(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 72(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 76(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 80(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 84(%ecx)
|
|
; X86-NEXT: movl %ebp, 88(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 92(%ecx)
|
|
; X86-NEXT: movl %ebx, 96(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 100(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 104(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 108(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 112(%ecx)
|
|
; X86-NEXT: movl %edi, 116(%ecx)
|
|
; X86-NEXT: movl %edx, 120(%ecx)
|
|
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X86-NEXT: movl %eax, 124(%ecx)
|
|
; X86-NEXT: addl $400, %esp # imm = 0x190
|
|
; X86-NEXT: popl %esi
|
|
; X86-NEXT: popl %edi
|
|
; X86-NEXT: popl %ebx
|
|
; X86-NEXT: popl %ebp
|
|
; X86-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_1024:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: pushq %rbp
|
|
; X64-NEXT: pushq %r15
|
|
; X64-NEXT: pushq %r14
|
|
; X64-NEXT: pushq %r13
|
|
; X64-NEXT: pushq %r12
|
|
; X64-NEXT: pushq %rbx
|
|
; X64-NEXT: subq $240, %rsp
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq 40(%rdi), %rbx
|
|
; X64-NEXT: movq 32(%rdi), %r12
|
|
; X64-NEXT: movq 56(%rdi), %r15
|
|
; X64-NEXT: movq 48(%rdi), %r10
|
|
; X64-NEXT: movq (%rsi), %r11
|
|
; X64-NEXT: movq 8(%rsi), %r14
|
|
; X64-NEXT: movq %rsi, %r13
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %rcx, %r9
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %r9, %r8
|
|
; X64-NEXT: adcq %rsi, %r10
|
|
; X64-NEXT: setb %al
|
|
; X64-NEXT: movzbl %al, %r9d
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: addq %r10, %rsi
|
|
; X64-NEXT: adcq %r9, %rcx
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %r9, %r11
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: addq %r11, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r10, %r9
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %rbx, %r11
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, %r15
|
|
; X64-NEXT: addq %r9, %r15
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %rbx
|
|
; X64-NEXT: addq %rdi, %r15
|
|
; X64-NEXT: adcq %r8, %rbx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq 16(%r13), %r8
|
|
; X64-NEXT: movq %r12, %r10
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: addq %rdi, %r12
|
|
; X64-NEXT: adcq $0, %r9
|
|
; X64-NEXT: movq 24(%r13), %rbp
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: movq %rdx, %r13
|
|
; X64-NEXT: addq %r12, %rax
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: adcq %r9, %r13
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %r13, %r9
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %rdi
|
|
; X64-NEXT: addq %r15, %r14
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbx, %r12
|
|
; X64-NEXT: movq %r12, (%rsp) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %r9
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: addq %rsi, %r9
|
|
; X64-NEXT: adcq %rcx, %rdi
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rcx, %rbx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: movzbl %sil, %eax
|
|
; X64-NEXT: adcq %rax, %rdx
|
|
; X64-NEXT: addq %r9, %r11
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rdi, %rbx
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rdx
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: movq 16(%r14), %r11
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq 24(%r14), %r8
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %rcx, %rdi
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rdi, %rbx
|
|
; X64-NEXT: adcq %rsi, %r15
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %r15, %rdi
|
|
; X64-NEXT: movzbl %sil, %eax
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: movq (%r14), %rbp
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq 8(%r14), %r14
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: addq %rsi, %r12
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: addq %r12, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r15, %rsi
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %r14, %r15
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rdx, %r12
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: addq %rsi, %r13
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %r12
|
|
; X64-NEXT: addq %r9, %r13
|
|
; X64-NEXT: adcq %rbx, %r12
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rsi, %rbx
|
|
; X64-NEXT: adcq $0, %r9
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: adcq %r9, %rbp
|
|
; X64-NEXT: setb %r9b
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: addq %rbp, %rsi
|
|
; X64-NEXT: movzbl %r9b, %eax
|
|
; X64-NEXT: adcq %rax, %r15
|
|
; X64-NEXT: addq %r13, %r10
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r12, %rbx
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: addq %rdi, %rsi
|
|
; X64-NEXT: adcq %rcx, %r15
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: movq %r8, %rdi
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: addq %rcx, %r12
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: movq %r11, %rbp
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r13
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %r12, %r11
|
|
; X64-NEXT: adcq %rdi, %r13
|
|
; X64-NEXT: setb %dil
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: movq %r8, %r9
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: addq %r13, %rax
|
|
; X64-NEXT: movzbl %dil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: addq %rsi, %rbx
|
|
; X64-NEXT: adcq %r15, %r11
|
|
; X64-NEXT: movzbl %r10b, %ecx
|
|
; X64-NEXT: adcq %rcx, %rax
|
|
; X64-NEXT: adcq $0, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq (%rsp), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: movq 32(%r8), %rcx
|
|
; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %rsi, %r11
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq 40(%r8), %rbx
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: addq %r11, %rsi
|
|
; X64-NEXT: adcq %rdi, %r15
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %r15, %r11
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %rdi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r13
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %r15, %rbp
|
|
; X64-NEXT: adcq $0, %r13
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: movq %rbx, %rcx
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: addq %rbp, %rax
|
|
; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill
|
|
; X64-NEXT: adcq %r13, %r10
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %r10, %rbp
|
|
; X64-NEXT: movzbl %bl, %eax
|
|
; X64-NEXT: adcq %rax, %r15
|
|
; X64-NEXT: addq %r12, %rbp
|
|
; X64-NEXT: adcq %rsi, %r15
|
|
; X64-NEXT: adcq $0, %r11
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq 48(%r8), %rcx
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: movq %r14, %r12
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: addq %rsi, %r13
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq 56(%r8), %rsi
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %r14
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %r13, %r9
|
|
; X64-NEXT: adcq %r10, %r14
|
|
; X64-NEXT: setb %r8b
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: addq %r14, %r13
|
|
; X64-NEXT: movzbl %r8b, %eax
|
|
; X64-NEXT: adcq %rax, %rsi
|
|
; X64-NEXT: addq %rbp, %rbx
|
|
; X64-NEXT: adcq %r15, %r9
|
|
; X64-NEXT: adcq $0, %r13
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: addq %r11, %r13
|
|
; X64-NEXT: adcq %rdi, %rsi
|
|
; X64-NEXT: setb %r11b
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: addq %rdi, %r14
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %r14, %rbp
|
|
; X64-NEXT: adcq %r10, %r8
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %r8, %rdi
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %r15
|
|
; X64-NEXT: addq %r13, %r12
|
|
; X64-NEXT: adcq %rsi, %rbp
|
|
; X64-NEXT: movzbl %r11b, %eax
|
|
; X64-NEXT: adcq %rax, %rdi
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: addq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: adcq %rax, (%rsp) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %r12
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
|
|
; X64-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rcx, %r8
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r8, %rbx
|
|
; X64-NEXT: adcq %rsi, %r10
|
|
; X64-NEXT: setb %r8b
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: addq %r10, %rsi
|
|
; X64-NEXT: movzbl %r8b, %eax
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: addq %r8, %r14
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: movq %r11, %r13
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: addq %r14, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r10, %r8
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: movq %r12, %r11
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rdx, %r12
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: addq %r8, %r13
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %r12
|
|
; X64-NEXT: addq %rbp, %r13
|
|
; X64-NEXT: adcq %rbx, %r12
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: movq %r11, %rbx
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: addq %r8, %r10
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %r10, %r11
|
|
; X64-NEXT: adcq %rbp, %r8
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %r9, %rbp
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r8, %rbx
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %r9
|
|
; X64-NEXT: addq %r13, %r14
|
|
; X64-NEXT: movq %r14, %r13
|
|
; X64-NEXT: adcq %r12, %r11
|
|
; X64-NEXT: adcq $0, %rbx
|
|
; X64-NEXT: adcq $0, %r9
|
|
; X64-NEXT: addq %rsi, %rbx
|
|
; X64-NEXT: adcq %rcx, %r9
|
|
; X64-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rcx, %r8
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %r8, %rax
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: movzbl %sil, %eax
|
|
; X64-NEXT: adcq %rax, %rdx
|
|
; X64-NEXT: addq %rbx, %r10
|
|
; X64-NEXT: adcq %r9, %r8
|
|
; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: adcq $0, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: addq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: adcq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq %rdi, %r13
|
|
; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r15, %r11
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X64-NEXT: adcq %rax, %r10
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %r8
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rdx
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
|
|
; X64-NEXT: movq 64(%r13), %r15
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rsi, %r8
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq 72(%r13), %rsi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r8, %rbx
|
|
; X64-NEXT: adcq %rdi, %r10
|
|
; X64-NEXT: setb %r8b
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %r10, %r9
|
|
; X64-NEXT: movzbl %r8b, %eax
|
|
; X64-NEXT: adcq %rax, %rdi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: movq %r15, %rcx
|
|
; X64-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: addq %r8, %r14
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: movq %r12, %rcx
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: addq %r14, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r10, %r8
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: movq %r15, %r12
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %r8, %rbp
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %r15
|
|
; X64-NEXT: addq %r11, %rbp
|
|
; X64-NEXT: adcq %rbx, %r15
|
|
; X64-NEXT: adcq $0, %r9
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq 80(%r13), %r14
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %r8, %r11
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq 88(%r13), %rbx
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: addq %r11, %rax
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: adcq %r10, %r8
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %r12
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: addq %r8, %r13
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %r12
|
|
; X64-NEXT: addq %rbp, %rsi
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r15, %r11
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %r13
|
|
; X64-NEXT: adcq $0, %r12
|
|
; X64-NEXT: addq %r9, %r13
|
|
; X64-NEXT: adcq %rdi, %r12
|
|
; X64-NEXT: setb %bpl
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: addq %rdi, %r10
|
|
; X64-NEXT: adcq $0, %r8
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: movq %r9, %r15
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: addq %r10, %rax
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: adcq %r8, %rdi
|
|
; X64-NEXT: setb %r8b
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %rcx, %r9
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: addq %rdi, %rax
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: movzbl %r8b, %eax
|
|
; X64-NEXT: adcq %rax, %rdx
|
|
; X64-NEXT: addq %r13, %rsi
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r12, %r10
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movzbl %bpl, %eax
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rdx
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: imulq %rax, %rbx
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rbx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: imulq %rcx, %r14
|
|
; X64-NEXT: addq %rdx, %r14
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: imulq %rsi, %r10
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %r10, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
|
|
; X64-NEXT: imulq %r11, %rbx
|
|
; X64-NEXT: addq %rdx, %rbx
|
|
; X64-NEXT: addq %r8, %rdi
|
|
; X64-NEXT: adcq %r14, %rbx
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: addq %r8, %r14
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: addq %r14, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r10, %r8
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: addq %r8, %r14
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %rsi
|
|
; X64-NEXT: addq %rdi, %r14
|
|
; X64-NEXT: adcq %rbx, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq 112(%rcx), %r10
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
|
|
; X64-NEXT: imulq %r11, %r10
|
|
; X64-NEXT: addq %rdx, %r10
|
|
; X64-NEXT: movq 120(%rcx), %rax
|
|
; X64-NEXT: imulq %rdi, %rax
|
|
; X64-NEXT: movq %rdi, %r12
|
|
; X64-NEXT: addq %rax, %r10
|
|
; X64-NEXT: movq 96(%rcx), %r13
|
|
; X64-NEXT: movq 104(%rcx), %r8
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: movq %r15, %rbx
|
|
; X64-NEXT: imulq %r8, %rbx
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %rbx, %rdx
|
|
; X64-NEXT: imulq %r13, %r9
|
|
; X64-NEXT: addq %rdx, %r9
|
|
; X64-NEXT: addq %rbp, %rdi
|
|
; X64-NEXT: adcq %r10, %r9
|
|
; X64-NEXT: movq %r9, %r15
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: addq %r10, %r12
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: addq %r12, %r13
|
|
; X64-NEXT: adcq %rbp, %r10
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: addq %r10, %rax
|
|
; X64-NEXT: movzbl %bl, %r8d
|
|
; X64-NEXT: adcq %r8, %rdx
|
|
; X64-NEXT: addq %rdi, %rax
|
|
; X64-NEXT: adcq %r15, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq %r14, %rax
|
|
; X64-NEXT: adcq %rsi, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: movq 80(%r14), %r10
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq 88(%r14), %r15
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %rcx, %r9
|
|
; X64-NEXT: adcq $0, %r8
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %r9, %rdi
|
|
; X64-NEXT: adcq %r8, %rcx
|
|
; X64-NEXT: setb %r8b
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: addq %rcx, %r12
|
|
; X64-NEXT: movzbl %r8b, %eax
|
|
; X64-NEXT: adcq %rax, %r15
|
|
; X64-NEXT: movq 64(%r14), %rcx
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq 72(%r14), %r8
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: addq %r11, %r14
|
|
; X64-NEXT: adcq $0, %rbx
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %rcx, %r9
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: addq %r14, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbx, %r11
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %r11, %rbp
|
|
; X64-NEXT: movzbl %cl, %eax
|
|
; X64-NEXT: adcq %rax, %rbx
|
|
; X64-NEXT: addq %rsi, %rbp
|
|
; X64-NEXT: adcq %rdi, %rbx
|
|
; X64-NEXT: adcq $0, %r12
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: movq %r9, %rcx
|
|
; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: movq %r8, %r10
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %rsi, %r11
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %r13
|
|
; X64-NEXT: addq %r11, %rax
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: adcq %rdi, %r13
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %r13, %rdi
|
|
; X64-NEXT: movzbl %cl, %eax
|
|
; X64-NEXT: adcq %rax, %rsi
|
|
; X64-NEXT: addq %rbp, %r9
|
|
; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbx, %r11
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: addq %r12, %rdi
|
|
; X64-NEXT: adcq %r15, %rsi
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: movq %rax, %r15
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r9, %rbx
|
|
; X64-NEXT: adcq $0, %r11
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: movq %r8, %r9
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %r13
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: adcq %r11, %r13
|
|
; X64-NEXT: setb %r8b
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: addq %r13, %rax
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: movzbl %r8b, %eax
|
|
; X64-NEXT: adcq %rax, %rdx
|
|
; X64-NEXT: addq %rdi, %r15
|
|
; X64-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rsi, %r10
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movzbl %cl, %eax
|
|
; X64-NEXT: adcq %rax, %r11
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rdx
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq 96(%rcx), %rsi
|
|
; X64-NEXT: imulq %rsi, %r9
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %r9, %rdx
|
|
; X64-NEXT: movq 104(%rcx), %r9
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: imulq %r9, %rax
|
|
; X64-NEXT: addq %rdx, %rax
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: movq 112(%rcx), %rax
|
|
; X64-NEXT: movq %rcx, %r14
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: imulq %r10, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rcx, %rdx
|
|
; X64-NEXT: movq 120(%r14), %r13
|
|
; X64-NEXT: imulq %rbx, %r13
|
|
; X64-NEXT: addq %rdx, %r13
|
|
; X64-NEXT: addq %rdi, %r8
|
|
; X64-NEXT: adcq %r11, %r13
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: movq %rbx, %rcx
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rdi, %rbx
|
|
; X64-NEXT: adcq $0, %r11
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: addq %rbx, %r12
|
|
; X64-NEXT: adcq %r11, %rcx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %rcx, %r9
|
|
; X64-NEXT: movzbl %sil, %eax
|
|
; X64-NEXT: adcq %rax, %rbx
|
|
; X64-NEXT: addq %r8, %r9
|
|
; X64-NEXT: adcq %r13, %rbx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: imulq %r10, %rdi
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: addq %rdi, %rdx
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: imulq %r14, %rax
|
|
; X64-NEXT: addq %rdx, %rax
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: imulq %r8, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %rsi, %rdx
|
|
; X64-NEXT: imulq %rdi, %rbp
|
|
; X64-NEXT: addq %rdx, %rbp
|
|
; X64-NEXT: addq %rcx, %r11
|
|
; X64-NEXT: adcq %r13, %rbp
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: movq %r8, %r15
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: addq %rcx, %rsi
|
|
; X64-NEXT: adcq $0, %r8
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: addq %rsi, %rcx
|
|
; X64-NEXT: adcq %r8, %rdi
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: addq %rdi, %rax
|
|
; X64-NEXT: movzbl %sil, %esi
|
|
; X64-NEXT: adcq %rsi, %rdx
|
|
; X64-NEXT: addq %r11, %rax
|
|
; X64-NEXT: adcq %rbp, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq %r12, %rcx
|
|
; X64-NEXT: adcq %r9, %rax
|
|
; X64-NEXT: adcq %rbx, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rsi, %r8
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rdi, %r9
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, (%rsi)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 8(%rsi)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 16(%rsi)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 24(%rsi)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 32(%rsi)
|
|
; X64-NEXT: movq (%rsp), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 40(%rsi)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 48(%rsi)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 56(%rsi)
|
|
; X64-NEXT: movq %r8, 64(%rsi)
|
|
; X64-NEXT: movq %r9, 72(%rsi)
|
|
; X64-NEXT: movq %r10, 80(%rsi)
|
|
; X64-NEXT: movq %r11, 88(%rsi)
|
|
; X64-NEXT: movq %r13, 96(%rsi)
|
|
; X64-NEXT: movq %rcx, 104(%rsi)
|
|
; X64-NEXT: movq %rax, 112(%rsi)
|
|
; X64-NEXT: movq %rdx, 120(%rsi)
|
|
; X64-NEXT: addq $240, %rsp
|
|
; X64-NEXT: popq %rbx
|
|
; X64-NEXT: popq %r12
|
|
; X64-NEXT: popq %r13
|
|
; X64-NEXT: popq %r14
|
|
; X64-NEXT: popq %r15
|
|
; X64-NEXT: popq %rbp
|
|
; X64-NEXT: retq
|
|
%av = load i1024, ptr %a
|
|
%bv = load i1024, ptr %b
|
|
%r = mul i1024 %av, %bv
|
|
store i1024 %r, ptr %out
|
|
ret void
|
|
}
|