llvm-project/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
Ahmed Bougacha 155d5849da [AArch64] Avoid jump tables in swiftasync clobber-live-reg test. NFC.
The upstream test relies on jump-tables, which are lowered in
dramatically different ways with later arm64e/ptrauth patches.

Concretely, it's failing for at least two reasons:
- ptrauth removes x16/x17 from tcGPR64 to prevent indirect tail-calls
  from using either register as the callee, conflicting with their usage
  as scratch for the tail-call LR auth checking sequence.  In the
  1/2_available_regs_left tests, this causes the MI scheduler to move
  the load up across some of the inline asm register clobbers.

- ptrauth adds an x16/x17-using pseudo for jump-table dispatch, which
  not only looks somewhat different from the regular jump-table dispatch
  codegen on its own, but also currently prevents jump-table compression.

They seem like sensible changes.  But they mean the tests aren't really
testing what they're intended to, because there's always an implicit
x16/x17 clobber when using jump-tables.

This updates the test in a way that should work identically regardless
of ptrauth support, with one exception: #1 above, which merely reorders
the load and the inline asm w.r.t. each other.
I verified the tests still fail the live-reg assertions when
applicable.
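
For reference, the updated tests pin each register of interest with
empty inline asm instead of going through a switch/jump-table; the
pattern (taken from the x16 test below, elided in the middle) is
roughly:

  %x16 = tail call i64 asm "", "={x16}"()
  ...
  tail call void asm sideeffect "", "{x16}"(i64 %x16)

which keeps the register live across the load/branch of interest
without involving jump-table lowering at all.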
2024-01-03 13:51:46 -08:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -o - -mtriple=arm64e-apple-macosx %s | FileCheck %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
; x16 is not available, so shrink-wrapping cannot happen because
; StoreSwiftAsyncContext needs it.
define swifttailcc void @test_async_with_jumptable_x16_clobbered(ptr %src, ptr swiftasync %as) #0 {
; CHECK-LABEL: test_async_with_jumptable_x16_clobbered:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: orr x29, x29, #0x1000000000000000
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: add x16, sp, #8
; CHECK-NEXT: movk x16, #49946, lsl #48
; CHECK-NEXT: mov x17, x22
; CHECK-NEXT: pacdb x17, x16
; CHECK-NEXT: str x17, [sp, #8]
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x20, x22
; CHECK-NEXT: mov x22, x0
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ldr x8, [x0]
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: cbnz x8, LBB0_2
; CHECK-NEXT: ; %bb.1: ; %then.1
; CHECK-NEXT: str xzr, [x22]
; CHECK-NEXT: mov x0, x22
; CHECK-NEXT: LBB0_2: ; %exit
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: bl _foo
; CHECK-NEXT: mov x1, x0
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: and x29, x29, #0xefffffffffffffff
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: br x1
entry:
%x16 = tail call i64 asm "", "={x16}"()
%l = load i64, ptr %src, align 8
%c = icmp eq i64 %l, 0
br i1 %c, label %then.1, label %exit
then.1:
store i64 0, ptr %src
br label %exit
exit:
%p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
tail call void asm sideeffect "", "{x16}"(i64 %x16)
%r = call i64 @foo(ptr %p)
%fn = inttoptr i64 %r to ptr
musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
ret void
}
; x17 is not available, so shrink-wrapping cannot happen because
; StoreSwiftAsyncContext needs it.
define swifttailcc void @test_async_with_jumptable_x17_clobbered(ptr %src, ptr swiftasync %as) #0 {
; CHECK-LABEL: test_async_with_jumptable_x17_clobbered:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: orr x29, x29, #0x1000000000000000
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: add x16, sp, #8
; CHECK-NEXT: movk x16, #49946, lsl #48
; CHECK-NEXT: mov x17, x22
; CHECK-NEXT: pacdb x17, x16
; CHECK-NEXT: str x17, [sp, #8]
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x20, x22
; CHECK-NEXT: mov x22, x0
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ldr x8, [x0]
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: cbnz x8, LBB1_2
; CHECK-NEXT: ; %bb.1: ; %then.1
; CHECK-NEXT: str xzr, [x22]
; CHECK-NEXT: mov x0, x22
; CHECK-NEXT: LBB1_2: ; %exit
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: bl _foo
; CHECK-NEXT: mov x1, x0
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: and x29, x29, #0xefffffffffffffff
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: br x1
entry:
%x17 = tail call i64 asm "", "={x17}"()
%l = load i64, ptr %src, align 8
%c = icmp eq i64 %l, 0
br i1 %c, label %then.1, label %exit
then.1:
store i64 0, ptr %src
br label %exit
exit:
%p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
tail call void asm sideeffect "", "{x17}"(i64 %x17)
%r = call i64 @foo(ptr %p)
%fn = inttoptr i64 %r to ptr
musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
ret void
}
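; x1 is not needed by StoreSwiftAsyncContext, so shrink-wrapping can happen
; and the context store is sunk past the branch into the exit block.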
define swifttailcc void @test_async_with_jumptable_x1_clobbered(ptr %src, ptr swiftasync %as) #0 {
; CHECK-LABEL: test_async_with_jumptable_x1_clobbered:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: mov x20, x22
; CHECK-NEXT: mov x22, x0
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ldr x8, [x0]
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: cbnz x8, LBB2_2
; CHECK-NEXT: ; %bb.1: ; %then.1
; CHECK-NEXT: str xzr, [x22]
; CHECK-NEXT: mov x0, x22
; CHECK-NEXT: LBB2_2: ; %exit
; CHECK-NEXT: orr x29, x29, #0x1000000000000000
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: add x16, sp, #8
; CHECK-NEXT: movk x16, #49946, lsl #48
; CHECK-NEXT: mov x17, x22
; CHECK-NEXT: pacdb x17, x16
; CHECK-NEXT: str x17, [sp, #8]
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: bl _foo
; CHECK-NEXT: mov x1, x0
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: and x29, x29, #0xefffffffffffffff
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: br x1
entry:
%x1 = tail call i64 asm "", "={x1}"()
%l = load i64, ptr %src, align 8
%c = icmp eq i64 %l, 0
br i1 %c, label %then.1, label %exit
then.1:
store i64 0, ptr %src
br label %exit
exit:
%p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
tail call void asm sideeffect "", "{x1}"(i64 %x1)
%r = call i64 @foo(ptr %p)
%fn = inttoptr i64 %r to ptr
musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
ret void
}
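; x1 and x9 are not needed by StoreSwiftAsyncContext, so shrink-wrapping can
; still happen with both clobbered.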
define swifttailcc void @test_async_with_jumptable_x1_x9_clobbered(ptr %src, ptr swiftasync %as) #0 {
; CHECK-LABEL: test_async_with_jumptable_x1_x9_clobbered:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: mov x20, x22
; CHECK-NEXT: mov x22, x0
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ldr x8, [x0]
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: cbnz x8, LBB3_2
; CHECK-NEXT: ; %bb.1: ; %then.1
; CHECK-NEXT: str xzr, [x22]
; CHECK-NEXT: mov x0, x22
; CHECK-NEXT: LBB3_2: ; %exit
; CHECK-NEXT: orr x29, x29, #0x1000000000000000
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: add x16, sp, #8
; CHECK-NEXT: movk x16, #49946, lsl #48
; CHECK-NEXT: mov x17, x22
; CHECK-NEXT: pacdb x17, x16
; CHECK-NEXT: str x17, [sp, #8]
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: bl _foo
; CHECK-NEXT: mov x1, x0
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: and x29, x29, #0xefffffffffffffff
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: br x1
entry:
%x1 = tail call i64 asm "", "={x1}"()
%x9 = tail call i64 asm "", "={x9}"()
%l = load i64, ptr %src, align 8
%c = icmp eq i64 %l, 0
br i1 %c, label %then.1, label %exit
then.1:
store i64 0, ptr %src
br label %exit
exit:
%p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
tail call void asm sideeffect "", "{x1}"(i64 %x1)
tail call void asm sideeffect "", "{x9}"(i64 %x9)
%r = call i64 @foo(ptr %p)
%fn = inttoptr i64 %r to ptr
musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
ret void
}
; There are 2 available scratch registers left, so shrink-wrapping can happen.
define swifttailcc void @test_async_with_jumptable_2_available_regs_left(ptr %src, ptr swiftasync %as) #0 {
; CHECK-LABEL: test_async_with_jumptable_2_available_regs_left:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: mov x20, x22
; CHECK-NEXT: mov x22, x0
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: ldr x10, [x22]
; CHECK-NEXT: cbnz x10, LBB4_2
; CHECK-NEXT: ; %bb.1: ; %then.1
; CHECK-NEXT: str xzr, [x22]
; CHECK-NEXT: mov x0, x22
; CHECK-NEXT: LBB4_2: ; %exit
; CHECK-NEXT: orr x29, x29, #0x1000000000000000
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: add x16, sp, #8
; CHECK-NEXT: movk x16, #49946, lsl #48
; CHECK-NEXT: mov x17, x22
; CHECK-NEXT: pacdb x17, x16
; CHECK-NEXT: str x17, [sp, #8]
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: bl _foo
; CHECK-NEXT: mov x1, x0
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: and x29, x29, #0xefffffffffffffff
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: br x1
entry:
%x1 = tail call i64 asm "", "={x1}"()
%x2 = tail call i64 asm "", "={x2}"()
%x3 = tail call i64 asm "", "={x3}"()
%x4 = tail call i64 asm "", "={x4}"()
%x5 = tail call i64 asm "", "={x5}"()
%x6 = tail call i64 asm "", "={x6}"()
%x7 = tail call i64 asm "", "={x7}"()
%x8 = tail call i64 asm "", "={x8}"()
%x9 = tail call i64 asm "", "={x9}"()
%x11 = tail call i64 asm "", "={x11}"()
%x12 = tail call i64 asm "", "={x12}"()
%x13 = tail call i64 asm "", "={x13}"()
%x14 = tail call i64 asm "", "={x14}"()
%x15 = tail call i64 asm "", "={x15}"()
%l = load i64, ptr %src, align 8
%c = icmp eq i64 %l, 0
br i1 %c, label %then.1, label %exit
then.1:
store i64 0, ptr %src
br label %exit
exit:
%p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
tail call void asm sideeffect "", "{x1}"(i64 %x1)
tail call void asm sideeffect "", "{x2}"(i64 %x2)
tail call void asm sideeffect "", "{x3}"(i64 %x3)
tail call void asm sideeffect "", "{x4}"(i64 %x4)
tail call void asm sideeffect "", "{x5}"(i64 %x5)
tail call void asm sideeffect "", "{x6}"(i64 %x6)
tail call void asm sideeffect "", "{x7}"(i64 %x7)
tail call void asm sideeffect "", "{x8}"(i64 %x8)
tail call void asm sideeffect "", "{x9}"(i64 %x9)
tail call void asm sideeffect "", "{x11}"(i64 %x11)
tail call void asm sideeffect "", "{x12}"(i64 %x12)
tail call void asm sideeffect "", "{x13}"(i64 %x13)
tail call void asm sideeffect "", "{x14}"(i64 %x14)
tail call void asm sideeffect "", "{x15}"(i64 %x15)
%r = call i64 @foo(ptr %p)
%fn = inttoptr i64 %r to ptr
musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
ret void
}
; There is only 1 available scratch register left, so shrink-wrapping cannot
; happen because StoreSwiftAsyncContext needs 2 free scratch registers.
define swifttailcc void @test_async_with_jumptable_1_available_reg_left(ptr %src, ptr swiftasync %as) #0 {
; CHECK-LABEL: test_async_with_jumptable_1_available_reg_left:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: orr x29, x29, #0x1000000000000000
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: add x16, sp, #8
; CHECK-NEXT: movk x16, #49946, lsl #48
; CHECK-NEXT: mov x17, x22
; CHECK-NEXT: pacdb x17, x16
; CHECK-NEXT: str x17, [sp, #8]
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x20, x22
; CHECK-NEXT: mov x22, x0
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: ldr x10, [x22]
; CHECK-NEXT: cbnz x10, LBB5_2
; CHECK-NEXT: ; %bb.1: ; %then.1
; CHECK-NEXT: str xzr, [x22]
; CHECK-NEXT: mov x0, x22
; CHECK-NEXT: LBB5_2: ; %exit
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: bl _foo
; CHECK-NEXT: mov x1, x0
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: and x29, x29, #0xefffffffffffffff
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: br x1
entry:
%x1 = tail call i64 asm "", "={x1}"()
%x2 = tail call i64 asm "", "={x2}"()
%x3 = tail call i64 asm "", "={x3}"()
%x4 = tail call i64 asm "", "={x4}"()
%x5 = tail call i64 asm "", "={x5}"()
%x6 = tail call i64 asm "", "={x6}"()
%x7 = tail call i64 asm "", "={x7}"()
%x8 = tail call i64 asm "", "={x8}"()
%x9 = tail call i64 asm "", "={x9}"()
%x11 = tail call i64 asm "", "={x11}"()
%x12 = tail call i64 asm "", "={x12}"()
%x13 = tail call i64 asm "", "={x13}"()
%x14 = tail call i64 asm "", "={x14}"()
%x15 = tail call i64 asm "", "={x15}"()
%x16 = tail call i64 asm "", "={x16}"()
%l = load i64, ptr %src, align 8
%c = icmp eq i64 %l, 0
br i1 %c, label %then.1, label %exit
then.1:
store i64 0, ptr %src
br label %exit
exit:
%p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
tail call void asm sideeffect "", "{x1}"(i64 %x1)
tail call void asm sideeffect "", "{x2}"(i64 %x2)
tail call void asm sideeffect "", "{x3}"(i64 %x3)
tail call void asm sideeffect "", "{x4}"(i64 %x4)
tail call void asm sideeffect "", "{x5}"(i64 %x5)
tail call void asm sideeffect "", "{x6}"(i64 %x6)
tail call void asm sideeffect "", "{x7}"(i64 %x7)
tail call void asm sideeffect "", "{x8}"(i64 %x8)
tail call void asm sideeffect "", "{x9}"(i64 %x9)
tail call void asm sideeffect "", "{x11}"(i64 %x11)
tail call void asm sideeffect "", "{x12}"(i64 %x12)
tail call void asm sideeffect "", "{x13}"(i64 %x13)
tail call void asm sideeffect "", "{x14}"(i64 %x14)
tail call void asm sideeffect "", "{x15}"(i64 %x15)
tail call void asm sideeffect "", "{x16}"(i64 %x16)
%r = call i64 @foo(ptr %p)
%fn = inttoptr i64 %r to ptr
musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
ret void
}
declare i64 @foo(ptr)
attributes #0 = { "frame-pointer"="non-leaf" }