
Recent upstream practice has moved away from passing `-verify-machineinstrs` explicitly in tests, since the machine verifier already runs when LLVM is built with expensive checks. This PR removes almost all uses of `-verify-machineinstrs` from the tests in `llvm/test/CodeGen/AMDGPU/*.ll`, keeping the flag only in tests where removing it currently causes failures.
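The change is mechanical: the flag is simply dropped from each RUN line, leaving the rest of the invocation untouched. A representative before/after for the first RUN line of the test below (illustrative only; the exact position of the flag varied per test):

```diff
-; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
```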
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s

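; Flat load/store with a 4-byte immediate offset: GFX9 and GFX11 fold the
; offset into the flat instruction, while GFX10 adds it to the address first.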
define void @flat_inst_offset(ptr nocapture %p) {
; GFX9-LABEL: flat_inst_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: flat_load_dword v2, v[0:1] offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v2, 1, v2
; GFX9-NEXT: flat_store_dword v[0:1], v2 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_inst_offset:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, 4
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX10-NEXT: flat_load_dword v2, v[0:1]
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v2
; GFX10-NEXT: flat_store_dword v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: flat_inst_offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: flat_load_b32 v2, v[0:1] offset:4
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_add_nc_u32_e32 v2, 1, v2
; GFX11-NEXT: flat_store_b32 v[0:1], v2 offset:4
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr inbounds i32, ptr %p, i64 1
  %load = load i32, ptr %gep, align 4
  %inc = add nsw i32 %load, 1
  store i32 %inc, ptr %gep, align 4
  ret void
}

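; Global load/store with a 4-byte immediate offset: all three targets fold the
; offset into the global instruction.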
define void @global_inst_offset(ptr addrspace(1) nocapture %p) {
; GFX9-LABEL: global_inst_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v2, v[0:1], off offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v2, 1, v2
; GFX9-NEXT: global_store_dword v[0:1], v2, off offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_inst_offset:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: global_load_dword v2, v[0:1], off offset:4
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v2
; GFX10-NEXT: global_store_dword v[0:1], v2, off offset:4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: global_inst_offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: global_load_b32 v2, v[0:1], off offset:4
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_add_nc_u32_e32 v2, 1, v2
; GFX11-NEXT: global_store_b32 v[0:1], v2, off offset:4
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr inbounds i32, ptr addrspace(1) %p, i64 1
  %load = load i32, ptr addrspace(1) %gep, align 4
  %inc = add nsw i32 %load, 1
  store i32 %inc, ptr addrspace(1) %gep, align 4
  ret void
}

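; i16 load at byte offset 8 into the low half of a <2 x i16>: GFX9 and GFX11
; use a d16 flat load with the offset folded in; GFX10 computes the address
; with scalar adds instead.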
define amdgpu_kernel void @load_i16_lo(ptr %arg, ptr %out) {
; GFX9-LABEL: load_i16_lo:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_load_short_d16 v2, v[0:1] offset:8
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_pk_add_u16 v2, v2, v2
; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: load_i16_lo:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 8
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_load_short_d16 v2, v[0:1]
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_pk_add_u16 v2, v2, v2
; GFX10-NEXT: flat_store_dword v[0:1], v2
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: load_i16_lo:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_load_d16_b16 v2, v[0:1] offset:8
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_pk_add_u16 v2, v2, v2
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
  %gep = getelementptr inbounds i16, ptr %arg, i32 4
  %ld = load i16, ptr %gep, align 2
  %vec = insertelement <2 x i16> <i16 poison, i16 0>, i16 %ld, i32 0
  %v = add <2 x i16> %vec, %vec
  store <2 x i16> %v, ptr %out, align 4
  ret void
}

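; Same as above, but loading into the high half via the d16_hi flat load.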
define amdgpu_kernel void @load_i16_hi(ptr %arg, ptr %out) {
; GFX9-LABEL: load_i16_hi:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_load_short_d16_hi v2, v[0:1] offset:8
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_pk_add_u16 v2, v2, v2
; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: load_i16_hi:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 8
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_load_short_d16_hi v2, v[0:1]
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_pk_add_u16 v2, v2, v2
; GFX10-NEXT: flat_store_dword v[0:1], v2
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: load_i16_hi:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_load_d16_hi_b16 v2, v[0:1] offset:8
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_pk_add_u16 v2, v2, v2
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
  %gep = getelementptr inbounds i16, ptr %arg, i32 4
  %ld = load i16, ptr %gep, align 2
  %vec = insertelement <2 x i16> <i16 0, i16 poison>, i16 %ld, i32 1
  %v = add <2 x i16> %vec, %vec
  store <2 x i16> %v, ptr %out, align 4
  ret void
}

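; Half load at byte offset 8 into the low half of a <2 x half>, using the d16
; flat load so the offset can be folded on GFX9 and GFX11.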
define amdgpu_kernel void @load_half_lo(ptr %arg, ptr %out) {
; GFX9-LABEL: load_half_lo:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_load_short_d16 v2, v[0:1] offset:8
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_pk_add_f16 v2, v2, v2
; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: load_half_lo:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 8
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_load_short_d16 v2, v[0:1]
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_pk_add_f16 v2, v2, v2
; GFX10-NEXT: flat_store_dword v[0:1], v2
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: load_half_lo:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_load_d16_b16 v2, v[0:1] offset:8
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_pk_add_f16 v2, v2, v2
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
  %gep = getelementptr inbounds half, ptr %arg, i32 4
  %ld = load half, ptr %gep, align 2
  %vec = insertelement <2 x half> <half poison, half 0xH0000>, half %ld, i32 0
  %v = fadd <2 x half> %vec, %vec
  store <2 x half> %v, ptr %out, align 4
  ret void
}

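; Half load into the high half of a <2 x half> via the d16_hi flat load.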
define amdgpu_kernel void @load_half_hi(ptr %arg, ptr %out) {
; GFX9-LABEL: load_half_hi:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_load_short_d16_hi v2, v[0:1] offset:8
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_pk_add_f16 v2, v2, v2
; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: load_half_hi:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 8
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_load_short_d16_hi v2, v[0:1]
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_pk_add_f16 v2, v2, v2
; GFX10-NEXT: flat_store_dword v[0:1], v2
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: load_half_hi:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_load_d16_hi_b16 v2, v[0:1] offset:8
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_pk_add_f16 v2, v2, v2
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
  %gep = getelementptr inbounds half, ptr %arg, i32 4
  %ld = load half, ptr %gep, align 2
  %vec = insertelement <2 x half> <half 0xH0000, half poison>, half %ld, i32 1
  %v = fadd <2 x half> %vec, %vec
  store <2 x half> %v, ptr %out, align 4
  ret void
}

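; Plain 32-bit flat load/store at byte offset 16: GFX9 and GFX11 fold the
; offset; GFX10 computes the address with scalar adds.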
define amdgpu_kernel void @load_float_lo(ptr %arg, ptr %out) {
; GFX9-LABEL: load_float_lo:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_load_dword v2, v[0:1] offset:16
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_add_f32_e32 v2, v2, v2
; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: load_float_lo:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 16
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_load_dword v2, v[0:1]
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_add_f32_e32 v2, v2, v2
; GFX10-NEXT: flat_store_dword v[0:1], v2
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: load_float_lo:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_load_b32 v2, v[0:1] offset:16
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_add_f32_e32 v2, v2, v2
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
  %gep = getelementptr inbounds float, ptr %arg, i32 4
  %ld = load float, ptr %gep, align 4
  %v = fadd float %ld, %ld
  store float %v, ptr %out, align 4
  ret void
}