
The existing way of managing clustered nodes was to add weak edges between neighbouring cluster nodes, forming a sort of ordered queue, which was later recorded as `NextClusterPred` or `NextClusterSucc` in `ScheduleDAGMI`. However, instructions may not be picked in the exact order of that queue. For example, given a queue of cluster nodes A B C: during scheduling, node B might be picked first, in which case it is very likely that only B and C get clustered for top-down scheduling (leaving A alone). Another issue is that ``` if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum) std::swap(SUa, SUb); if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) ``` may break the cluster queue. For example, suppose we want to cluster the nodes (in `MemOpRecords` order) 1 3 2. Node 1 (SUa) normally becomes a pred of 3 (SUb). But when it comes to (3, 2), since 3 (SUa) > 2 (SUb), we reorder the two nodes, which makes 2 a pred of 3. Both 1 and 2 are now preds of 3, yet there is no edge between 1 and 2, so we end up with a broken cluster chain. To fix both issues, this change introduces an unordered set, which helps improve clustering in some hard cases. One key reason the change causes so many test-check updates is that the cluster candidates are no longer ordered, so they may be picked in a different order than before. The most affected targets are AMDGPU, AArch64, and RISCV. For RISCV, most changes appear to be minor instruction reordering; I don't see obvious regressions. For AArch64, some combining of ldr into ldp is affected, with two cases regressed and two improved. The deeper reason is that the machine scheduler could not cluster these well either before or after the change, and the later load-combine algorithm is also not smart enough. For AMDGPU, some cases use more v_dual instructions while others regress; this seems less critical. Test `v_vselect_v32bf16` gets more buffer_load instructions claused.
958 lines
42 KiB
LLVM
958 lines
42 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 -mattr=-unaligned-access-mode < %s | FileCheck %s -check-prefixes=GCN,ALIGNED,ALIGNED-SDAG
|
|
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 -mattr=-unaligned-access-mode < %s | FileCheck %s -check-prefixes=GCN,ALIGNED,ALIGNED-GISEL
|
|
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 -mattr=+unaligned-access-mode < %s | FileCheck %s -check-prefixes=GCN,UNALIGNED,UNALIGNED-SDAG
|
|
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 -mattr=+unaligned-access-mode < %s | FileCheck %s -check-prefixes=GCN,UNALIGNED,UNALIGNED-GISEL
|
|
|
|
; i8 copy through LDS with align 1: a single byte is always naturally
; aligned, so all four RUN configurations share one set of GCN checks
; (one ds_read_u8 followed by one ds_write_b8).
define amdgpu_kernel void @ds1align1(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; GCN-LABEL: ds1align1:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: v_mov_b32_e32 v0, s0
|
|
; GCN-NEXT: ds_read_u8 v0, v0
|
|
; GCN-NEXT: v_mov_b32_e32 v1, s1
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: ds_write_b8 v1, v0
|
|
; GCN-NEXT: s_endpgm
|
|
%val = load i8, ptr addrspace(3) %in, align 1
|
|
store i8 %val, ptr addrspace(3) %out, align 1
|
|
ret void
|
|
}
|
|
|
|
; i16 copy with align 1 (under-aligned): the ALIGNED runs split the access
; into byte ops — SDAG copies the two bytes directly, GISEL recombines them
; into one value and re-splits on the store — while UNALIGNED keeps a single
; u16 read/write.
define amdgpu_kernel void @ds2align1(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-SDAG-LABEL: ds2align1:
|
|
; ALIGNED-SDAG: ; %bb.0:
|
|
; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v1, v0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v0, v0 offset:1
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v2, s1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v2, v1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v2, v0 offset:1
|
|
; ALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; ALIGNED-GISEL-LABEL: ds2align1:
|
|
; ALIGNED-GISEL: ; %bb.0:
|
|
; ALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v1, v0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v0, v0 offset:1
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v2, s1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v0, v0, 8, v1
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_e32 v1, 8, v0
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v2, v0
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v2, v1 offset:1
|
|
; ALIGNED-GISEL-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-LABEL: ds2align1:
|
|
; UNALIGNED: ; %bb.0:
|
|
; UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-NEXT: ds_read_u16 v0, v0
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: ds_write_b16 v1, v0
|
|
; UNALIGNED-NEXT: s_endpgm
|
|
%val = load i16, ptr addrspace(3) %in, align 1
|
|
store i16 %val, ptr addrspace(3) %out, align 1
|
|
ret void
|
|
}
|
|
|
|
; i16 copy with align 2: naturally aligned, so all RUN configurations share
; the GCN checks (single ds_read_u16/ds_write_b16).
define amdgpu_kernel void @ds2align2(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; GCN-LABEL: ds2align2:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: v_mov_b32_e32 v0, s0
|
|
; GCN-NEXT: ds_read_u16 v0, v0
|
|
; GCN-NEXT: v_mov_b32_e32 v1, s1
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: ds_write_b16 v1, v0
|
|
; GCN-NEXT: s_endpgm
|
|
%val = load i16, ptr addrspace(3) %in, align 2
|
|
store i16 %val, ptr addrspace(3) %out, align 2
|
|
ret void
|
|
}
|
|
|
|
; i32 copy with align 1: the ALIGNED runs use four byte accesses (GISEL
; reassembles the dword with shifts/or before re-splitting the store);
; UNALIGNED uses a single b32 read/write.
define amdgpu_kernel void @ds4align1(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-SDAG-LABEL: ds4align1:
|
|
; ALIGNED-SDAG: ; %bb.0:
|
|
; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v1, v0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v2, v0 offset:1
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v3, v0 offset:2
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v0, v0 offset:3
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v4, s1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v4, v1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v4, v2 offset:1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v4, v3 offset:2
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v4, v0 offset:3
|
|
; ALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; ALIGNED-GISEL-LABEL: ds4align1:
|
|
; ALIGNED-GISEL: ; %bb.0:
|
|
; ALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v4, 8
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v1, v0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v2, v0 offset:1
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v3, v0 offset:3
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v0, v0 offset:2
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v5, s1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(2)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v1, v2, 8, v1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v2, 24, v3
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
; ALIGNED-GISEL-NEXT: v_or3_b32 v0, v2, v0, v1
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_e32 v1, 8, v0
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v5, v0
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v5, v1 offset:1
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_sdwa v1, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8_d16_hi v5, v0 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v5, v1 offset:3
|
|
; ALIGNED-GISEL-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-LABEL: ds4align1:
|
|
; UNALIGNED: ; %bb.0:
|
|
; UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-NEXT: ds_read_b32 v0, v0
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: ds_write_b32 v1, v0
|
|
; UNALIGNED-NEXT: s_endpgm
|
|
%val = load i32, ptr addrspace(3) %in, align 1
|
|
store i32 %val, ptr addrspace(3) %out, align 1
|
|
ret void
|
|
}
|
|
|
|
; i32 copy with align 2: the ALIGNED runs use two u16 accesses (GISEL
; recombines into a dword and re-splits on the store); UNALIGNED uses a
; single b32 read/write.
define amdgpu_kernel void @ds4align2(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-SDAG-LABEL: ds4align2:
|
|
; ALIGNED-SDAG: ; %bb.0:
|
|
; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v1, v0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v0, v0 offset:2
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v2, s1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v2, v1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v2, v0 offset:2
|
|
; ALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; ALIGNED-GISEL-LABEL: ds4align2:
|
|
; ALIGNED-GISEL: ; %bb.0:
|
|
; ALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v1, v0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v0, v0 offset:2
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v2, s1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16 v2, v0
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16_d16_hi v2, v0 offset:2
|
|
; ALIGNED-GISEL-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-LABEL: ds4align2:
|
|
; UNALIGNED: ; %bb.0:
|
|
; UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-NEXT: ds_read_b32 v0, v0
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: ds_write_b32 v1, v0
|
|
; UNALIGNED-NEXT: s_endpgm
|
|
%val = load i32, ptr addrspace(3) %in, align 2
|
|
store i32 %val, ptr addrspace(3) %out, align 2
|
|
ret void
|
|
}
|
|
|
|
; i32 copy with align 4: naturally aligned, so all RUN configurations share
; the GCN checks (single ds_read_b32/ds_write_b32).
define amdgpu_kernel void @ds4align4(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; GCN-LABEL: ds4align4:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: v_mov_b32_e32 v0, s0
|
|
; GCN-NEXT: ds_read_b32 v0, v0
|
|
; GCN-NEXT: v_mov_b32_e32 v1, s1
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: ds_write_b32 v1, v0
|
|
; GCN-NEXT: s_endpgm
|
|
%val = load i32, ptr addrspace(3) %in, align 4
|
|
store i32 %val, ptr addrspace(3) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; <2 x i32> copy with align 1: the ALIGNED runs use eight byte accesses
; (GISEL reassembles two dwords with shifts/or before re-splitting the
; stores); UNALIGNED uses a single b64 read/write.
define amdgpu_kernel void @ds8align1(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-SDAG-LABEL: ds8align1:
|
|
; ALIGNED-SDAG: ; %bb.0:
|
|
; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v2, v0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v3, v0 offset:1
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v4, v0 offset:2
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v5, v0 offset:4
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v6, v0 offset:5
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v7, v0 offset:3
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v8, v0 offset:6
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v0, v0 offset:7
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v1, s1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(4)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v5 offset:4
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(4)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v6 offset:5
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v2
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v3 offset:1
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v4 offset:2
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(7)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v7 offset:3
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(7)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v8 offset:6
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(7)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v0 offset:7
|
|
; ALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; ALIGNED-GISEL-LABEL: ds8align1:
|
|
; ALIGNED-GISEL: ; %bb.0:
|
|
; ALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v1, v0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v2, v0 offset:1
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v3, v0 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v4, v0 offset:3
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v5, v0 offset:4
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v6, v0 offset:5
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v7, v0 offset:6
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v0, v0 offset:7
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(6)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v1, v2, 8, v1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(4)
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v2, 24, v4
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
|
; ALIGNED-GISEL-NEXT: v_or3_b32 v1, v2, v3, v1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(2)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v2, v6, 8, v5
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v0, 24, v0
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v3, 16, v7
|
|
; ALIGNED-GISEL-NEXT: v_or3_b32 v0, v0, v3, v2
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_e32 v2, 8, v1
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v3, s1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v3, v1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v3, v2 offset:1
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v2, 8
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_sdwa v4, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8_d16_hi v3, v1 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v3, v4 offset:3
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_e32 v1, 8, v0
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v3, v0 offset:4
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v3, v1 offset:5
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_sdwa v1, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8_d16_hi v3, v0 offset:6
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v3, v1 offset:7
|
|
; ALIGNED-GISEL-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-LABEL: ds8align1:
|
|
; UNALIGNED: ; %bb.0:
|
|
; UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-NEXT: ds_read_b64 v[0:1], v0
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v2, s1
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: ds_write_b64 v2, v[0:1]
|
|
; UNALIGNED-NEXT: s_endpgm
|
|
%val = load <2 x i32>, ptr addrspace(3) %in, align 1
|
|
store <2 x i32> %val, ptr addrspace(3) %out, align 1
|
|
ret void
|
|
}
|
|
|
|
; <2 x i32> copy with align 2: the ALIGNED runs use four u16 accesses
; (GISEL recombines each dword before re-splitting the stores); UNALIGNED
; uses a single b64 read/write.
define amdgpu_kernel void @ds8align2(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-SDAG-LABEL: ds8align2:
|
|
; ALIGNED-SDAG: ; %bb.0:
|
|
; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v1, v0 offset:4
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v2, v0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v3, v0 offset:2
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v0, v0 offset:6
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v4, s1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v1 offset:4
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v2
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v3 offset:2
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v0 offset:6
|
|
; ALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; ALIGNED-GISEL-LABEL: ds8align2:
|
|
; ALIGNED-GISEL: ; %bb.0:
|
|
; ALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v1, v0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v2, v0 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v3, v0 offset:4
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v0, v0 offset:6
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v4, s1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(2)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v1, v2, 16, v1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v3
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16 v4, v1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16_d16_hi v4, v1 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16 v4, v0 offset:4
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16_d16_hi v4, v0 offset:6
|
|
; ALIGNED-GISEL-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-LABEL: ds8align2:
|
|
; UNALIGNED: ; %bb.0:
|
|
; UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-NEXT: ds_read_b64 v[0:1], v0
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v2, s1
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: ds_write_b64 v2, v[0:1]
|
|
; UNALIGNED-NEXT: s_endpgm
|
|
%val = load <2 x i32>, ptr addrspace(3) %in, align 2
|
|
store <2 x i32> %val, ptr addrspace(3) %out, align 2
|
|
ret void
|
|
}
|
|
|
|
; <2 x i32> copy with align 4: all RUN configurations share the GCN checks,
; using a ds_read2_b32/ds_write2_b32 pair of dword accesses.
define amdgpu_kernel void @ds8align4(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; GCN-LABEL: ds8align4:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: v_mov_b32_e32 v0, s0
|
|
; GCN-NEXT: ds_read2_b32 v[0:1], v0 offset1:1
|
|
; GCN-NEXT: v_mov_b32_e32 v2, s1
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: ds_write2_b32 v2, v0, v1 offset1:1
|
|
; GCN-NEXT: s_endpgm
|
|
%val = load <2 x i32>, ptr addrspace(3) %in, align 4
|
|
store <2 x i32> %val, ptr addrspace(3) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; <2 x i32> copy with align 8: naturally aligned, shared GCN checks
; (single ds_read_b64/ds_write_b64).
define amdgpu_kernel void @ds8align8(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; GCN-LABEL: ds8align8:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: v_mov_b32_e32 v0, s0
|
|
; GCN-NEXT: ds_read_b64 v[0:1], v0
|
|
; GCN-NEXT: v_mov_b32_e32 v2, s1
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: ds_write_b64 v2, v[0:1]
|
|
; GCN-NEXT: s_endpgm
|
|
%val = load <2 x i32>, ptr addrspace(3) %in, align 8
|
|
store <2 x i32> %val, ptr addrspace(3) %out, align 8
|
|
ret void
|
|
}
|
|
|
|
; <3 x i32> copy with align 1: the ALIGNED runs use twelve byte accesses
; (GISEL reassembles three dwords with shifts/or before re-splitting the
; stores); UNALIGNED uses a single b96 read/write.
define amdgpu_kernel void @ds12align1(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-SDAG-LABEL: ds12align1:
|
|
; ALIGNED-SDAG: ; %bb.0:
|
|
; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v1, v0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v2, v0 offset:1
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v3, v0 offset:2
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v4, v0 offset:3
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v5, v0 offset:4
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v6, v0 offset:5
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v7, v0 offset:6
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v8, v0 offset:7
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v9, v0 offset:8
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v10, v0 offset:9
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v11, v0 offset:10
|
|
; ALIGNED-SDAG-NEXT: ds_read_u8 v0, v0 offset:11
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v12, s1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v9 offset:8
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v10 offset:9
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v5 offset:4
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v6 offset:5
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v1
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v2 offset:1
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v3 offset:2
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v4 offset:3
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v7 offset:6
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v8 offset:7
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(11)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v11 offset:10
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(11)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b8 v12, v0 offset:11
|
|
; ALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; ALIGNED-GISEL-LABEL: ds12align1:
|
|
; ALIGNED-GISEL: ; %bb.0:
|
|
; ALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v1, v0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v2, v0 offset:1
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v3, v0 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v4, v0 offset:3
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v5, v0 offset:4
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v6, v0 offset:5
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v7, v0 offset:6
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v8, v0 offset:7
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(6)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v1, v2, 8, v1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(4)
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v2, 24, v4
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
|
; ALIGNED-GISEL-NEXT: v_or3_b32 v1, v2, v3, v1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(2)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v2, v6, 8, v5
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v3, v0 offset:8
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v4, v0 offset:9
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v5, v0 offset:10
|
|
; ALIGNED-GISEL-NEXT: ds_read_u8 v0, v0 offset:11
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(4)
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v6, 24, v8
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v7, 16, v7
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(2)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v3, v4, 8, v3
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v4, 16, v5
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_lshlrev_b32_e32 v0, 24, v0
|
|
; ALIGNED-GISEL-NEXT: v_or3_b32 v0, v0, v4, v3
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_e32 v3, 8, v1
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v4, s1
|
|
; ALIGNED-GISEL-NEXT: v_or3_b32 v2, v6, v7, v2
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v3 offset:1
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v3, 8
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_sdwa v5, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8_d16_hi v4, v1 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v5 offset:3
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_e32 v1, 8, v2
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v2 offset:4
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v1 offset:5
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_sdwa v1, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8_d16_hi v4, v2 offset:6
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v1 offset:7
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_e32 v1, 8, v0
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v0 offset:8
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v1 offset:9
|
|
; ALIGNED-GISEL-NEXT: v_lshrrev_b16_sdwa v1, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8_d16_hi v4, v0 offset:10
|
|
; ALIGNED-GISEL-NEXT: ds_write_b8 v4, v1 offset:11
|
|
; ALIGNED-GISEL-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-LABEL: ds12align1:
|
|
; UNALIGNED: ; %bb.0:
|
|
; UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-NEXT: ds_read_b96 v[0:2], v0
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v3, s1
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: ds_write_b96 v3, v[0:2]
|
|
; UNALIGNED-NEXT: s_endpgm
|
|
%val = load <3 x i32>, ptr addrspace(3) %in, align 1
|
|
store <3 x i32> %val, ptr addrspace(3) %out, align 1
|
|
ret void
|
|
}
|
|
|
|
; <3 x i32> copy with align 2: the ALIGNED runs use six u16 accesses (GISEL
; recombines each dword before re-splitting the stores); UNALIGNED uses a
; single b96 read/write.
define amdgpu_kernel void @ds12align2(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-SDAG-LABEL: ds12align2:
|
|
; ALIGNED-SDAG: ; %bb.0:
|
|
; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v2, v0
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v3, v0 offset:8
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v4, v0 offset:4
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v5, v0 offset:2
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v6, v0 offset:6
|
|
; ALIGNED-SDAG-NEXT: ds_read_u16 v0, v0 offset:10
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v1, s1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(4)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v1, v3 offset:8
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(4)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v1, v4 offset:4
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v1, v2
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(5)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v1, v5 offset:2
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(5)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v1, v6 offset:6
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(5)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b16 v1, v0 offset:10
|
|
; ALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; ALIGNED-GISEL-LABEL: ds12align2:
|
|
; ALIGNED-GISEL: ; %bb.0:
|
|
; ALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v1, v0
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v2, v0 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v3, v0 offset:4
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v4, v0 offset:6
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v5, v0 offset:8
|
|
; ALIGNED-GISEL-NEXT: ds_read_u16 v0, v0 offset:10
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v6, s1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(4)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v1, v2, 16, v1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(2)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v2, v4, 16, v3
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v5
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16 v6, v1
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16_d16_hi v6, v1 offset:2
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16 v6, v2 offset:4
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16_d16_hi v6, v2 offset:6
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16 v6, v0 offset:8
|
|
; ALIGNED-GISEL-NEXT: ds_write_b16_d16_hi v6, v0 offset:10
|
|
; ALIGNED-GISEL-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-LABEL: ds12align2:
|
|
; UNALIGNED: ; %bb.0:
|
|
; UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-NEXT: ds_read_b96 v[0:2], v0
|
|
; UNALIGNED-NEXT: v_mov_b32_e32 v3, s1
|
|
; UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-NEXT: ds_write_b96 v3, v[0:2]
|
|
; UNALIGNED-NEXT: s_endpgm
|
|
%val = load <3 x i32>, ptr addrspace(3) %in, align 2
|
|
store <3 x i32> %val, ptr addrspace(3) %out, align 2
|
|
ret void
|
|
}
|
|
|
|
; <3 x i32> copy with align 4: ALIGNED (both selectors) and UNALIGNED-SDAG
; use ds_read2_b32 plus a b32 for the third dword; UNALIGNED-GISEL uses a
; single b96 access.
define amdgpu_kernel void @ds12align4(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-LABEL: ds12align4:
|
|
; ALIGNED: ; %bb.0:
|
|
; ALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-NEXT: v_mov_b32_e32 v2, s0
|
|
; ALIGNED-NEXT: ds_read2_b32 v[0:1], v2 offset1:1
|
|
; ALIGNED-NEXT: ds_read_b32 v2, v2 offset:8
|
|
; ALIGNED-NEXT: v_mov_b32_e32 v3, s1
|
|
; ALIGNED-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-NEXT: ds_write2_b32 v3, v0, v1 offset1:1
|
|
; ALIGNED-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-NEXT: ds_write_b32 v3, v2 offset:8
|
|
; ALIGNED-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-SDAG-LABEL: ds12align4:
|
|
; UNALIGNED-SDAG: ; %bb.0:
|
|
; UNALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-SDAG-NEXT: v_mov_b32_e32 v2, s0
|
|
; UNALIGNED-SDAG-NEXT: ds_read2_b32 v[0:1], v2 offset1:1
|
|
; UNALIGNED-SDAG-NEXT: ds_read_b32 v2, v2 offset:8
|
|
; UNALIGNED-SDAG-NEXT: v_mov_b32_e32 v3, s1
|
|
; UNALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; UNALIGNED-SDAG-NEXT: ds_write2_b32 v3, v0, v1 offset1:1
|
|
; UNALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; UNALIGNED-SDAG-NEXT: ds_write_b32 v3, v2 offset:8
|
|
; UNALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-GISEL-LABEL: ds12align4:
|
|
; UNALIGNED-GISEL: ; %bb.0:
|
|
; UNALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-GISEL-NEXT: ds_read_b96 v[0:2], v0
|
|
; UNALIGNED-GISEL-NEXT: v_mov_b32_e32 v3, s1
|
|
; UNALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-GISEL-NEXT: ds_write_b96 v3, v[0:2]
|
|
; UNALIGNED-GISEL-NEXT: s_endpgm
|
|
%val = load <3 x i32>, ptr addrspace(3) %in, align 4
|
|
store <3 x i32> %val, ptr addrspace(3) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; <3 x i32> copy with align 8: ALIGNED-SDAG uses b64 + b32, ALIGNED-GISEL
; uses read2_b32 + b32, UNALIGNED-SDAG uses b32 + b64, and UNALIGNED-GISEL
; uses a single b96 access.
define amdgpu_kernel void @ds12align8(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; ALIGNED-SDAG-LABEL: ds12align8:
|
|
; ALIGNED-SDAG: ; %bb.0:
|
|
; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v2, s0
|
|
; ALIGNED-SDAG-NEXT: ds_read_b64 v[0:1], v2
|
|
; ALIGNED-SDAG-NEXT: ds_read_b32 v2, v2 offset:8
|
|
; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v3, s1
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b64 v3, v[0:1]
|
|
; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-SDAG-NEXT: ds_write_b32 v3, v2 offset:8
|
|
; ALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; ALIGNED-GISEL-LABEL: ds12align8:
|
|
; ALIGNED-GISEL: ; %bb.0:
|
|
; ALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v2, s0
|
|
; ALIGNED-GISEL-NEXT: ds_read2_b32 v[0:1], v2 offset1:1
|
|
; ALIGNED-GISEL-NEXT: ds_read_b32 v2, v2 offset:8
|
|
; ALIGNED-GISEL-NEXT: v_mov_b32_e32 v3, s1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-GISEL-NEXT: ds_write2_b32 v3, v0, v1 offset1:1
|
|
; ALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(1)
|
|
; ALIGNED-GISEL-NEXT: ds_write_b32 v3, v2 offset:8
|
|
; ALIGNED-GISEL-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-SDAG-LABEL: ds12align8:
|
|
; UNALIGNED-SDAG: ; %bb.0:
|
|
; UNALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-SDAG-NEXT: ds_read_b32 v2, v0 offset:8
|
|
; UNALIGNED-SDAG-NEXT: ds_read_b64 v[0:1], v0
|
|
; UNALIGNED-SDAG-NEXT: v_mov_b32_e32 v3, s1
|
|
; UNALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; UNALIGNED-SDAG-NEXT: ds_write_b32 v3, v2 offset:8
|
|
; UNALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(1)
|
|
; UNALIGNED-SDAG-NEXT: ds_write_b64 v3, v[0:1]
|
|
; UNALIGNED-SDAG-NEXT: s_endpgm
|
|
;
|
|
; UNALIGNED-GISEL-LABEL: ds12align8:
|
|
; UNALIGNED-GISEL: ; %bb.0:
|
|
; UNALIGNED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; UNALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-GISEL-NEXT: v_mov_b32_e32 v0, s0
|
|
; UNALIGNED-GISEL-NEXT: ds_read_b96 v[0:2], v0
|
|
; UNALIGNED-GISEL-NEXT: v_mov_b32_e32 v3, s1
|
|
; UNALIGNED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
|
|
; UNALIGNED-GISEL-NEXT: ds_write_b96 v3, v[0:2]
|
|
; UNALIGNED-GISEL-NEXT: s_endpgm
|
|
%val = load <3 x i32>, ptr addrspace(3) %in, align 8
|
|
store <3 x i32> %val, ptr addrspace(3) %out, align 8
|
|
ret void
|
|
}
|
|
|
|
; <3 x i32> copy with align 16: all RUN configurations share the GCN checks
; (single ds_read_b96/ds_write_b96).
define amdgpu_kernel void @ds12align16(ptr addrspace(3) %in, ptr addrspace(3) %out) {
|
|
; GCN-LABEL: ds12align16:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: v_mov_b32_e32 v0, s0
|
|
; GCN-NEXT: ds_read_b96 v[0:2], v0
|
|
; GCN-NEXT: v_mov_b32_e32 v3, s1
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GCN-NEXT: ds_write_b96 v3, v[0:2]
|
|
; GCN-NEXT: s_endpgm
|
|
%val = load <3 x i32>, ptr addrspace(3) %in, align 16
|
|
store <3 x i32> %val, ptr addrspace(3) %out, align 16
|
|
ret void
|
|
}
|
|
|
|
; <4 x i32> load/store with align 1: in aligned-access mode the access must be
; split into byte-sized DS operations (GlobalISel additionally re-packs the
; bytes into dwords before storing); in unaligned-access mode a single 128-bit
; DS access is still legal and is selected by both selectors.
define amdgpu_kernel void @ds16align1(ptr addrspace(3) %in, ptr addrspace(3) %out) {
; ALIGNED-SDAG-LABEL: ds16align1:
; ALIGNED-SDAG:       ; %bb.0:
; ALIGNED-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
; ALIGNED-SDAG-NEXT:    v_mov_b32_e32 v0, s0
; ALIGNED-SDAG-NEXT:    ds_read_u8 v1, v0
; ALIGNED-SDAG-NEXT:    ds_read_u8 v2, v0 offset:1
; ALIGNED-SDAG-NEXT:    ds_read_u8 v3, v0 offset:2
; ALIGNED-SDAG-NEXT:    ds_read_u8 v4, v0 offset:3
; ALIGNED-SDAG-NEXT:    ds_read_u8 v5, v0 offset:4
; ALIGNED-SDAG-NEXT:    ds_read_u8 v6, v0 offset:5
; ALIGNED-SDAG-NEXT:    ds_read_u8 v7, v0 offset:6
; ALIGNED-SDAG-NEXT:    ds_read_u8 v8, v0 offset:7
; ALIGNED-SDAG-NEXT:    ds_read_u8 v9, v0 offset:8
; ALIGNED-SDAG-NEXT:    ds_read_u8 v10, v0 offset:9
; ALIGNED-SDAG-NEXT:    ds_read_u8 v11, v0 offset:10
; ALIGNED-SDAG-NEXT:    ds_read_u8 v12, v0 offset:11
; ALIGNED-SDAG-NEXT:    ds_read_u8 v13, v0 offset:12
; ALIGNED-SDAG-NEXT:    ds_read_u8 v14, v0 offset:13
; ALIGNED-SDAG-NEXT:    ds_read_u8 v15, v0 offset:14
; ALIGNED-SDAG-NEXT:    ds_read_u8 v0, v0 offset:15
; ALIGNED-SDAG-NEXT:    v_mov_b32_e32 v16, s1
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(3)
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v13 offset:12
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(3)
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v14 offset:13
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v1
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v2 offset:1
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v5 offset:4
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v6 offset:5
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v9 offset:8
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v10 offset:9
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v3 offset:2
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v4 offset:3
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v7 offset:6
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v8 offset:7
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v11 offset:10
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v12 offset:11
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(14)
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v15 offset:14
; ALIGNED-SDAG-NEXT:    ds_write_b8 v16, v0 offset:15
; ALIGNED-SDAG-NEXT:    s_endpgm
;
; ALIGNED-GISEL-LABEL: ds16align1:
; ALIGNED-GISEL:       ; %bb.0:
; ALIGNED-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v0, s0
; ALIGNED-GISEL-NEXT:    ds_read_u8 v1, v0
; ALIGNED-GISEL-NEXT:    ds_read_u8 v2, v0 offset:1
; ALIGNED-GISEL-NEXT:    ds_read_u8 v3, v0 offset:2
; ALIGNED-GISEL-NEXT:    ds_read_u8 v4, v0 offset:3
; ALIGNED-GISEL-NEXT:    ds_read_u8 v5, v0 offset:4
; ALIGNED-GISEL-NEXT:    ds_read_u8 v6, v0 offset:5
; ALIGNED-GISEL-NEXT:    ds_read_u8 v7, v0 offset:6
; ALIGNED-GISEL-NEXT:    ds_read_u8 v8, v0 offset:7
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(6)
; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v1, v2, 8, v1
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(4)
; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 24, v4
; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
; ALIGNED-GISEL-NEXT:    v_or3_b32 v1, v2, v3, v1
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(2)
; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v2, v6, 8, v5
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v3, 24, v8
; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v4, 16, v7
; ALIGNED-GISEL-NEXT:    v_or3_b32 v2, v3, v4, v2
; ALIGNED-GISEL-NEXT:    ds_read_u8 v3, v0 offset:8
; ALIGNED-GISEL-NEXT:    ds_read_u8 v4, v0 offset:9
; ALIGNED-GISEL-NEXT:    ds_read_u8 v5, v0 offset:10
; ALIGNED-GISEL-NEXT:    ds_read_u8 v6, v0 offset:11
; ALIGNED-GISEL-NEXT:    ds_read_u8 v7, v0 offset:12
; ALIGNED-GISEL-NEXT:    ds_read_u8 v8, v0 offset:13
; ALIGNED-GISEL-NEXT:    ds_read_u8 v9, v0 offset:14
; ALIGNED-GISEL-NEXT:    ds_read_u8 v0, v0 offset:15
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(6)
; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v3, v4, 8, v3
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(4)
; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v4, 24, v6
; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
; ALIGNED-GISEL-NEXT:    v_or3_b32 v3, v4, v5, v3
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(2)
; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v4, v8, 8, v7
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v5, 16, v9
; ALIGNED-GISEL-NEXT:    v_or3_b32 v0, v0, v5, v4
; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v4, 8, v1
; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v5, s1
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v4 offset:1
; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, 8
; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v6, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v1 offset:2
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v6 offset:3
; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v2
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v2 offset:4
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:5
; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v2 offset:6
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:7
; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v3
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v3 offset:8
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:9
; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v3 offset:10
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:11
; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v0 offset:12
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:13
; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v0 offset:14
; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:15
; ALIGNED-GISEL-NEXT:    s_endpgm
;
; UNALIGNED-LABEL: ds16align1:
; UNALIGNED:       ; %bb.0:
; UNALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
; UNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
; UNALIGNED-NEXT:    ds_read_b128 v[0:3], v0
; UNALIGNED-NEXT:    v_mov_b32_e32 v4, s1
; UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
; UNALIGNED-NEXT:    ds_write_b128 v4, v[0:3]
; UNALIGNED-NEXT:    s_endpgm
  %val = load <4 x i32>, ptr addrspace(3) %in, align 1
  store <4 x i32> %val, ptr addrspace(3) %out, align 1
  ret void
}
|
|
|
|
; <4 x i32> load/store with align 2: aligned-access mode splits the access into
; 16-bit DS operations (GlobalISel packs halves into dwords and uses d16_hi
; stores for the high halves); unaligned-access mode keeps one 128-bit access.
define amdgpu_kernel void @ds16align2(ptr addrspace(3) %in, ptr addrspace(3) %out) {
; ALIGNED-SDAG-LABEL: ds16align2:
; ALIGNED-SDAG:       ; %bb.0:
; ALIGNED-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
; ALIGNED-SDAG-NEXT:    v_mov_b32_e32 v0, s0
; ALIGNED-SDAG-NEXT:    ds_read_u16 v2, v0
; ALIGNED-SDAG-NEXT:    ds_read_u16 v3, v0 offset:2
; ALIGNED-SDAG-NEXT:    ds_read_u16 v4, v0 offset:4
; ALIGNED-SDAG-NEXT:    ds_read_u16 v5, v0 offset:12
; ALIGNED-SDAG-NEXT:    ds_read_u16 v6, v0 offset:6
; ALIGNED-SDAG-NEXT:    ds_read_u16 v7, v0 offset:8
; ALIGNED-SDAG-NEXT:    ds_read_u16 v8, v0 offset:10
; ALIGNED-SDAG-NEXT:    ds_read_u16 v0, v0 offset:14
; ALIGNED-SDAG-NEXT:    v_mov_b32_e32 v1, s1
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(4)
; ALIGNED-SDAG-NEXT:    ds_write_b16 v1, v5 offset:12
; ALIGNED-SDAG-NEXT:    ds_write_b16 v1, v2
; ALIGNED-SDAG-NEXT:    ds_write_b16 v1, v4 offset:4
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(5)
; ALIGNED-SDAG-NEXT:    ds_write_b16 v1, v7 offset:8
; ALIGNED-SDAG-NEXT:    ds_write_b16 v1, v3 offset:2
; ALIGNED-SDAG-NEXT:    ds_write_b16 v1, v6 offset:6
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(7)
; ALIGNED-SDAG-NEXT:    ds_write_b16 v1, v8 offset:10
; ALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(7)
; ALIGNED-SDAG-NEXT:    ds_write_b16 v1, v0 offset:14
; ALIGNED-SDAG-NEXT:    s_endpgm
;
; ALIGNED-GISEL-LABEL: ds16align2:
; ALIGNED-GISEL:       ; %bb.0:
; ALIGNED-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v0, s0
; ALIGNED-GISEL-NEXT:    ds_read_u16 v1, v0
; ALIGNED-GISEL-NEXT:    ds_read_u16 v2, v0 offset:2
; ALIGNED-GISEL-NEXT:    ds_read_u16 v3, v0 offset:4
; ALIGNED-GISEL-NEXT:    ds_read_u16 v4, v0 offset:6
; ALIGNED-GISEL-NEXT:    ds_read_u16 v5, v0 offset:8
; ALIGNED-GISEL-NEXT:    ds_read_u16 v6, v0 offset:10
; ALIGNED-GISEL-NEXT:    ds_read_u16 v7, v0 offset:12
; ALIGNED-GISEL-NEXT:    ds_read_u16 v0, v0 offset:14
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(6)
; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v1, v2, 16, v1
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(4)
; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v2, v4, 16, v3
; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, s1
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(2)
; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v3, v6, 16, v5
; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v0, v0, 16, v7
; ALIGNED-GISEL-NEXT:    ds_write_b16 v4, v1
; ALIGNED-GISEL-NEXT:    ds_write_b16_d16_hi v4, v1 offset:2
; ALIGNED-GISEL-NEXT:    ds_write_b16 v4, v2 offset:4
; ALIGNED-GISEL-NEXT:    ds_write_b16_d16_hi v4, v2 offset:6
; ALIGNED-GISEL-NEXT:    ds_write_b16 v4, v3 offset:8
; ALIGNED-GISEL-NEXT:    ds_write_b16_d16_hi v4, v3 offset:10
; ALIGNED-GISEL-NEXT:    ds_write_b16 v4, v0 offset:12
; ALIGNED-GISEL-NEXT:    ds_write_b16_d16_hi v4, v0 offset:14
; ALIGNED-GISEL-NEXT:    s_endpgm
;
; UNALIGNED-LABEL: ds16align2:
; UNALIGNED:       ; %bb.0:
; UNALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
; UNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
; UNALIGNED-NEXT:    ds_read_b128 v[0:3], v0
; UNALIGNED-NEXT:    v_mov_b32_e32 v4, s1
; UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
; UNALIGNED-NEXT:    ds_write_b128 v4, v[0:3]
; UNALIGNED-NEXT:    s_endpgm
  %val = load <4 x i32>, ptr addrspace(3) %in, align 2
  store <4 x i32> %val, ptr addrspace(3) %out, align 2
  ret void
}
|
|
|
|
; <4 x i32> load/store with align 4: aligned-access mode uses pairs of
; ds_read2_b32/ds_write2_b32; unaligned-access mode can widen further (SDAG
; still emits b32 pairs in a different order, GlobalISel uses b64 pairs).
define amdgpu_kernel void @ds16align4(ptr addrspace(3) %in, ptr addrspace(3) %out) {
; ALIGNED-LABEL: ds16align4:
; ALIGNED:       ; %bb.0:
; ALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; ALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
; ALIGNED-NEXT:    v_mov_b32_e32 v2, s0
; ALIGNED-NEXT:    ds_read2_b32 v[0:1], v2 offset1:1
; ALIGNED-NEXT:    ds_read2_b32 v[2:3], v2 offset0:2 offset1:3
; ALIGNED-NEXT:    v_mov_b32_e32 v4, s1
; ALIGNED-NEXT:    s_waitcnt lgkmcnt(1)
; ALIGNED-NEXT:    ds_write2_b32 v4, v0, v1 offset1:1
; ALIGNED-NEXT:    s_waitcnt lgkmcnt(1)
; ALIGNED-NEXT:    ds_write2_b32 v4, v2, v3 offset0:2 offset1:3
; ALIGNED-NEXT:    s_endpgm
;
; UNALIGNED-SDAG-LABEL: ds16align4:
; UNALIGNED-SDAG:       ; %bb.0:
; UNALIGNED-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; UNALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
; UNALIGNED-SDAG-NEXT:    v_mov_b32_e32 v2, s0
; UNALIGNED-SDAG-NEXT:    ds_read2_b32 v[0:1], v2 offset0:2 offset1:3
; UNALIGNED-SDAG-NEXT:    ds_read2_b32 v[2:3], v2 offset1:1
; UNALIGNED-SDAG-NEXT:    v_mov_b32_e32 v4, s1
; UNALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(1)
; UNALIGNED-SDAG-NEXT:    ds_write2_b32 v4, v0, v1 offset0:2 offset1:3
; UNALIGNED-SDAG-NEXT:    s_waitcnt lgkmcnt(1)
; UNALIGNED-SDAG-NEXT:    ds_write2_b32 v4, v2, v3 offset1:1
; UNALIGNED-SDAG-NEXT:    s_endpgm
;
; UNALIGNED-GISEL-LABEL: ds16align4:
; UNALIGNED-GISEL:       ; %bb.0:
; UNALIGNED-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; UNALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
; UNALIGNED-GISEL-NEXT:    v_mov_b32_e32 v0, s0
; UNALIGNED-GISEL-NEXT:    ds_read2_b64 v[0:3], v0 offset1:1
; UNALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, s1
; UNALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
; UNALIGNED-GISEL-NEXT:    ds_write2_b64 v4, v[0:1], v[2:3] offset1:1
; UNALIGNED-GISEL-NEXT:    s_endpgm
  %val = load <4 x i32>, ptr addrspace(3) %in, align 4
  store <4 x i32> %val, ptr addrspace(3) %out, align 4
  ret void
}
|
|
|
|
; <4 x i32> load/store with align 8: every configuration selects a paired
; 64-bit DS access (ds_read2_b64/ds_write2_b64), so the checks merge into the
; common GCN prefix.
define amdgpu_kernel void @ds16align8(ptr addrspace(3) %in, ptr addrspace(3) %out) {
; GCN-LABEL: ds16align8:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    ds_read2_b64 v[0:3], v0 offset1:1
; GCN-NEXT:    v_mov_b32_e32 v4, s1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    ds_write2_b64 v4, v[0:1], v[2:3] offset1:1
; GCN-NEXT:    s_endpgm
  %val = load <4 x i32>, ptr addrspace(3) %in, align 8
  store <4 x i32> %val, ptr addrspace(3) %out, align 8
  ret void
}
|
|
|
|
; <4 x i32> load/store with align 16: naturally aligned, so every configuration
; selects a single 128-bit DS access (common GCN prefix).
define amdgpu_kernel void @ds16align16(ptr addrspace(3) %in, ptr addrspace(3) %out) {
; GCN-LABEL: ds16align16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    ds_read_b128 v[0:3], v0
; GCN-NEXT:    v_mov_b32_e32 v4, s1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    ds_write_b128 v4, v[0:3]
; GCN-NEXT:    s_endpgm
  %val = load <4 x i32>, ptr addrspace(3) %in, align 16
  store <4 x i32> %val, ptr addrspace(3) %out, align 16
  ret void
}
|