Ruiling, Song 0487db1f13
MachineScheduler: Improve instruction clustering (#137784)
The existing way of managing clustered nodes was to add weak edges between
neighbouring cluster nodes, forming a sort of ordered queue. This was later
recorded as `NextClusterPred` or `NextClusterSucc` in `ScheduleDAGMI`.

But instructions may not be picked in the exact order of that queue. For
example, suppose we have a queue of cluster nodes A B C. During scheduling,
node B might be picked first; it is then very likely that we only cluster
B and C for top-down scheduling, leaving A alone.

Another issue is:
```
if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
  std::swap(SUa, SUb);
if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
```
may break the cluster queue.

For example, suppose we want to cluster nodes (in `MemOpRecords` order)
1, 3, 2. Normally 1 (SUa) becomes the pred of 3 (SUb). But when it comes to
the pair (3, 2), since 3 (SUa) > 2 (SUb), we reorder the two nodes, making
2 the pred of 3. Now both 1 and 2 are preds of 3, but there is no edge
between 1 and 2, so we end up with a broken cluster chain.
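
The following standalone sketch (it is not the LLVM code itself; it only
mimics the pairwise edge insertion on plain node numbers) reproduces the
1 3 2 scenario:
```cpp
// Standalone sketch that mimics the pairwise Cluster-edge insertion over
// MemOpRecords order 1, 3, 2 (ReorderWhileClustering disabled), showing
// how the intended chain 1 -> 3 -> 2 breaks.
#include <cstdio>
#include <set>
#include <utility>

int main() {
  const bool ReorderWhileClustering = false;
  const int Nodes[] = {1, 3, 2};        // order as in MemOpRecords
  std::set<std::pair<int, int>> Edges;  // (pred, succ) cluster edges

  for (int I = 0; I + 1 < 3; ++I) {
    int SUa = Nodes[I], SUb = Nodes[I + 1];
    // Mirrors: if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
    if (!ReorderWhileClustering && SUa > SUb)
      std::swap(SUa, SUb);
    Edges.insert({SUa, SUb});           // SUa becomes pred of SUb
  }

  // Prints (1,3) and (2,3): both 1 and 2 are preds of 3, and there is no
  // edge between 1 and 2.
  for (const auto &E : Edges)
    std::printf("pred %d -> succ %d\n", E.first, E.second);
}
```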

To fix both issues, this change introduces an unordered set to manage the
cluster nodes. This can help improve clustering in some hard cases.
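
As a rough illustration of the set-based bookkeeping (hypothetical names
only; the actual data structures and hooks in the patch differ), whichever
node is picked first can find all of its cluster mates directly,
independent of the original queue order:
```cpp
// Rough illustration with hypothetical names, not the patch itself.
#include <cstdio>
#include <unordered_map>
#include <unordered_set>

// Each cluster is an unordered set of node ids; NodeToCluster maps a node
// id to the cluster it belongs to.
static std::unordered_map<int, int> NodeToCluster;
static std::unordered_map<int, std::unordered_set<int>> Clusters;

static void addToCluster(int ClusterId, int Node) {
  NodeToCluster[Node] = ClusterId;
  Clusters[ClusterId].insert(Node);
}

int main() {
  // Nodes 1, 3 and 2 all land in the same cluster, regardless of the
  // order in which the memory-op pairs were recorded.
  addToCluster(/*ClusterId=*/0, 1);
  addToCluster(0, 3);
  addToCluster(0, 2);

  // If the scheduler happens to pick node 3 first, its cluster mates can
  // still be looked up directly; there is no chain to walk and no broken
  // link between 1 and 2.
  const int Picked = 3;
  for (int Mate : Clusters[NodeToCluster[Picked]])
    if (Mate != Picked)
      std::printf("try to schedule %d next to %d\n", Mate, Picked);
}
```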

One key reason the change causes so many test check changes is that the
cluster candidates are no longer ordered, so the candidates may be picked
in a different order than before.

The most affected targets are: AMDGPU, AArch64, RISCV.

For RISCV, most of the changes look like minor instruction reordering; I
don't see any obvious regression.

For AArch64, some combining of ldr into ldp is affected, with two cases
regressed and two improved. The deeper reason is that the machine scheduler
cannot cluster them well either before or after the change, and the later
load-combine algorithm is also not smart enough.

For AMDGPU, some cases use more v_dual instructions while some regress;
this seems less critical. The test `v_vselect_v32bf16` gets more
buffer_load instructions claused.
2025-06-05 15:28:04 +08:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GFX6 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s
define amdgpu_kernel void @or_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-LABEL: or_v2i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v1, v1, v3
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: or_v2i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: or_v2i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 3, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT * T0.Y, T0.Y, T0.W,
; EG-NEXT: OR_INT T0.X, T0.X, T0.Z,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%b_ptr = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 1
%a = load <2 x i32>, ptr addrspace(1) %in
%b = load <2 x i32>, ptr addrspace(1) %b_ptr
%result = or <2 x i32> %a, %b
store <2 x i32> %result, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @or_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-LABEL: or_v4i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GFX6-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v3, v3, v7
; GFX6-NEXT: v_or_b32_e32 v2, v2, v6
; GFX6-NEXT: v_or_b32_e32 v1, v1, v5
; GFX6-NEXT: v_or_b32_e32 v0, v0, v4
; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: or_v4i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GFX8-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v3, v3, v7
; GFX8-NEXT: v_or_b32_e32 v2, v2, v6
; GFX8-NEXT: v_or_b32_e32 v1, v1, v5
; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
; GFX8-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: or_v4i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 1 @6
; EG-NEXT: ALU 5, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_128 T1.XYZW, T0.X, 16, #1
; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 10:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: OR_INT * T0.W, T0.W, T1.W,
; EG-NEXT: OR_INT * T0.Z, T0.Z, T1.Z,
; EG-NEXT: OR_INT * T0.Y, T0.Y, T1.Y,
; EG-NEXT: OR_INT T0.X, T0.X, T1.X,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%b_ptr = getelementptr <4 x i32>, ptr addrspace(1) %in, i32 1
%a = load <4 x i32>, ptr addrspace(1) %in
%b = load <4 x i32>, ptr addrspace(1) %b_ptr
%result = or <4 x i32> %a, %b
store <4 x i32> %result, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @scalar_or_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
; GFX6-LABEL: scalar_or_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_or_b32 s0, s2, s3
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: v_mov_b32_e32 v0, s0
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_or_b32 s0, s2, s3
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_or_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; EG-NEXT: OR_INT * T1.X, KC0[2].Z, KC0[2].W,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%or = or i32 %a, %b
store i32 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @vector_or_i32(ptr addrspace(1) %out, ptr addrspace(1) %a, i32 %b) {
; GFX6-LABEL: vector_or_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_load_dword s12, s[4:5], 0xd
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v0, s12, v0
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: vector_or_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dword s12, s[4:5], 0x34
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v0, s12, v0
; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: vector_or_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 2, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT T0.X, T0.X, KC0[2].W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%loada = load i32, ptr addrspace(1) %a
%or = or i32 %loada, %b
store i32 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @scalar_or_literal_i32(ptr addrspace(1) %out, i32 %a) {
; GFX6-LABEL: scalar_or_literal_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dword s6, s[4:5], 0xb
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_or_b32 s4, s6, 0x1869f
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_literal_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x2c
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_or_b32 s4, s6, 0x1869f
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_or_literal_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; EG-NEXT: OR_INT * T1.X, KC0[2].Z, literal.y,
; EG-NEXT: 2(2.802597e-45), 99999(1.401284e-40)
%or = or i32 %a, 99999
store i32 %or, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @scalar_or_literal_i64(ptr addrspace(1) %out, [8 x i32], i64 %a) {
; GFX6-LABEL: scalar_or_literal_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x13
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_or_b32 s4, s7, 0xf237b
; GFX6-NEXT: s_or_b32 s5, s6, 0x3039
; GFX6-NEXT: v_mov_b32_e32 v0, s5
; GFX6-NEXT: v_mov_b32_e32 v1, s4
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_literal_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x4c
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_or_b32 s4, s7, 0xf237b
; GFX8-NEXT: s_or_b32 s5, s6, 0x3039
; GFX8-NEXT: v_mov_b32_e32 v0, s5
; GFX8-NEXT: v_mov_b32_e32 v1, s4
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_or_literal_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 4, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: OR_INT * T0.Y, KC0[5].X, literal.x,
; EG-NEXT: 992123(1.390260e-39), 0(0.000000e+00)
; EG-NEXT: OR_INT T0.X, KC0[4].W, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 12345(1.729903e-41), 2(2.802597e-45)
%or = or i64 %a, 4261135838621753
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @scalar_or_literal_multi_use_i64(ptr addrspace(1) %out, [8 x i32], i64 %a, [8 x i32], i64 %b) {
; GFX6-LABEL: scalar_or_literal_multi_use_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GFX6-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x13
; GFX6-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x1d
; GFX6-NEXT: s_movk_i32 s8, 0x3039
; GFX6-NEXT: s_mov_b32 s9, 0xf237b
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX6-NEXT: v_mov_b32_e32 v0, s6
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v1, s7
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_add_u32 s0, s4, 0x3039
; GFX6-NEXT: s_addc_u32 s1, s5, 0xf237b
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, s0
; GFX6-NEXT: v_mov_b32_e32 v1, s1
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_literal_multi_use_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x4c
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x74
; GFX8-NEXT: s_movk_i32 s8, 0x3039
; GFX8-NEXT: s_mov_b32 s9, 0xf237b
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX8-NEXT: s_add_u32 s0, s4, 0x3039
; GFX8-NEXT: s_addc_u32 s1, s5, 0xf237b
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_or_literal_multi_use_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 12, @6, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T3.XY, T4.X, 0
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T2.X, 0
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T2.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 6:
; EG-NEXT: ADDC_UINT * T0.W, KC0[7].Y, literal.x,
; EG-NEXT: 12345(1.729903e-41), 0(0.000000e+00)
; EG-NEXT: ADD_INT T0.X, KC0[7].Y, literal.x,
; EG-NEXT: ADD_INT * T0.W, KC0[7].Z, PV.W,
; EG-NEXT: 12345(1.729903e-41), 0(0.000000e+00)
; EG-NEXT: ADD_INT T1.X, PV.W, literal.x,
; EG-NEXT: MOV * T2.X, literal.y,
; EG-NEXT: 992123(1.390260e-39), 0(0.000000e+00)
; EG-NEXT: OR_INT * T3.Y, KC0[5].X, literal.x,
; EG-NEXT: 992123(1.390260e-39), 0(0.000000e+00)
; EG-NEXT: OR_INT T3.X, KC0[4].W, literal.x,
; EG-NEXT: LSHR * T4.X, KC0[2].Y, literal.y,
; EG-NEXT: 12345(1.729903e-41), 2(2.802597e-45)
%or = or i64 %a, 4261135838621753
store i64 %or, ptr addrspace(1) %out
%foo = add i64 %b, 4261135838621753
store volatile i64 %foo, ptr addrspace(1) poison
ret void
}
define amdgpu_kernel void @scalar_or_inline_imm_i64(ptr addrspace(1) %out, [8 x i32], i64 %a) {
; GFX6-LABEL: scalar_or_inline_imm_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x13
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_or_b32 s4, s6, 63
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s7
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_inline_imm_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x4c
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_or_b32 s4, s6, 63
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_or_inline_imm_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: MOV * T0.Y, KC0[5].X,
; EG-NEXT: OR_INT T0.X, KC0[4].W, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 63(8.828180e-44), 2(2.802597e-45)
%or = or i64 %a, 63
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @scalar_or_inline_imm_multi_use_i64(ptr addrspace(1) %out, i64 %a, i64 %b) {
; GFX6-LABEL: scalar_or_inline_imm_multi_use_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_or_b32 s0, s2, 63
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: v_mov_b32_e32 v0, s0
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: s_add_u32 s0, s8, 63
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_addc_u32 s1, s9, 0
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, s0
; GFX6-NEXT: v_mov_b32_e32 v1, s1
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_inline_imm_multi_use_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_or_b32 s0, s2, 63
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: s_add_u32 s0, s8, 63
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_addc_u32 s1, s9, 0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_or_inline_imm_multi_use_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 9, @6, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T3.XY, T4.X, 0
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T2.X, 0
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T2.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 6:
; EG-NEXT: ADD_INT T0.X, KC0[3].Y, literal.x,
; EG-NEXT: ADDC_UINT * T0.W, KC0[3].Y, literal.x,
; EG-NEXT: 63(8.828180e-44), 0(0.000000e+00)
; EG-NEXT: ADD_INT T1.X, KC0[3].Z, PV.W,
; EG-NEXT: MOV * T2.X, literal.x,
; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00)
; EG-NEXT: MOV * T3.Y, KC0[3].X,
; EG-NEXT: OR_INT T3.X, KC0[2].W, literal.x,
; EG-NEXT: LSHR * T4.X, KC0[2].Y, literal.y,
; EG-NEXT: 63(8.828180e-44), 2(2.802597e-45)
%or = or i64 %a, 63
store i64 %or, ptr addrspace(1) %out
%foo = add i64 %b, 63
store volatile i64 %foo, ptr addrspace(1) poison
ret void
}
define amdgpu_kernel void @scalar_or_neg_inline_imm_i64(ptr addrspace(1) %out, [8 x i32], i64 %a) {
; GFX6-LABEL: scalar_or_neg_inline_imm_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dword s6, s[4:5], 0x13
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v1, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_or_b32 s4, s6, -8
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_neg_inline_imm_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x4c
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: v_mov_b32_e32 v1, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_or_b32 s4, s6, -8
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_or_neg_inline_imm_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 4, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: OR_INT T0.X, KC0[4].W, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: -8(nan), 2(2.802597e-45)
; EG-NEXT: MOV * T0.Y, literal.x,
; EG-NEXT: -1(nan), 0(0.000000e+00)
%or = or i64 %a, -8
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @vector_or_literal_i32(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX6-LABEL: vector_or_literal_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: vector_or_literal_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v0, 0xffff, v0
; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: vector_or_literal_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 2, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT T0.X, T0.X, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 65535(9.183409e-41), 2(2.802597e-45)
%loada = load i32, ptr addrspace(1) %a, align 4
%or = or i32 %loada, 65535
store i32 %or, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @vector_or_inline_immediate_i32(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX6-LABEL: vector_or_inline_immediate_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v0, 4, v0
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: vector_or_inline_immediate_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v0, 4, v0
; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: vector_or_inline_immediate_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 2, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT T0.X, T0.X, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 4(5.605194e-45), 2(2.802597e-45)
%loada = load i32, ptr addrspace(1) %a, align 4
%or = or i32 %loada, 4
store i32 %or, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @scalar_or_i64(ptr addrspace(1) %out, i64 %a, i64 %b) {
; GFX6-LABEL: scalar_or_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
; GFX6-NEXT: v_mov_b32_e32 v0, s0
; GFX6-NEXT: v_mov_b32_e32 v1, s1
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_or_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: OR_INT * T0.Y, KC0[3].X, KC0[3].Z,
; EG-NEXT: OR_INT * T0.X, KC0[2].W, KC0[3].Y,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%or = or i64 %a, %b
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @vector_or_i64(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX6-LABEL: vector_or_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s12, s2
; GFX6-NEXT: s_mov_b32 s13, s3
; GFX6-NEXT: s_mov_b32 s14, s6
; GFX6-NEXT: s_mov_b32 s15, s7
; GFX6-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX6-NEXT: buffer_load_dwordx2 v[2:3], off, s[12:15], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: vector_or_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s12, s2
; GFX8-NEXT: s_mov_b32 s13, s3
; GFX8-NEXT: s_mov_b32 s14, s6
; GFX8-NEXT: s_mov_b32 s15, s7
; GFX8-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX8-NEXT: buffer_load_dwordx2 v[2:3], off, s[12:15], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: vector_or_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 1 @6
; EG-NEXT: ALU 3, @12, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_64 T1.XY, T1.X, 0, #1
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 10:
; EG-NEXT: MOV T0.X, KC0[2].Z,
; EG-NEXT: MOV * T1.X, KC0[2].W,
; EG-NEXT: ALU clause starting at 12:
; EG-NEXT: OR_INT * T0.Y, T0.Y, T1.Y,
; EG-NEXT: OR_INT T0.X, T0.X, T1.X,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%loada = load i64, ptr addrspace(1) %a, align 8
%loadb = load i64, ptr addrspace(1) %b, align 8
%or = or i64 %loada, %loadb
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @scalar_vector_or_i64(ptr addrspace(1) %out, ptr addrspace(1) %a, i64 %b) {
; GFX6-LABEL: scalar_vector_or_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0xd
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v0, s12, v0
; GFX6-NEXT: v_or_b32_e32 v1, s13, v1
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_vector_or_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x34
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v0, s12, v0
; GFX8-NEXT: v_or_b32_e32 v1, s13, v1
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: scalar_vector_or_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 3, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT * T0.Y, T0.Y, KC0[3].X,
; EG-NEXT: OR_INT T0.X, T0.X, KC0[2].W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%loada = load i64, ptr addrspace(1) %a
%or = or i64 %loada, %b
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @vector_or_i64_loadimm(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX6-LABEL: vector_or_i64_loadimm:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v1, 0x146f, v1
; GFX6-NEXT: v_or_b32_e32 v0, 0xdf77987f, v0
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: vector_or_i64_loadimm:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v1, 0x146f, v1
; GFX8-NEXT: v_or_b32_e32 v0, 0xdf77987f, v0
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: vector_or_i64_loadimm:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 4, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT * T0.Y, T0.Y, literal.x,
; EG-NEXT: 5231(7.330192e-42), 0(0.000000e+00)
; EG-NEXT: OR_INT T0.X, T0.X, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: -545810305(-1.784115e+19), 2(2.802597e-45)
%loada = load i64, ptr addrspace(1) %a, align 8
%or = or i64 %loada, 22470723082367
store i64 %or, ptr addrspace(1) %out
ret void
}
; FIXME: The or 0 should really be removed.
define amdgpu_kernel void @vector_or_i64_imm(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX6-LABEL: vector_or_i64_imm:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v0, 8, v0
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: vector_or_i64_imm:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v0, 8, v0
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: vector_or_i64_imm:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 2, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT T0.X, T0.X, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45)
%loada = load i64, ptr addrspace(1) %a, align 8
%or = or i64 %loada, 8
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @vector_or_i64_neg_inline_imm(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX6-LABEL: vector_or_i64_neg_inline_imm:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: v_mov_b32_e32 v1, -1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v0, -8, v0
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: vector_or_i64_neg_inline_imm:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: v_mov_b32_e32 v1, -1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v0, -8, v0
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: vector_or_i64_neg_inline_imm:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 4, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT T0.X, T0.X, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: -8(nan), 2(2.802597e-45)
; EG-NEXT: MOV * T0.Y, literal.x,
; EG-NEXT: -1(nan), 0(0.000000e+00)
%loada = load i64, ptr addrspace(1) %a, align 8
%or = or i64 %loada, -8
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @vector_or_i64_neg_literal(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX6-LABEL: vector_or_i64_neg_literal:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s8, s2
; GFX6-NEXT: s_mov_b32 s9, s3
; GFX6-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: v_mov_b32_e32 v1, -1
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_or_b32_e32 v0, 0xffffff38, v0
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: vector_or_i64_neg_literal:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s8, s2
; GFX8-NEXT: s_mov_b32 s9, s3
; GFX8-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: v_mov_b32_e32 v1, -1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_or_b32_e32 v0, 0xffffff38, v0
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: vector_or_i64_neg_literal:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 4, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: OR_INT T0.X, T0.X, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: -200(nan), 2(2.802597e-45)
; EG-NEXT: MOV * T0.Y, literal.x,
; EG-NEXT: -1(nan), 0(0.000000e+00)
%loada = load i64, ptr addrspace(1) %a, align 8
%or = or i64 %loada, -200
store i64 %or, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @trunc_i64_or_to_i32(ptr addrspace(1) %out, [8 x i32], i64 %a, [8 x i32], i64 %b) {
; GFX6-LABEL: trunc_i64_or_to_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dword s6, s[4:5], 0x13
; GFX6-NEXT: s_load_dword s7, s[4:5], 0x1d
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_or_b32 s4, s7, s6
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: trunc_i64_or_to_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x4c
; GFX8-NEXT: s_load_dword s7, s[4:5], 0x74
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_or_b32 s4, s7, s6
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: trunc_i64_or_to_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
; EG-NEXT: OR_INT * T1.X, KC0[7].Y, KC0[4].W,
%add = or i64 %b, %a
%trunc = trunc i64 %add to i32
store i32 %trunc, ptr addrspace(1) %out, align 8
ret void
}
define amdgpu_kernel void @or_i1(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
; GFX6-LABEL: or_i1:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s10, s6
; GFX6-NEXT: s_mov_b32 s11, s7
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mov_b32 s12, s2
; GFX6-NEXT: s_mov_b32 s13, s3
; GFX6-NEXT: s_mov_b32 s14, s6
; GFX6-NEXT: s_mov_b32 s15, s7
; GFX6-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX6-NEXT: buffer_load_dword v1, off, s[12:15], 0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: s_waitcnt vmcnt(1)
; GFX6-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX6-NEXT: v_max_f32_e32 v0, v1, v0
; GFX6-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: or_i1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s10, s6
; GFX8-NEXT: s_mov_b32 s11, s7
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s12, s2
; GFX8-NEXT: s_mov_b32 s13, s3
; GFX8-NEXT: s_mov_b32 s14, s6
; GFX8-NEXT: s_mov_b32 s15, s7
; GFX8-NEXT: buffer_load_dword v0, off, s[8:11], 0
; GFX8-NEXT: buffer_load_dword v1, off, s[12:15], 0
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: s_waitcnt vmcnt(1)
; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX8-NEXT: v_max_f32_e32 v0, v1, v0
; GFX8-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: or_i1:
; EG: ; %bb.0:
; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 1 @6
; EG-NEXT: ALU 4, @12, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T1.X, T1.X, 0, #1
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 10:
; EG-NEXT: MOV T0.X, KC0[2].Z,
; EG-NEXT: MOV * T1.X, KC0[2].W,
; EG-NEXT: ALU clause starting at 12:
; EG-NEXT: MAX_DX10 * T0.W, T0.X, T1.X,
; EG-NEXT: SETGE_DX10 * T0.W, PV.W, 0.0,
; EG-NEXT: AND_INT T0.X, PV.W, 1,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%a = load float, ptr addrspace(1) %in0
%b = load float, ptr addrspace(1) %in1
%acmp = fcmp oge float %a, 0.000000e+00
%bcmp = fcmp oge float %b, 0.000000e+00
%or = or i1 %acmp, %bcmp
%result = zext i1 %or to i32
store i32 %result, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @s_or_i1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
; GFX6-LABEL: s_or_i1:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
; GFX6-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_cmp_eq_u32 s0, s1
; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX6-NEXT: s_cmp_eq_u32 s2, s3
; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX6-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX6-NEXT: buffer_store_byte v0, off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_or_i1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2c
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_cmp_eq_u32 s0, s1
; GFX8-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX8-NEXT: s_cmp_eq_u32 s2, s3
; GFX8-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX8-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX8-NEXT: buffer_store_byte v0, off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
; EG-LABEL: s_or_i1:
; EG: ; %bb.0:
; EG-NEXT: ALU 14, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: SETE_INT T0.W, KC0[3].X, KC0[3].Y,
; EG-NEXT: SETE_INT * T1.W, KC0[2].Z, KC0[2].W,
; EG-NEXT: AND_INT T2.W, KC0[2].Y, literal.x,
; EG-NEXT: OR_INT * T0.W, PS, PV.W,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PS, 1,
; EG-NEXT: LSHL * T1.W, PV.W, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
; EG-NEXT: MOV T0.Y, 0.0,
; EG-NEXT: MOV * T0.Z, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%cmp0 = icmp eq i32 %a, %b
%cmp1 = icmp eq i32 %c, %d
%or = or i1 %cmp0, %cmp1
store i1 %or, ptr addrspace(1) %out
ret void
}