llvm-project/llvm/test/CodeGen/X86/SwizzleShuff.ll
Noah Goldstein 69a322fed1 Add new pass X86FixupInstTuning for fixing up machine-instruction selection.
There are a variety of cases where we want more control over the exact
instruction emitted. This commit creates a new pass to fix up
instructions after the DAG has been lowered. The pass is only meant to
replace instructions that are guaranteed to be interchangeable, not to
do analysis for special cases.

Handling these instruction changes in X86ISelLowering or
X86ISelDAGToDAG isn't ideal, as it's liable either to break existing
patterns that expect a certain instruction or to generate infinite
loops.

In addition, operating at the MachineInstr level lets us access
scheduling and code-size information when making these decisions.

Currently the pass only implements `{v}permilps` ->
`{v}shufps`/`{v}shufd` (sketched below), but more transforms can be
added.
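
A minimal sketch of the equivalence being exploited (the register and
immediate here are illustrative, not taken from a real test): vpermilps
with an immediate permutes a single source in-lane, and vshufps computes
the same result when both of its sources are tied to that same register,
so

    vpermilps $27, %xmm0, %xmm0        # dst[i] = src[(imm >> 2*i) & 3]
    vshufps $27, %xmm0, %xmm0, %xmm0   # identical result when src1 == src2

and the pass can pick whichever form the scheduling model or encoding
size favors.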

Differential Revision: https://reviews.llvm.org/D143787
2023-02-27 18:53:25 -06:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s

; Check that we perform a scalar XOR on i32.
define void @pull_bitcast(ptr %pA, ptr %pB) {
; CHECK-LABEL: pull_bitcast:
; CHECK: # %bb.0:
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: xorl %eax, (%rdi)
; CHECK-NEXT: retq
%A = load <4 x i8>, ptr %pA
%B = load <4 x i8>, ptr %pB
%C = xor <4 x i8> %A, %B
store <4 x i8> %C, ptr %pA
ret void
}
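
; Check that the multi-use shuffle %S is emitted once and that both of
; its single-source users read from the same register.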
define <4 x i32> @multi_use_swizzle(ptr %pA, ptr %pB) {
; CHECK-LABEL: multi_use_swizzle:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],mem[1,2]
; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3,2,2]
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,1,0,2]
; CHECK-NEXT: vxorps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%A = load <4 x i32>, ptr %pA
%B = load <4 x i32>, ptr %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 6>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 2>
%S2 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 2>
%R = xor <4 x i32> %S1, %S2
ret <4 x i32> %R
}
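
; As in @pull_bitcast, but the XOR result is also returned, so it is
; moved into a vector register.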
define <4 x i8> @pull_bitcast2(ptr %pA, ptr %pB, ptr %pC) {
; CHECK-LABEL: pull_bitcast2:
; CHECK: # %bb.0:
; CHECK-NEXT: movl (%rdi), %eax
; CHECK-NEXT: movl %eax, (%rdx)
; CHECK-NEXT: xorl (%rsi), %eax
; CHECK-NEXT: vmovd %eax, %xmm0
; CHECK-NEXT: movl %eax, (%rdi)
; CHECK-NEXT: retq
%A = load <4 x i8>, ptr %pA
store <4 x i8> %A, ptr %pC
%B = load <4 x i8>, ptr %pB
%C = xor <4 x i8> %A, %B
store <4 x i8> %C, ptr %pA
ret <4 x i8> %C
}
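
; Check that applying the [1,0,3,2] mask twice folds to the identity,
; leaving only the load of %pA.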
define <4 x i32> @reverse_1(ptr %pA, ptr %pB) {
; CHECK-LABEL: reverse_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0
; CHECK-NEXT: retq
%A = load <4 x i32>, ptr %pA
%B = load <4 x i32>, ptr %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
ret <4 x i32> %S1
}
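
; The masks do not cancel here; check that the composed shuffle
; mem[2,3,2,3] is still emitted.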
define <4 x i32> @no_reverse_shuff(ptr %pA, ptr %pB) {
; CHECK-LABEL: no_reverse_shuff:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; CHECK-NEXT: retq
%A = load <4 x i32>, ptr %pA
%B = load <4 x i32>, ptr %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 3, i32 2>
ret <4 x i32> %S1
}