llvm-project/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i64-add.ll
David Green 8998ff53c9 Revert "[ARM] Allow D-reg copies to use VMOVD with fpregs64"
This reverts commit 0a762ec1b09d96734a3462f8792a5574d089b24d.

Some CPUs enable fp64 by default (such as cortex-m7). When specifying a
single-precision fpu with them like -mfpu=fpv5-sp-d16, the fp64 feature will
be disabled, but fpregs64 will not. We need to disable them both correctly under
clang in order for the backend to be able to use them reliably. In the meantime
this reverts 0a762ec1b09d96734 until that issue is fixed.
2023-06-01 17:49:25 +01:00

165 lines
6.4 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+mve -o - --verify-machineinstrs | FileCheck %s
; No -mtriple is passed to llc above, so the module triple below selects the
; Armv8.1-M Thumb2 backend (with +mve from --mattr) and is load-bearing.
target triple = "thumbv8.1m.main-none-none-eabi"
; Expected to not transform
; complex_add_v2i64: one i64 complex number per vector; even lane = real part,
; odd lane = imaginary part.  The IR computes
;   out.real = b.real - a.imag
;   out.imag = b.imag + a.real
; i.e. the deinterleave/op/reinterleave shape a complex-arithmetic combine
; would normally match.  Per the "Expected to not transform" note, the CHECK
; lines pin scalar 64-bit add/adc and sub/sbc pairs instead of a single vector
; op (presumably because MVE lacks a 64-bit element form — confirm).
define arm_aapcs_vfpcc <2 x i64> @complex_add_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: complex_add_v2i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: vmov r0, r1, d0
; CHECK-NEXT: vmov r2, r3, d3
; CHECK-NEXT: adds.w lr, r2, r0
; CHECK-NEXT: adc.w r12, r3, r1
; CHECK-NEXT: vmov r2, r3, d1
; CHECK-NEXT: vmov r1, r0, d2
; CHECK-NEXT: subs r1, r1, r2
; CHECK-NEXT: vmov q0[2], q0[0], r1, lr
; CHECK-NEXT: sbcs r0, r3
; CHECK-NEXT: vmov q0[3], q0[1], r0, r12
; CHECK-NEXT: pop {r7, pc}
entry:
%a.real = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <1 x i32> <i32 0> ; even lane of %a (real)
%a.imag = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <1 x i32> <i32 1> ; odd lane of %a (imag)
%b.real = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <1 x i32> <i32 0> ; even lane of %b (real)
%b.imag = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <1 x i32> <i32 1> ; odd lane of %b (imag)
%0 = sub <1 x i64> %b.real, %a.imag ; result real lane
%1 = add <1 x i64> %b.imag, %a.real ; result imag lane
%interleaved.vec = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1> ; re-interleave {real, imag}
ret <2 x i64> %interleaved.vec
}
; Expected to not transform
; complex_add_v4i64: two i64 complex numbers across a register pair (q0/q1 in,
; q2/q3 in); even lanes are real parts, odd lanes imaginary parts:
;   out.real = b.real - a.imag
;   out.imag = b.imag + a.real
; Per the "Expected to not transform" note, the CHECK lines pin per-element
; scalar adds/adc and subs/sbc plus lane moves rather than a combined vector
; complex-add.
define arm_aapcs_vfpcc <4 x i64> @complex_add_v4i64(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: complex_add_v4i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: vmov q4, q1
; CHECK-NEXT: vmov r2, r3, d7
; CHECK-NEXT: vmov r0, r1, d8
; CHECK-NEXT: adds.w lr, r2, r0
; CHECK-NEXT: adc.w r12, r3, r1
; CHECK-NEXT: vmov r2, r3, d0
; CHECK-NEXT: vmov r1, r0, d5
; CHECK-NEXT: adds r1, r1, r2
; CHECK-NEXT: adcs r0, r3
; CHECK-NEXT: vmov q1[2], q1[0], r1, lr
; CHECK-NEXT: vmov q1[3], q1[1], r0, r12
; CHECK-NEXT: vmov r0, r1, d9
; CHECK-NEXT: vmov r2, r3, d6
; CHECK-NEXT: subs.w lr, r2, r0
; CHECK-NEXT: sbc.w r12, r3, r1
; CHECK-NEXT: vmov r2, r3, d1
; CHECK-NEXT: vmov r1, r0, d4
; CHECK-NEXT: vmov.f32 s2, s4
; CHECK-NEXT: vmov.f32 s3, s5
; CHECK-NEXT: subs r1, r1, r2
; CHECK-NEXT: vmov q2[2], q2[0], r1, lr
; CHECK-NEXT: sbcs r0, r3
; CHECK-NEXT: vmov q2[3], q2[1], r0, r12
; CHECK-NEXT: vmov.f32 s0, s8
; CHECK-NEXT: vmov.f32 s4, s10
; CHECK-NEXT: vmov.f32 s1, s9
; CHECK-NEXT: vmov.f32 s5, s11
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r7, pc}
entry:
%a.real = shufflevector <4 x i64> %a, <4 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2> ; even lanes of %a (real)
%a.imag = shufflevector <4 x i64> %a, <4 x i64> zeroinitializer, <2 x i32> <i32 1, i32 3> ; odd lanes of %a (imag)
%b.real = shufflevector <4 x i64> %b, <4 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2> ; even lanes of %b (real)
%b.imag = shufflevector <4 x i64> %b, <4 x i64> zeroinitializer, <2 x i32> <i32 1, i32 3> ; odd lanes of %b (imag)
%0 = sub <2 x i64> %b.real, %a.imag ; result real lanes
%1 = add <2 x i64> %b.imag, %a.real ; result imag lanes
%interleaved.vec = shufflevector <2 x i64> %0, <2 x i64> %1, <4 x i32> <i32 0, i32 2, i32 1, i32 3> ; re-interleave real/imag
ret <4 x i64> %interleaved.vec
}
; Expected to not transform
; complex_add_v8i64: four i64 complex numbers; with <8 x i64> arguments the
; later operand vectors arrive on the stack, hence the vldrw.u32 loads at
; sp-relative offsets in the CHECK lines.  Even lanes are real parts, odd
; lanes imaginary parts:
;   out.real = b.real - a.imag
;   out.imag = b.imag + a.real
; Per the "Expected to not transform" note, the CHECK lines pin per-element
; scalar adds/adc and subs/sbc sequences rather than a combined vector op.
define arm_aapcs_vfpcc <8 x i64> @complex_add_v8i64(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: complex_add_v8i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: add r2, sp, #72
; CHECK-NEXT: vmov q4, q1
; CHECK-NEXT: vldrw.u32 q5, [r2]
; CHECK-NEXT: vmov r0, r1, d8
; CHECK-NEXT: vmov r2, r3, d11
; CHECK-NEXT: adds.w lr, r2, r0
; CHECK-NEXT: adc.w r12, r3, r1
; CHECK-NEXT: add r1, sp, #56
; CHECK-NEXT: vldrw.u32 q6, [r1]
; CHECK-NEXT: vmov r2, r3, d0
; CHECK-NEXT: vmov r1, r0, d13
; CHECK-NEXT: adds r1, r1, r2
; CHECK-NEXT: adcs r0, r3
; CHECK-NEXT: vmov q1[2], q1[0], r1, lr
; CHECK-NEXT: vmov q1[3], q1[1], r0, r12
; CHECK-NEXT: vmov r0, r1, d9
; CHECK-NEXT: vmov r2, r3, d10
; CHECK-NEXT: subs.w lr, r2, r0
; CHECK-NEXT: sbc.w r12, r3, r1
; CHECK-NEXT: vmov r2, r3, d1
; CHECK-NEXT: vmov r1, r0, d12
; CHECK-NEXT: vmov.f32 s2, s4
; CHECK-NEXT: vmov.f32 s3, s5
; CHECK-NEXT: subs r1, r1, r2
; CHECK-NEXT: add r2, sp, #104
; CHECK-NEXT: vldrw.u32 q5, [r2]
; CHECK-NEXT: sbcs r0, r3
; CHECK-NEXT: vmov q4[2], q4[0], r1, lr
; CHECK-NEXT: vmov q4[3], q4[1], r0, r12
; CHECK-NEXT: vmov r0, r1, d6
; CHECK-NEXT: vmov r2, r3, d11
; CHECK-NEXT: vmov.f32 s0, s16
; CHECK-NEXT: vmov.f32 s4, s18
; CHECK-NEXT: vmov.f32 s1, s17
; CHECK-NEXT: vmov.f32 s5, s19
; CHECK-NEXT: adds.w lr, r2, r0
; CHECK-NEXT: adc.w r12, r3, r1
; CHECK-NEXT: add r1, sp, #88
; CHECK-NEXT: vldrw.u32 q6, [r1]
; CHECK-NEXT: vmov r2, r3, d4
; CHECK-NEXT: vmov r1, r0, d13
; CHECK-NEXT: adds r1, r1, r2
; CHECK-NEXT: adcs r0, r3
; CHECK-NEXT: vmov q4[2], q4[0], r1, lr
; CHECK-NEXT: vmov q4[3], q4[1], r0, r12
; CHECK-NEXT: vmov r0, r1, d7
; CHECK-NEXT: vmov r2, r3, d10
; CHECK-NEXT: subs.w lr, r2, r0
; CHECK-NEXT: sbc.w r12, r3, r1
; CHECK-NEXT: vmov r2, r3, d5
; CHECK-NEXT: vmov r1, r0, d12
; CHECK-NEXT: vmov.f32 s10, s16
; CHECK-NEXT: vmov.f32 s11, s17
; CHECK-NEXT: subs r1, r1, r2
; CHECK-NEXT: vmov q3[2], q3[0], r1, lr
; CHECK-NEXT: sbcs r0, r3
; CHECK-NEXT: vmov q3[3], q3[1], r0, r12
; CHECK-NEXT: vmov.f32 s16, s14
; CHECK-NEXT: vmov.f32 s8, s12
; CHECK-NEXT: vmov.f32 s17, s15
; CHECK-NEXT: vmov.f32 s9, s13
; CHECK-NEXT: vmov q3, q4
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: pop {r7, pc}
entry:
%a.real = shufflevector <8 x i64> %a, <8 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6> ; even lanes of %a (real)
%a.imag = shufflevector <8 x i64> %a, <8 x i64> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7> ; odd lanes of %a (imag)
%b.real = shufflevector <8 x i64> %b, <8 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6> ; even lanes of %b (real)
%b.imag = shufflevector <8 x i64> %b, <8 x i64> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7> ; odd lanes of %b (imag)
%0 = sub <4 x i64> %b.real, %a.imag ; result real lanes
%1 = add <4 x i64> %b.imag, %a.real ; result imag lanes
%interleaved.vec = shufflevector <4 x i64> %0, <4 x i64> %1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7> ; re-interleave real/imag
ret <8 x i64> %interleaved.vec
}