Matt Arsenault 58a88001f3
PeepholeOpt: Fix looking for def of current copy to coalesce (#125533)
This fixes the handling of subregister extract copies, which
will allow AMDGPU to remove its implementation of
shouldRewriteCopySrc; that hook exists as a 10-year-old
workaround for this bug. peephole-opt-fold-reg-sequence-subreg.mir
will show the expected improvement once the custom implementation
is removed.
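
As a hand-written illustration of the pattern in question (a
minimal sketch; the register classes and subregister indices
are invented for the example, not taken from that test):

  %2:vreg_64 = REG_SEQUENCE %0:vgpr_32, %subreg.sub0, %1:vgpr_32, %subreg.sub1
  %3:vgpr_32 = COPY %2.sub1

With the fix, the walk can look through the REG_SEQUENCE and
rewrite the COPY to read %1 directly instead of extracting
sub1 from %2.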

The copy coalescing processing here is overly abstracted
from what is actually happening. Previously, when visiting
coalescable copy-like instructions, we would parse the
sources one at a time and then pass the def of the root
instruction into findNextSource. This meant the first thing
the newly constructed ValueTracker would do is getVRegDef to
find the instruction we were currently processing. This added
an unnecessary step, placed a useless entry in the RewriteMap,
and required skipping the no-op case where getNewSource would
return the original source operand. This was a problem because,
for a subregister extract, shouldRewriteCopySrc would always
report the rewrite as useful, so the use-def chain walk would
abort and return the original operand. Instead, start the walk
at the source operand.

This does not fix the confused handling in the uncoalescable
copy case, which is proving to be more difficult. Some currently
handled cases have multiple defs from a single source, and other
handled cases have 0 input operands. It would be simpler if
this were implemented with isCopyLikeInstr, rather than guessing
at the operand structure as it does now.

There are some improvements and some regressions. The
regressions appear to be downstream issues for the most part. One
of the uglier regressions is in PPC, where a sequence of insert_subregs
is used to build registers. I opened #125502 to use reg_sequence instead,
which may help.

The worst regression is an absurd SPARC testcase using a <251 x fp128>,
which produces a very long chain of insert_subregs.

We need improved subregister handling locally in PeepholeOptimizer
and in other passes like MachineCSE to fix some of the other
regressions. We should handle subregister composes and fold more
indexes into insert_subreg and reg_sequence.
2025-02-05 23:29:02 +07:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=armv7a-eabi -mattr=+neon -float-abi=hard %s -o - | FileCheck %s
define <8 x i8> @vmuli8(<8 x i8> %A, <8 x i8> %B) nounwind {
; CHECK-LABEL: vmuli8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.i8 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = mul <8 x i8> %A, %B
ret <8 x i8> %tmp3
}
define <4 x i16> @vmuli16(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: vmuli16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.i16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = mul <4 x i16> %A, %B
ret <4 x i16> %tmp3
}
define <2 x i32> @vmuli32(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: vmuli32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.i32 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = mul <2 x i32> %A, %B
ret <2 x i32> %tmp3
}
define <2 x float> @vmulf32(<2 x float> %A, <2 x float> %B) nounwind {
; CHECK-LABEL: vmulf32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.f32 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fmul <2 x float> %A, %B
ret <2 x float> %tmp3
}
define <8 x i8> @vmulp8(<8 x i8> %A, <8 x i8> %B) nounwind {
; CHECK-LABEL: vmulp8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.p8 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = call <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8> %A, <8 x i8> %B)
ret <8 x i8> %tmp3
}
define <16 x i8> @vmulQi8(<16 x i8> %A, <16 x i8> %B) nounwind {
; CHECK-LABEL: vmulQi8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.i8 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = mul <16 x i8> %A, %B
ret <16 x i8> %tmp3
}
define <8 x i16> @vmulQi16(<8 x i16> %A, <8 x i16> %B) nounwind {
; CHECK-LABEL: vmulQi16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.i16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = mul <8 x i16> %A, %B
ret <8 x i16> %tmp3
}
define <4 x i32> @vmulQi32(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK-LABEL: vmulQi32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.i32 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = mul <4 x i32> %A, %B
ret <4 x i32> %tmp3
}
define <4 x float> @vmulQf32(<4 x float> %A, <4 x float> %B) nounwind {
; CHECK-LABEL: vmulQf32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.f32 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fmul <4 x float> %A, %B
ret <4 x float> %tmp3
}
define <16 x i8> @vmulQp8(<16 x i8> %A, <16 x i8> %B) nounwind {
; CHECK-LABEL: vmulQp8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmul.p8 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = call <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8> %A, <16 x i8> %B)
ret <16 x i8> %tmp3
}
declare <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
define arm_aapcs_vfpcc <2 x float> @test_vmul_lanef32(<2 x float> %arg0_float32x2_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmul_lanef32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmul.f32 d0, d0, d1[0]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <2 x i32> zeroinitializer ; <<2 x float>> [#uses=1]
%1 = fmul <2 x float> %0, %arg0_float32x2_t ; <<2 x float>> [#uses=1]
ret <2 x float> %1
}
define arm_aapcs_vfpcc <4 x i16> @test_vmul_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
; CHECK-LABEL: test_vmul_lanes16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmul.i16 d0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = mul <4 x i16> %0, %arg0_int16x4_t ; <<4 x i16>> [#uses=1]
ret <4 x i16> %1
}
define arm_aapcs_vfpcc <2 x i32> @test_vmul_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmul_lanes32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmul.i32 d0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = mul <2 x i32> %0, %arg0_int32x2_t ; <<2 x i32>> [#uses=1]
ret <2 x i32> %1
}
define arm_aapcs_vfpcc <4 x float> @test_vmulQ_lanef32(<4 x float> %arg0_float32x4_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmulQ_lanef32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmul.f32 q0, q0, d2[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
%1 = fmul <4 x float> %0, %arg0_float32x4_t ; <<4 x float>> [#uses=1]
ret <4 x float> %1
}
define arm_aapcs_vfpcc <8 x i16> @test_vmulQ_lanes16(<8 x i16> %arg0_int16x8_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
; CHECK-LABEL: test_vmulQ_lanes16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmul.i16 q0, q0, d2[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%1 = mul <8 x i16> %0, %arg0_int16x8_t ; <<8 x i16>> [#uses=1]
ret <8 x i16> %1
}
define arm_aapcs_vfpcc <4 x i32> @test_vmulQ_lanes32(<4 x i32> %arg0_int32x4_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmulQ_lanes32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmul.i32 q0, q0, d2[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i32>> [#uses=1]
%1 = mul <4 x i32> %0, %arg0_int32x4_t ; <<4 x i32>> [#uses=1]
ret <4 x i32> %1
}
define <8 x i16> @vmulls8(<8 x i8> %A, <8 x i8> %B) nounwind {
; CHECK-LABEL: vmulls8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.s8 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = sext <8 x i8> %A to <8 x i16>
%tmp4 = sext <8 x i8> %B to <8 x i16>
%tmp5 = mul <8 x i16> %tmp3, %tmp4
ret <8 x i16> %tmp5
}
define <8 x i16> @vmulls8_int(<8 x i8> %A, <8 x i8> %B) nounwind {
; CHECK-LABEL: vmulls8_int:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.s8 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %A, <8 x i8> %B)
ret <8 x i16> %tmp3
}
define <4 x i32> @vmulls16(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: vmulls16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.s16 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = sext <4 x i16> %A to <4 x i32>
%tmp4 = sext <4 x i16> %B to <4 x i32>
%tmp5 = mul <4 x i32> %tmp3, %tmp4
ret <4 x i32> %tmp5
}
define <4 x i32> @vmulls16_int(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: vmulls16_int:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.s16 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %A, <4 x i16> %B)
ret <4 x i32> %tmp3
}
define <2 x i64> @vmulls32(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: vmulls32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.s32 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = sext <2 x i32> %A to <2 x i64>
%tmp4 = sext <2 x i32> %B to <2 x i64>
%tmp5 = mul <2 x i64> %tmp3, %tmp4
ret <2 x i64> %tmp5
}
define <2 x i64> @vmulls32_int(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: vmulls32_int:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.s32 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %A, <2 x i32> %B)
ret <2 x i64> %tmp3
}
define <8 x i16> @vmullu8(<8 x i8> %A, <8 x i8> %B) nounwind {
; CHECK-LABEL: vmullu8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u8 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = zext <8 x i8> %A to <8 x i16>
%tmp4 = zext <8 x i8> %B to <8 x i16>
%tmp5 = mul <8 x i16> %tmp3, %tmp4
ret <8 x i16> %tmp5
}
define <8 x i16> @vmullu8_int(<8 x i8> %A, <8 x i8> %B) nounwind {
; CHECK-LABEL: vmullu8_int:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u8 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %A, <8 x i8> %B)
ret <8 x i16> %tmp3
}
define <4 x i32> @vmullu16(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: vmullu16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u16 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = zext <4 x i16> %A to <4 x i32>
%tmp4 = zext <4 x i16> %B to <4 x i32>
%tmp5 = mul <4 x i32> %tmp3, %tmp4
ret <4 x i32> %tmp5
}
define <4 x i32> @vmullu16_int(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: vmullu16_int:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u16 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %A, <4 x i16> %B)
ret <4 x i32> %tmp3
}
define <2 x i64> @vmullu32(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: vmullu32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u32 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = zext <2 x i32> %A to <2 x i64>
%tmp4 = zext <2 x i32> %B to <2 x i64>
%tmp5 = mul <2 x i64> %tmp3, %tmp4
ret <2 x i64> %tmp5
}
define <2 x i64> @vmullu32_int(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: vmullu32_int:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u32 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %A, <2 x i32> %B)
ret <2 x i64> %tmp3
}
define <8 x i16> @vmulla8(<8 x i8> %A, <8 x i8> %B) nounwind {
; CHECK-LABEL: vmulla8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u8 q0, d0, d1
; CHECK-NEXT: vbic.i16 q0, #0xff00
; CHECK-NEXT: bx lr
%tmp3 = zext <8 x i8> %A to <8 x i16>
%tmp4 = zext <8 x i8> %B to <8 x i16>
%tmp5 = mul <8 x i16> %tmp3, %tmp4
%and = and <8 x i16> %tmp5, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
ret <8 x i16> %and
}
define <4 x i32> @vmulla16(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: vmulla16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u16 q8, d0, d1
; CHECK-NEXT: vmov.i32 q9, #0xffff
; CHECK-NEXT: vand q0, q8, q9
; CHECK-NEXT: bx lr
%tmp3 = zext <4 x i16> %A to <4 x i32>
%tmp4 = zext <4 x i16> %B to <4 x i32>
%tmp5 = mul <4 x i32> %tmp3, %tmp4
%and = and <4 x i32> %tmp5, <i32 65535, i32 65535, i32 65535, i32 65535>
ret <4 x i32> %and
}
define <2 x i64> @vmulla32(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: vmulla32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.u32 q8, d0, d1
; CHECK-NEXT: vmov.i64 q9, #0xffffffff
; CHECK-NEXT: vand q0, q8, q9
; CHECK-NEXT: bx lr
%tmp3 = zext <2 x i32> %A to <2 x i64>
%tmp4 = zext <2 x i32> %B to <2 x i64>
%tmp5 = mul <2 x i64> %tmp3, %tmp4
%and = and <2 x i64> %tmp5, <i64 4294967295, i64 4294967295>
ret <2 x i64> %and
}
define <8 x i16> @vmullp8(<8 x i8> %A, <8 x i8> %B) nounwind {
; CHECK-LABEL: vmullp8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmull.p8 q0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %A, <8 x i8> %B)
ret <8 x i16> %tmp3
}
define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
; CHECK-LABEL: test_vmull_lanes16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.s16 q0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = sext <4 x i16> %arg0_int16x4_t to <4 x i32>
%2 = sext <4 x i16> %0 to <4 x i32>
%3 = mul <4 x i32> %1, %2
ret <4 x i32> %3
}
define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16_int(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
; CHECK-LABEL: test_vmull_lanes16_int:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.s16 q0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %1
}
define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmull_lanes32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.s32 q0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = sext <2 x i32> %arg0_int32x2_t to <2 x i64>
%2 = sext <2 x i32> %0 to <2 x i64>
%3 = mul <2 x i64> %1, %2
ret <2 x i64> %3
}
define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanes32_int(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmull_lanes32_int:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.s32 q0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %1
}
define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
; CHECK-LABEL: test_vmull_laneu16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.u16 q0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = zext <4 x i16> %arg0_uint16x4_t to <4 x i32>
%2 = zext <4 x i16> %0 to <4 x i32>
%3 = mul <4 x i32> %1, %2
ret <4 x i32> %3
}
define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16_int(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
; CHECK-LABEL: test_vmull_laneu16_int:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.u16 q0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %arg0_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %1
}
define arm_aapcs_vfpcc <2 x i64> @test_vmull_laneu32(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmull_laneu32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.u32 q0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = zext <2 x i32> %arg0_uint32x2_t to <2 x i64>
%2 = zext <2 x i32> %0 to <2 x i64>
%3 = mul <2 x i64> %1, %2
ret <2 x i64> %3
}
define arm_aapcs_vfpcc <2 x i64> @test_vmull_laneu32_int(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmull_laneu32_int:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.u32 q0, d0, d1[1]
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %arg0_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %1
}
define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanea16(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
; CHECK-LABEL: test_vmull_lanea16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.u16 q8, d0, d1[1]
; CHECK-NEXT: vmov.i32 q9, #0xffff
; CHECK-NEXT: vand q0, q8, q9
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = zext <4 x i16> %arg0_uint16x4_t to <4 x i32>
%2 = zext <4 x i16> %0 to <4 x i32>
%3 = mul <4 x i32> %1, %2
%and = and <4 x i32> %3, <i32 65535, i32 65535, i32 65535, i32 65535>
ret <4 x i32> %and
}
define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanea32(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
; CHECK-LABEL: test_vmull_lanea32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmull.u32 q8, d0, d1[1]
; CHECK-NEXT: vmov.i64 q9, #0xffffffff
; CHECK-NEXT: vand q0, q8, q9
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = zext <2 x i32> %arg0_uint32x2_t to <2 x i64>
%2 = zext <2 x i32> %0 to <2 x i64>
%3 = mul <2 x i64> %1, %2
%and = and <2 x i64> %3, <i64 4294967295, i64 4294967295>
ret <2 x i64> %and
}
declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
; Radar 8687140
; VMULL needs to recognize BUILD_VECTORs with sign/zero-extended elements.
define <8 x i16> @vmull_extvec_s8(<8 x i8> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_s8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov.i8 d16, #0xf4
; CHECK-NEXT: vmull.s8 q0, d0, d16
; CHECK-NEXT: bx lr
%tmp3 = sext <8 x i8> %arg to <8 x i16>
%tmp4 = mul <8 x i16> %tmp3, <i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12>
ret <8 x i16> %tmp4
}
define <8 x i16> @vmull_extvec_u8(<8 x i8> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_u8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov.i8 d16, #0xc
; CHECK-NEXT: vmull.u8 q0, d0, d16
; CHECK-NEXT: bx lr
%tmp3 = zext <8 x i8> %arg to <8 x i16>
%tmp4 = mul <8 x i16> %tmp3, <i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12>
ret <8 x i16> %tmp4
}
define <8 x i16> @vmull_noextvec_s8(<8 x i8> %arg) nounwind {
; Do not use VMULL if the BUILD_VECTOR element values are too big.
; CHECK-LABEL: vmull_noextvec_s8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmovl.s8 q8, d0
; CHECK-NEXT: adr r0, .LCPI44_0
; CHECK-NEXT: vld1.64 {d18, d19}, [r0:128]
; CHECK-NEXT: vmul.i16 q0, q8, q9
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI44_0:
; CHECK-NEXT: .short 64537 @ 0xfc19
; CHECK-NEXT: .short 64537 @ 0xfc19
; CHECK-NEXT: .short 64537 @ 0xfc19
; CHECK-NEXT: .short 64537 @ 0xfc19
; CHECK-NEXT: .short 64537 @ 0xfc19
; CHECK-NEXT: .short 64537 @ 0xfc19
; CHECK-NEXT: .short 64537 @ 0xfc19
; CHECK-NEXT: .short 64537 @ 0xfc19
%tmp3 = sext <8 x i8> %arg to <8 x i16>
%tmp4 = mul <8 x i16> %tmp3, <i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999>
ret <8 x i16> %tmp4
}
define <8 x i16> @vmull_noextvec_u8(<8 x i8> %arg) nounwind {
; Do not use VMULL if the BUILD_VECTOR element values are too big.
; CHECK-LABEL: vmull_noextvec_u8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmovl.u8 q8, d0
; CHECK-NEXT: adr r0, .LCPI45_0
; CHECK-NEXT: vld1.64 {d18, d19}, [r0:128]
; CHECK-NEXT: vmul.i16 q0, q8, q9
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI45_0:
; CHECK-NEXT: .short 999 @ 0x3e7
; CHECK-NEXT: .short 999 @ 0x3e7
; CHECK-NEXT: .short 999 @ 0x3e7
; CHECK-NEXT: .short 999 @ 0x3e7
; CHECK-NEXT: .short 999 @ 0x3e7
; CHECK-NEXT: .short 999 @ 0x3e7
; CHECK-NEXT: .short 999 @ 0x3e7
; CHECK-NEXT: .short 999 @ 0x3e7
%tmp3 = zext <8 x i8> %arg to <8 x i16>
%tmp4 = mul <8 x i16> %tmp3, <i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999>
ret <8 x i16> %tmp4
}
define <4 x i32> @vmull_extvec_s16(<4 x i16> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_s16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmvn.i16 d16, #0xb
; CHECK-NEXT: vmull.s16 q0, d0, d16
; CHECK-NEXT: bx lr
%tmp3 = sext <4 x i16> %arg to <4 x i32>
%tmp4 = mul <4 x i32> %tmp3, <i32 -12, i32 -12, i32 -12, i32 -12>
ret <4 x i32> %tmp4
}
define <4 x i32> @vmull_extvec_u16(<4 x i16> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_u16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, .LCPI47_0
; CHECK-NEXT: vmull.u16 q0, d0, d16
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI47_0:
; CHECK-NEXT: .short 1234 @ 0x4d2
; CHECK-NEXT: .short 1234 @ 0x4d2
; CHECK-NEXT: .short 1234 @ 0x4d2
; CHECK-NEXT: .short 1234 @ 0x4d2
%tmp3 = zext <4 x i16> %arg to <4 x i32>
%tmp4 = mul <4 x i32> %tmp3, <i32 1234, i32 1234, i32 1234, i32 1234>
ret <4 x i32> %tmp4
}
define <2 x i64> @vmull_extvec_s32(<2 x i32> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_s32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, .LCPI48_0
; CHECK-NEXT: vmull.s32 q0, d0, d16
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI48_0:
; CHECK-NEXT: .long 4294966062 @ 0xfffffb2e
; CHECK-NEXT: .long 4294966062 @ 0xfffffb2e
%tmp3 = sext <2 x i32> %arg to <2 x i64>
%tmp4 = mul <2 x i64> %tmp3, <i64 -1234, i64 -1234>
ret <2 x i64> %tmp4
}
define <2 x i64> @vmull_extvec_u32(<2 x i32> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_u32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, .LCPI49_0
; CHECK-NEXT: vmull.u32 q0, d0, d16
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI49_0:
; CHECK-NEXT: .long 1234 @ 0x4d2
; CHECK-NEXT: .long 1234 @ 0x4d2
%tmp3 = zext <2 x i32> %arg to <2 x i64>
%tmp4 = mul <2 x i64> %tmp3, <i64 1234, i64 1234>
ret <2 x i64> %tmp4
}
; rdar://9197392
define void @distribute(ptr %dst, ptr %src, i32 %mul) nounwind {
; CHECK-LABEL: distribute:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vld1.8 {d16, d17}, [r1]
; CHECK-NEXT: vdup.8 d18, r2
; CHECK-NEXT: vmull.u8 q10, d17, d18
; CHECK-NEXT: vmlal.u8 q10, d16, d18
; CHECK-NEXT: vst1.16 {d20, d21}, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = trunc i32 %mul to i8
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
%2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
%3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0(ptr %src, i32 1)
%4 = bitcast <16 x i8> %3 to <2 x double>
%5 = extractelement <2 x double> %4, i32 1
%6 = bitcast double %5 to <8 x i8>
%7 = zext <8 x i8> %6 to <8 x i16>
%8 = zext <8 x i8> %2 to <8 x i16>
%9 = extractelement <2 x double> %4, i32 0
%10 = bitcast double %9 to <8 x i8>
%11 = zext <8 x i8> %10 to <8 x i16>
%12 = add <8 x i16> %7, %11
%13 = mul <8 x i16> %12, %8
tail call void @llvm.arm.neon.vst1.p0.v8i16(ptr %dst, <8 x i16> %13, i32 2)
ret void
}
declare <16 x i8> @llvm.arm.neon.vld1.v16i8.p0(ptr, i32) nounwind readonly
declare void @llvm.arm.neon.vst1.p0.v8i16(ptr, <8 x i16>, i32) nounwind
; Take advantage of the Cortex-A8 multiplier accumulator forward.
%struct.uint8x8_t = type { <8 x i8> }
define void @distribute2(ptr nocapture %dst, ptr %src, i32 %mul) nounwind {
; CHECK-LABEL: distribute2:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vld1.8 {d16, d17}, [r1]
; CHECK-NEXT: vadd.i8 d16, d17, d16
; CHECK-NEXT: vdup.8 d17, r2
; CHECK-NEXT: vmul.i8 d16, d16, d17
; CHECK-NEXT: vstr d16, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = trunc i32 %mul to i8
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
%2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
%3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0(ptr %src, i32 1)
%4 = bitcast <16 x i8> %3 to <2 x double>
%5 = extractelement <2 x double> %4, i32 1
%6 = bitcast double %5 to <8 x i8>
%7 = extractelement <2 x double> %4, i32 0
%8 = bitcast double %7 to <8 x i8>
%9 = add <8 x i8> %6, %8
%10 = mul <8 x i8> %9, %2
store <8 x i8> %10, ptr %dst, align 8
ret void
}
define void @distribute2_commutative(ptr nocapture %dst, ptr %src, i32 %mul) nounwind {
; CHECK-LABEL: distribute2_commutative:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vld1.8 {d16, d17}, [r1]
; CHECK-NEXT: vadd.i8 d16, d17, d16
; CHECK-NEXT: vdup.8 d17, r2
; CHECK-NEXT: vmul.i8 d16, d17, d16
; CHECK-NEXT: vstr d16, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = trunc i32 %mul to i8
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
%2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
%3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0(ptr %src, i32 1)
%4 = bitcast <16 x i8> %3 to <2 x double>
%5 = extractelement <2 x double> %4, i32 1
%6 = bitcast double %5 to <8 x i8>
%7 = extractelement <2 x double> %4, i32 0
%8 = bitcast double %7 to <8 x i8>
%9 = add <8 x i8> %6, %8
%10 = mul <8 x i8> %2, %9
store <8 x i8> %10, ptr %dst, align 8
ret void
}
define <8 x i8> @no_distribute(<8 x i8> %a, <8 x i8> %b) nounwind {
; CHECK-LABEL: no_distribute:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vadd.i8 d16, d0, d1
; CHECK-NEXT: vmul.i8 d0, d16, d16
; CHECK-NEXT: bx lr
entry:
%0 = add <8 x i8> %a, %b
%1 = mul <8 x i8> %0, %0
ret <8 x i8> %1
}
; If one operand has a zero-extend and the other a sign-extend, vmull
; cannot be used.
define i16 @vmullWithInconsistentExtensions(<8 x i8> %vec) {
; CHECK-LABEL: vmullWithInconsistentExtensions:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmovl.s8 q8, d0
; CHECK-NEXT: vmov.i16 q9, #0xff
; CHECK-NEXT: vmul.i16 q8, q8, q9
; CHECK-NEXT: vmov.u16 r0, d16[0]
; CHECK-NEXT: bx lr
%1 = sext <8 x i8> %vec to <8 x i16>
%2 = mul <8 x i16> %1, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
%3 = extractelement <8 x i16> %2, i32 0
ret i16 %3
}
; A constant build_vector created for a vmull with half-width elements must
; not introduce illegal types. <rdar://problem/11324364>
define void @vmull_buildvector() nounwind optsize ssp align 2 {
; CHECK-LABEL: vmull_buildvector:
; CHECK: @ %bb.0: @ %entry
entry:
br i1 undef, label %for.end179, label %for.body.lr.ph
for.body.lr.ph: ; preds = %entry
br label %for.body
for.cond.loopexit: ; preds = %for.body33, %for.body
br i1 undef, label %for.end179, label %for.body
for.body: ; preds = %for.cond.loopexit, %for.body.lr.ph
br i1 undef, label %for.cond.loopexit, label %for.body33.lr.ph
for.body33.lr.ph: ; preds = %for.body
%.sub = select i1 undef, i32 0, i32 undef
br label %for.body33
for.body33: ; preds = %for.body33, %for.body33.lr.ph
%add45 = add i32 undef, undef
%vld155 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0(ptr undef, i32 1)
%0 = load ptr, ptr undef, align 4
%shuffle.i250 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
%1 = bitcast <1 x i64> %shuffle.i250 to <8 x i8>
%vmovl.i249 = zext <8 x i8> %1 to <8 x i16>
%shuffle.i246 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
%shuffle.i240 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> <i32 1>
%2 = bitcast <1 x i64> %shuffle.i240 to <8 x i8>
%3 = bitcast <16 x i8> undef to <2 x i64>
%vmovl.i237 = zext <8 x i8> undef to <8 x i16>
%shuffle.i234 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
%shuffle.i226 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
%vmovl.i225 = zext <8 x i8> undef to <8 x i16>
%mul.i223 = mul <8 x i16> %vmovl.i249, %vmovl.i249
%vshl_n = shl <8 x i16> %mul.i223, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
%vqsub2.i216 = tail call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>, <8 x i16> %vshl_n) nounwind
%mul.i209 = mul <8 x i16> undef, <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>
%vshr_n130 = lshr <8 x i16> undef, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
%vshr_n134 = lshr <8 x i16> %mul.i209, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
%sub.i205 = sub <8 x i16> <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>, %vshr_n130
%sub.i203 = sub <8 x i16> <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>, %vshr_n134
%add.i200 = add <8 x i16> %sub.i205, <i16 96, i16 96, i16 96, i16 96, i16 96, i16 96, i16 96, i16 96>
%add.i198 = add <8 x i16> %add.i200, %sub.i203
%mul.i194 = mul <8 x i16> %add.i198, %vmovl.i237
%mul.i191 = mul <8 x i16> %vshr_n130, undef
%add.i192 = add <8 x i16> %mul.i191, %mul.i194
%mul.i187 = mul <8 x i16> %vshr_n134, undef
%add.i188 = add <8 x i16> %mul.i187, %add.i192
%mul.i185 = mul <8 x i16> undef, undef
%add.i186 = add <8 x i16> %mul.i185, undef
%vrshr_n160 = tail call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i188, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
%vrshr_n163 = tail call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i186, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
%mul.i184 = mul <8 x i16> undef, %vrshr_n160
%mul.i181 = mul <8 x i16> undef, %vmovl.i225
%add.i182 = add <8 x i16> %mul.i181, %mul.i184
%vrshr_n170 = tail call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i182, <8 x i16> <i16 -7, i16 -7, i16 -7, i16 -7, i16 -7, i16 -7, i16 -7, i16 -7>)
%vqmovn1.i180 = tail call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %vrshr_n170) nounwind
%4 = bitcast <8 x i8> %vqmovn1.i180 to <1 x i64>
%shuffle.i = shufflevector <1 x i64> %4, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
%5 = bitcast <2 x i64> %shuffle.i to <16 x i8>
store <16 x i8> %5, ptr undef, align 16
%add177 = add nsw i32 undef, 16
br i1 undef, label %for.body33, label %for.cond.loopexit
for.end179: ; preds = %for.cond.loopexit, %entry
ret void
}
declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16>) nounwind readnone
; vmull lowering would create a zext(v4i8 load()) instead of a zextload(v4i8),
; creating an illegal type during legalization and causing an assert.
; PR15970
define void @no_illegal_types_vmull_sext(<4 x i32> %a) {
; CHECK-LABEL: no_illegal_types_vmull_sext:
; CHECK: @ %bb.0: @ %entry
entry:
%wide.load283.i = load <4 x i8>, ptr undef, align 1
%0 = sext <4 x i8> %wide.load283.i to <4 x i32>
%1 = sub nsw <4 x i32> %0, %a
%2 = mul nsw <4 x i32> %1, %1
%predphi290.v.i = select <4 x i1> undef, <4 x i32> undef, <4 x i32> %2
store <4 x i32> %predphi290.v.i, ptr undef, align 4
ret void
}
define void @no_illegal_types_vmull_zext(<4 x i32> %a) {
; CHECK-LABEL: no_illegal_types_vmull_zext:
; CHECK: @ %bb.0: @ %entry
entry:
%wide.load283.i = load <4 x i8>, ptr undef, align 1
%0 = zext <4 x i8> %wide.load283.i to <4 x i32>
%1 = sub nsw <4 x i32> %0, %a
%2 = mul nsw <4 x i32> %1, %1
%predphi290.v.i = select <4 x i1> undef, <4 x i32> undef, <4 x i32> %2
store <4 x i32> %predphi290.v.i, ptr undef, align 4
ret void
}
define void @fmul_splat(ptr %A, ptr nocapture %dst, float %tmp) nounwind {
; Look for a scalar float rather than a splat, then a vector*scalar multiply.
; CHECK-LABEL: fmul_splat:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.32 {d16, d17}, [r0]
; CHECK-NEXT: @ kill: def $s0 killed $s0 def $d0
; CHECK-NEXT: vmul.f32 q8, q8, d0[0]
; CHECK-NEXT: vst1.32 {d16, d17}, [r1]
; CHECK-NEXT: bx lr
%tmp5 = load <4 x float>, ptr %A, align 4
%tmp6 = insertelement <4 x float> undef, float %tmp, i32 0
%tmp7 = insertelement <4 x float> %tmp6, float %tmp, i32 1
%tmp8 = insertelement <4 x float> %tmp7, float %tmp, i32 2
%tmp9 = insertelement <4 x float> %tmp8, float %tmp, i32 3
%tmp10 = fmul <4 x float> %tmp9, %tmp5
store <4 x float> %tmp10, ptr %dst, align 4
ret void
}
define void @fmul_splat_load(ptr %A, ptr nocapture %dst, ptr nocapture readonly %src) nounwind {
; Look for doing a normal scalar FP load rather than an to-all-lanes load,
; then a vector*scalar multiply.
; FIXME: Temporarily broken due to splat representation changes.
; CHECK-LABEL: fmul_splat_load:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.32 {d16, d17}, [r0]
; CHECK-NEXT: vld1.32 {d18[], d19[]}, [r2:32]
; CHECK-NEXT: vmul.f32 q8, q9, q8
; CHECK-NEXT: vst1.32 {d16, d17}, [r1]
; CHECK-NEXT: bx lr
%tmp = load float, ptr %src, align 4
%tmp5 = load <4 x float>, ptr %A, align 4
%tmp6 = insertelement <4 x float> undef, float %tmp, i32 0
%tmp7 = insertelement <4 x float> %tmp6, float %tmp, i32 1
%tmp8 = insertelement <4 x float> %tmp7, float %tmp, i32 2
%tmp9 = insertelement <4 x float> %tmp8, float %tmp, i32 3
%tmp10 = fmul <4 x float> %tmp9, %tmp5
store <4 x float> %tmp10, ptr %dst, align 4
ret void
}