Quinn Pham 335e8bf100 [PowerPC] emit VSX instructions instead of VMX instructions for vector loads and stores
This patch changes the PowerPC backend to generate VSX load/store instructions
instead of VMX load/store instructions for all vector loads/stores on Power8 and
earlier (LE). VMX load/store instructions require the vector to be 16-byte
aligned, so a vector load/store emitted with VMX instructions fails when the
vector is misaligned. `gcc` already generates VSX instructions in this
situation; they allow unaligned access but require a swap instruction after
loading/before storing. This is not an issue for BE because we already emit VSX
instructions there, since no swap is required. Nor is it an issue on Power9 and
up, where `lxv[x]`/`stxv[x]` allow unaligned access and do not require swaps.

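As a rough illustration (a hypothetical function, not part of this test; the
exact registers depend on allocation), a plain little-endian <2 x i64> load on
Power8 and earlier now lowers to the swap-based VSX sequence instead of `lvx`:

  define <2 x i64> @load_v2i64(<2 x i64>* %p) {
    %v = load <2 x i64>, <2 x i64>* %p, align 8  ; only 8-byte aligned
    ret <2 x i64> %v
  }

  ; expected LE lowering, matching the lxvd2x/xxswapd pattern in the checks below:
  ;   lxvd2x 0, 0, 3
  ;   xxswapd 34, 0
  ;   blr
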
This patch also delays the VSX load/store combines for LE until after
LegalizeOps so that other load/store combines take priority.

Reviewed By: #powerpc, stefanp

Differential Revision: https://reviews.llvm.org/D127309
2022-06-15 12:06:04 -05:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Test the doubleword comparison expansions on Power7
;
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr7 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN: -mcpu=pwr7 < %s | FileCheck %s --check-prefix=CHECK-BE
define <2 x i64> @v2si64_cmp(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
; CHECK-LABEL: v2si64_cmp:
; CHECK: # %bb.0:
; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: addi 3, 3, .LCPI0_0@toc@l
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-NEXT: xxswapd 35, 0
; CHECK-NEXT: vperm 3, 2, 2, 3
; CHECK-NEXT: xxland 34, 35, 34
; CHECK-NEXT: blr
;
; CHECK-BE-LABEL: v2si64_cmp:
; CHECK-BE: # %bb.0:
; CHECK-BE-NEXT: vcmpequw 2, 2, 3
; CHECK-BE-NEXT: addis 3, 2, .LCPI0_0@toc@ha
; CHECK-BE-NEXT: addi 3, 3, .LCPI0_0@toc@l
; CHECK-BE-NEXT: lxvw4x 35, 0, 3
; CHECK-BE-NEXT: vperm 3, 2, 2, 3
; CHECK-BE-NEXT: xxland 34, 35, 34
; CHECK-BE-NEXT: blr
%cmp = icmp eq <2 x i64> %x, %y
%result = sext <2 x i1> %cmp to <2 x i64>
ret <2 x i64> %result
}
; Greater than signed
define <2 x i64> @v2si64_cmp_gt(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
; CHECK-LABEL: v2si64_cmp_gt:
; CHECK: # %bb.0:
; CHECK-NEXT: xxswapd 0, 35
; CHECK-NEXT: addi 3, 1, -32
; CHECK-NEXT: addi 4, 1, -48
; CHECK-NEXT: xxswapd 1, 34
; CHECK-NEXT: stxvd2x 0, 0, 3
; CHECK-NEXT: stxvd2x 1, 0, 4
; CHECK-NEXT: ld 3, -24(1)
; CHECK-NEXT: ld 4, -40(1)
; CHECK-NEXT: ld 6, -48(1)
; CHECK-NEXT: cmpd 4, 3
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: li 4, -1
; CHECK-NEXT: iselgt 5, 4, 3
; CHECK-NEXT: std 5, -8(1)
; CHECK-NEXT: ld 5, -32(1)
; CHECK-NEXT: cmpd 6, 5
; CHECK-NEXT: iselgt 3, 4, 3
; CHECK-NEXT: std 3, -16(1)
; CHECK-NEXT: addi 3, 1, -16
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-NEXT: xxswapd 34, 0
; CHECK-NEXT: blr
;
; CHECK-BE-LABEL: v2si64_cmp_gt:
; CHECK-BE: # %bb.0:
; CHECK-BE-NEXT: addi 3, 1, -32
; CHECK-BE-NEXT: addi 4, 1, -48
; CHECK-BE-NEXT: stxvd2x 35, 0, 3
; CHECK-BE-NEXT: stxvd2x 34, 0, 4
; CHECK-BE-NEXT: ld 3, -24(1)
; CHECK-BE-NEXT: ld 4, -40(1)
; CHECK-BE-NEXT: ld 6, -48(1)
; CHECK-BE-NEXT: cmpd 4, 3
; CHECK-BE-NEXT: li 3, 0
; CHECK-BE-NEXT: li 4, -1
; CHECK-BE-NEXT: iselgt 5, 4, 3
; CHECK-BE-NEXT: std 5, -8(1)
; CHECK-BE-NEXT: ld 5, -32(1)
; CHECK-BE-NEXT: cmpd 6, 5
; CHECK-BE-NEXT: iselgt 3, 4, 3
; CHECK-BE-NEXT: std 3, -16(1)
; CHECK-BE-NEXT: addi 3, 1, -16
; CHECK-BE-NEXT: lxvd2x 34, 0, 3
; CHECK-BE-NEXT: blr
%cmp = icmp sgt <2 x i64> %x, %y
%result = sext <2 x i1> %cmp to <2 x i64>
ret <2 x i64> %result
}
; Greater than unsigned
define <2 x i64> @v2ui64_cmp_gt(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
; CHECK-LABEL: v2ui64_cmp_gt:
; CHECK: # %bb.0:
; CHECK-NEXT: xxswapd 0, 35
; CHECK-NEXT: addi 3, 1, -32
; CHECK-NEXT: addi 4, 1, -48
; CHECK-NEXT: xxswapd 1, 34
; CHECK-NEXT: stxvd2x 0, 0, 3
; CHECK-NEXT: stxvd2x 1, 0, 4
; CHECK-NEXT: ld 3, -24(1)
; CHECK-NEXT: ld 4, -40(1)
; CHECK-NEXT: ld 6, -48(1)
; CHECK-NEXT: cmpld 4, 3
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: li 4, -1
; CHECK-NEXT: iselgt 5, 4, 3
; CHECK-NEXT: std 5, -8(1)
; CHECK-NEXT: ld 5, -32(1)
; CHECK-NEXT: cmpld 6, 5
; CHECK-NEXT: iselgt 3, 4, 3
; CHECK-NEXT: std 3, -16(1)
; CHECK-NEXT: addi 3, 1, -16
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-NEXT: xxswapd 34, 0
; CHECK-NEXT: blr
;
; CHECK-BE-LABEL: v2ui64_cmp_gt:
; CHECK-BE: # %bb.0:
; CHECK-BE-NEXT: addi 3, 1, -32
; CHECK-BE-NEXT: addi 4, 1, -48
; CHECK-BE-NEXT: stxvd2x 35, 0, 3
; CHECK-BE-NEXT: stxvd2x 34, 0, 4
; CHECK-BE-NEXT: ld 3, -24(1)
; CHECK-BE-NEXT: ld 4, -40(1)
; CHECK-BE-NEXT: ld 6, -48(1)
; CHECK-BE-NEXT: cmpld 4, 3
; CHECK-BE-NEXT: li 3, 0
; CHECK-BE-NEXT: li 4, -1
; CHECK-BE-NEXT: iselgt 5, 4, 3
; CHECK-BE-NEXT: std 5, -8(1)
; CHECK-BE-NEXT: ld 5, -32(1)
; CHECK-BE-NEXT: cmpld 6, 5
; CHECK-BE-NEXT: iselgt 3, 4, 3
; CHECK-BE-NEXT: std 3, -16(1)
; CHECK-BE-NEXT: addi 3, 1, -16
; CHECK-BE-NEXT: lxvd2x 34, 0, 3
; CHECK-BE-NEXT: blr
%cmp = icmp ugt <2 x i64> %x, %y
%result = sext <2 x i1> %cmp to <2 x i64>
ret <2 x i64> %result
}
; Check the intrinsics also
declare i32 @llvm.ppc.altivec.vcmpequd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
declare i32 @llvm.ppc.altivec.vcmpgtsd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
declare i32 @llvm.ppc.altivec.vcmpgtud.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_vcmpequd_p(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: test_vcmpequd_p:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: xxlxor 35, 35, 35
; CHECK-NEXT: xxsldwi 0, 34, 34, 1
; CHECK-NEXT: xxland 0, 0, 34
; CHECK-NEXT: xxspltw 1, 0, 2
; CHECK-NEXT: xxspltw 0, 0, 0
; CHECK-NEXT: xxmrghd 34, 0, 1
; CHECK-NEXT: vcmpgtub. 2, 2, 3
; CHECK-NEXT: mfocrf 3, 2
; CHECK-NEXT: rlwinm 3, 3, 25, 31, 31
; CHECK-NEXT: blr
;
; CHECK-BE-LABEL: test_vcmpequd_p:
; CHECK-BE: # %bb.0:
; CHECK-BE-NEXT: vcmpequw 2, 2, 3
; CHECK-BE-NEXT: xxlxor 35, 35, 35
; CHECK-BE-NEXT: xxsldwi 0, 34, 34, 1
; CHECK-BE-NEXT: xxland 0, 0, 34
; CHECK-BE-NEXT: xxspltw 1, 0, 2
; CHECK-BE-NEXT: xxspltw 0, 0, 0
; CHECK-BE-NEXT: xxmrghd 34, 0, 1
; CHECK-BE-NEXT: vcmpgtub. 2, 2, 3
; CHECK-BE-NEXT: mfocrf 3, 2
; CHECK-BE-NEXT: rlwinm 3, 3, 25, 31, 31
; CHECK-BE-NEXT: blr
%tmp = tail call i32 @llvm.ppc.altivec.vcmpequd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
ret i32 %tmp
}
define i32 @test_vcmpgtsd_p(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: test_vcmpgtsd_p:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpgtuw 4, 2, 3
; CHECK-NEXT: vcmpequw 5, 2, 3
; CHECK-NEXT: vcmpgtsw 2, 2, 3
; CHECK-NEXT: xxlxor 35, 35, 35
; CHECK-NEXT: xxsldwi 0, 36, 36, 1
; CHECK-NEXT: xxland 0, 0, 37
; CHECK-NEXT: xxlor 0, 34, 0
; CHECK-NEXT: xxspltw 1, 0, 2
; CHECK-NEXT: xxspltw 0, 0, 0
; CHECK-NEXT: xxmrghd 34, 0, 1
; CHECK-NEXT: vcmpgtub. 2, 2, 3
; CHECK-NEXT: mfocrf 3, 2
; CHECK-NEXT: rlwinm 3, 3, 25, 31, 31
; CHECK-NEXT: blr
;
; CHECK-BE-LABEL: test_vcmpgtsd_p:
; CHECK-BE: # %bb.0:
; CHECK-BE-NEXT: vcmpgtuw 4, 2, 3
; CHECK-BE-NEXT: vcmpequw 5, 2, 3
; CHECK-BE-NEXT: vcmpgtsw 2, 2, 3
; CHECK-BE-NEXT: xxlxor 35, 35, 35
; CHECK-BE-NEXT: xxsldwi 0, 36, 36, 1
; CHECK-BE-NEXT: xxland 0, 0, 37
; CHECK-BE-NEXT: xxlor 0, 34, 0
; CHECK-BE-NEXT: xxspltw 1, 0, 2
; CHECK-BE-NEXT: xxspltw 0, 0, 0
; CHECK-BE-NEXT: xxmrghd 34, 0, 1
; CHECK-BE-NEXT: vcmpgtub. 2, 2, 3
; CHECK-BE-NEXT: mfocrf 3, 2
; CHECK-BE-NEXT: rlwinm 3, 3, 25, 31, 31
; CHECK-BE-NEXT: blr
%tmp = tail call i32 @llvm.ppc.altivec.vcmpgtsd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
ret i32 %tmp
}
define i32 @test_vcmpgtud_p(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: test_vcmpgtud_p:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpgtuw 4, 2, 3
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: xxlxor 35, 35, 35
; CHECK-NEXT: xxsldwi 0, 36, 36, 1
; CHECK-NEXT: xxland 0, 0, 34
; CHECK-NEXT: xxlor 0, 36, 0
; CHECK-NEXT: xxspltw 1, 0, 2
; CHECK-NEXT: xxspltw 0, 0, 0
; CHECK-NEXT: xxmrghd 34, 0, 1
; CHECK-NEXT: vcmpgtub. 2, 2, 3
; CHECK-NEXT: mfocrf 3, 2
; CHECK-NEXT: rlwinm 3, 3, 25, 31, 31
; CHECK-NEXT: blr
;
; CHECK-BE-LABEL: test_vcmpgtud_p:
; CHECK-BE: # %bb.0:
; CHECK-BE-NEXT: vcmpgtuw 4, 2, 3
; CHECK-BE-NEXT: vcmpequw 2, 2, 3
; CHECK-BE-NEXT: xxlxor 35, 35, 35
; CHECK-BE-NEXT: xxsldwi 0, 36, 36, 1
; CHECK-BE-NEXT: xxland 0, 0, 34
; CHECK-BE-NEXT: xxlor 0, 36, 0
; CHECK-BE-NEXT: xxspltw 1, 0, 2
; CHECK-BE-NEXT: xxspltw 0, 0, 0
; CHECK-BE-NEXT: xxmrghd 34, 0, 1
; CHECK-BE-NEXT: vcmpgtub. 2, 2, 3
; CHECK-BE-NEXT: mfocrf 3, 2
; CHECK-BE-NEXT: rlwinm 3, 3, 25, 31, 31
; CHECK-BE-NEXT: blr
%tmp = tail call i32 @llvm.ppc.altivec.vcmpgtud.p(i32 2, <2 x i64> %x, <2 x i64> %y)
ret i32 %tmp
}