llvm-project/llvm/test/CodeGen/X86/vector-pack-128.ll
Jeremy Morse e6bf48d110
[X86] Don't request 0x90 nop filling in p2align directives (#110134)
As of rev ea222be0d, LLVM's assembler actually honours the "fill value"
part of p2align directives. X86 printed these with a 0x90 fill, which
isn't what it really wanted: we want multi-byte nops for .text padding.
Since ea222be0d, compiling via a textual assembly file therefore produces
single-byte nop padding, while the built-in assembler produces multi-byte
nops. This divergent behaviour is undesirable.

To fix this, don't set the fill value for x86, which leaves the assembler
free to pick multi-byte nops. Test that we get the same multi-byte padding
whether we compile via textual assembly or directly to an object file:
added same-align-bytes-with-llasm-llobj.ll to that effect, and updated
numerous other tests so they no longer check for the explicit padding.
2024-10-02 11:14:05 +01:00
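
As a minimal sketch of the directive change described above (using the .p2align 4 alignment that appears in the CHECK lines of this test; the alignment value itself is incidental):

    .p2align 4, 0x90    # old output: explicit 0x90 fill, now honoured as repeated single-byte nops
    .p2align 4          # new output: no fill value, so the assembler may choose multi-byte nops for .text padding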

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE4
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
; trunc(concat(x,y)) -> pack
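; In each case the shift and the and-mask keep every element inside the
; destination element's representable range (e.g. ashr $17 of an i32 yields
; [-16384, 16383], within signed i16), so the saturating pack never clamps
; and exactly implements the truncate of the concatenation.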
define <8 x i16> @trunc_concat_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: trunc_concat_packssdw_128:
; SSE: # %bb.0:
; SSE-NEXT: psrad $17, %xmm0
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_concat_packssdw_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $17, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_concat_packssdw_128:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $17, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_concat_packssdw_128:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $17, %xmm0, %xmm0
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = ashr <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
%2 = and <4 x i32> %a1, <i32 15, i32 15, i32 15, i32 15>
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%4 = trunc <8 x i32> %3 to <8 x i16>
ret <8 x i16> %4
}
define <8 x i16> @trunc_concat_packusdw_128(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE2-LABEL: trunc_concat_packusdw_128:
; SSE2: # %bb.0:
; SSE2-NEXT: psrld $17, %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: trunc_concat_packusdw_128:
; SSE4: # %bb.0:
; SSE4-NEXT: psrld $17, %xmm0
; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE4-NEXT: packusdw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: trunc_concat_packusdw_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $17, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_concat_packusdw_128:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $17, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_concat_packusdw_128:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrld $17, %xmm0, %xmm0
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = lshr <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
%2 = and <4 x i32> %a1, <i32 15, i32 15, i32 15, i32 15>
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%4 = trunc <8 x i32> %3 to <8 x i16>
ret <8 x i16> %4
}
define <16 x i8> @trunc_concat_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE-LABEL: trunc_concat_packsswb_128:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_concat_packsswb_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_concat_packsswb_128:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_concat_packsswb_128:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%2 = and <8 x i16> %a1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%4 = trunc <16 x i16> %3 to <16 x i8>
ret <16 x i8> %4
}
define <16 x i8> @trunc_concat_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE-LABEL: trunc_concat_packuswb_128:
; SSE: # %bb.0:
; SSE-NEXT: psrlw $15, %xmm0
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_concat_packuswb_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_concat_packuswb_128:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_concat_packuswb_128:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = lshr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%2 = and <8 x i16> %a1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%4 = trunc <16 x i16> %3 to <16 x i8>
ret <16 x i8> %4
}
; concat(trunc(x),trunc(y)) -> pack
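; Same fold, but the IR truncates each half first and then concatenates the
; narrow results; note that some of the SSE2 cases below lower to per-half
; packs rejoined with punpcklqdq rather than a single pack.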
define <8 x i16> @concat_trunc_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE2-LABEL: concat_trunc_packssdw_128:
; SSE2: # %bb.0:
; SSE2-NEXT: psrad $17, %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: packssdw %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE4-LABEL: concat_trunc_packssdw_128:
; SSE4: # %bb.0:
; SSE4-NEXT: psrad $17, %xmm0
; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE4-NEXT: packssdw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: concat_trunc_packssdw_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $17, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: concat_trunc_packssdw_128:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $17, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: concat_trunc_packssdw_128:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $17, %xmm0, %xmm0
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = ashr <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
%2 = and <4 x i32> %a1, <i32 15, i32 15, i32 15, i32 15>
%3 = trunc <4 x i32> %1 to <4 x i16>
%4 = trunc <4 x i32> %2 to <4 x i16>
%5 = shufflevector <4 x i16> %3, <4 x i16> %4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i16> %5
}
define <8 x i16> @concat_trunc_packusdw_128(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE2-LABEL: concat_trunc_packusdw_128:
; SSE2: # %bb.0:
; SSE2-NEXT: psrld $17, %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: packssdw %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE4-LABEL: concat_trunc_packusdw_128:
; SSE4: # %bb.0:
; SSE4-NEXT: psrld $17, %xmm0
; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE4-NEXT: packusdw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: concat_trunc_packusdw_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $17, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: concat_trunc_packusdw_128:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $17, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: concat_trunc_packusdw_128:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrld $17, %xmm0, %xmm0
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = lshr <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
%2 = and <4 x i32> %a1, <i32 15, i32 15, i32 15, i32 15>
%3 = trunc <4 x i32> %1 to <4 x i16>
%4 = trunc <4 x i32> %2 to <4 x i16>
%5 = shufflevector <4 x i16> %3, <4 x i16> %4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i16> %5
}
define <16 x i8> @concat_trunc_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE-LABEL: concat_trunc_packsswb_128:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: concat_trunc_packsswb_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: concat_trunc_packsswb_128:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: concat_trunc_packsswb_128:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%2 = and <8 x i16> %a1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = trunc <8 x i16> %1 to <8 x i8>
%4 = trunc <8 x i16> %2 to <8 x i8>
%5 = shufflevector <8 x i8> %3, <8 x i8> %4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x i8> %5
}
define <16 x i8> @concat_trunc_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE-LABEL: concat_trunc_packuswb_128:
; SSE: # %bb.0:
; SSE-NEXT: psrlw $15, %xmm0
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: concat_trunc_packuswb_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: concat_trunc_packuswb_128:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: concat_trunc_packuswb_128:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = lshr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%2 = and <8 x i16> %a1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = trunc <8 x i16> %1 to <8 x i8>
%4 = trunc <8 x i16> %2 to <8 x i8>
%5 = shufflevector <8 x i8> %3, <8 x i8> %4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x i8> %5
}
; Fuzz test - don't pack a v1i32 comparison result.
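; The unsigned icmp against zero is always true, so the zero-extended result
; folds to the constant 1 stored in the infinite loop (movw $1, 0 below).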
define void @autogen_SD10339(<1 x i32> %I49) {
; CHECK-LABEL: autogen_SD10339:
; CHECK: # %bb.0: # %BB
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB8_1: # %CF
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movw $1, 0
; CHECK-NEXT: jmp .LBB8_1
BB:
%Cmp53 = icmp uge <1 x i32> %I49, zeroinitializer
br label %CF
CF: ; preds = %CF, %BB
%ZE166 = zext <1 x i1> %Cmp53 to <1 x i16>
store <1 x i16> %ZE166, ptr null, align 2
br label %CF
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX: {{.*}}