llvm-project/llvm/test/CodeGen/X86/extract-store.ll
Noah Goldstein 69a322fed1 Add new pass X86FixupInstTuning for fixing up machine-instruction selection.
There are a variety of cases where we want more control over the exact
instruction emitted. This commit creates a new pass to fix up
instructions after the DAG has been lowered. The pass is only meant to
replace instructions that are guaranteed to be interchangeable, not to
do analysis for special cases.

Handling these instruction changes in X86ISelLowering or
X86ISelDAGToDAG isn't ideal, as it's liable either to break existing
patterns that expect a certain instruction or to generate infinite
loops.

In addition, operating at the MachineInstr level lets us access
scheduling/code-size information when making these decisions.

Currently this only implements `{v}permilps` -> `{v}shufps`/`{v}pshufd`,
but more transforms can be added.
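
For illustration (a sketch of the ISA semantics, not text from the
original commit): with an immediate control and a single source
register, the following lane permutes all compute the same result, so
the pass is free to pick whichever the target's scheduler model
prefers:

  vpermilps $0x1b, %xmm0, %xmm0        # float domain, one source
  vshufps $0x1b, %xmm0, %xmm0, %xmm0   # float domain, source repeated
  vpshufd $0x1b, %xmm0, %xmm0          # integer domain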

Differential Revision: https://reviews.llvm.org/D143787
2023-02-27 18:53:25 -06:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,SSE-X86,SSE2-X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64,SSE-X64,SSE2-X64
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=X86,SSE-X86,SSE41-X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=X64,SSE-X64,SSE41-X64
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefixes=X86,AVX-X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,AVX-X64
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+sse -enable-legalize-types-checking | FileCheck %s --check-prefixes=X64,SSE-X64,SSE2-X64
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+sse -enable-legalize-types-checking | FileCheck %s --check-prefixes=X64,SSE-X64,SSE2-X64
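; Stores of an extracted i8 lane: SSE2 has no byte extract, so the element
; goes through a GPR (movd or pextrw, then movb); SSE4.1 and AVX store the
; byte directly to memory with (v)pextrb.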
define void @extract_i8_0(ptr nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X86-LABEL: extract_i8_0:
; SSE2-X86: # %bb.0:
; SSE2-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X86-NEXT: movd %xmm0, %ecx
; SSE2-X86-NEXT: movb %cl, (%eax)
; SSE2-X86-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_0:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: movb %al, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X86-LABEL: extract_i8_0:
; SSE41-X86: # %bb.0:
; SSE41-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X86-NEXT: pextrb $0, %xmm0, (%eax)
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_0:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $0, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i8_0:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vpextrb $0, %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $0, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 0
store i8 %vecext, ptr %dst, align 1
ret void
}
define void @extract_i8_3(ptr nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X86-LABEL: extract_i8_3:
; SSE2-X86: # %bb.0:
; SSE2-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X86-NEXT: movd %xmm0, %ecx
; SSE2-X86-NEXT: shrl $24, %ecx
; SSE2-X86-NEXT: movb %cl, (%eax)
; SSE2-X86-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_3:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: shrl $24, %eax
; SSE2-X64-NEXT: movb %al, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X86-LABEL: extract_i8_3:
; SSE41-X86: # %bb.0:
; SSE41-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X86-NEXT: pextrb $3, %xmm0, (%eax)
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_3:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i8_3:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vpextrb $3, %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_3:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 3
store i8 %vecext, ptr %dst, align 1
ret void
}
define void @extract_i8_15(ptr nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X86-LABEL: extract_i8_15:
; SSE2-X86: # %bb.0:
; SSE2-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X86-NEXT: pextrw $7, %xmm0, %ecx
; SSE2-X86-NEXT: movb %ch, (%eax)
; SSE2-X86-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_15:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
; SSE2-X64-NEXT: movb %ah, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X86-LABEL: extract_i8_15:
; SSE41-X86: # %bb.0:
; SSE41-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X86-NEXT: pextrb $15, %xmm0, (%eax)
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_15:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $15, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i8_15:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vpextrb $15, %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_15:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $15, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 15
store i8 %vecext, ptr %dst, align 1
ret void
}
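; Stores of an extracted i16 lane: SSE2's pextrw can only extract to a GPR,
; so a movw store follows; SSE4.1 and AVX use (v)pextrw straight to memory.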
define void @extract_i16_0(ptr nocapture %dst, <8 x i16> %foo) nounwind {
; SSE2-X86-LABEL: extract_i16_0:
; SSE2-X86: # %bb.0:
; SSE2-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X86-NEXT: movd %xmm0, %ecx
; SSE2-X86-NEXT: movw %cx, (%eax)
; SSE2-X86-NEXT: retl
;
; SSE2-X64-LABEL: extract_i16_0:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: movw %ax, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X86-LABEL: extract_i16_0:
; SSE41-X86: # %bb.0:
; SSE41-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X86-NEXT: pextrw $0, %xmm0, (%eax)
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: extract_i16_0:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrw $0, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i16_0:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vpextrw $0, %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i16_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrw $0, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <8 x i16> %foo, i32 0
store i16 %vecext, ptr %dst, align 1
ret void
}
define void @extract_i16_7(ptr nocapture %dst, <8 x i16> %foo) nounwind {
; SSE2-X86-LABEL: extract_i16_7:
; SSE2-X86: # %bb.0:
; SSE2-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X86-NEXT: pextrw $7, %xmm0, %ecx
; SSE2-X86-NEXT: movw %cx, (%eax)
; SSE2-X86-NEXT: retl
;
; SSE2-X64-LABEL: extract_i16_7:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
; SSE2-X64-NEXT: movw %ax, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X86-LABEL: extract_i16_7:
; SSE41-X86: # %bb.0:
; SSE41-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X86-NEXT: pextrw $7, %xmm0, (%eax)
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: extract_i16_7:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrw $7, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i16_7:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vpextrw $7, %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i16_7:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrw $7, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <8 x i16> %foo, i32 7
store i16 %vecext, ptr %dst, align 1
ret void
}
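; Stores of an extracted i32 lane: lane 0 is just a movss store; for higher
; lanes SSE2 must shuffle the lane down first, while SSE4.1 and AVX store it
; directly with (v)extractps.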
define void @extract_i32_0(ptr nocapture %dst, <4 x i32> %foo) nounwind {
; SSE-X86-LABEL: extract_i32_0:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT: movss %xmm0, (%eax)
; SSE-X86-NEXT: retl
;
; SSE-X64-LABEL: extract_i32_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movss %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i32_0:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vmovss %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i32_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <4 x i32> %foo, i32 0
store i32 %vecext, ptr %dst, align 1
ret void
}
define void @extract_i32_3(ptr nocapture %dst, <4 x i32> %foo) nounwind {
; SSE2-X86-LABEL: extract_i32_3:
; SSE2-X86: # %bb.0:
; SSE2-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-X86-NEXT: movd %xmm0, (%eax)
; SSE2-X86-NEXT: retl
;
; SSE2-X64-LABEL: extract_i32_3:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-X64-NEXT: movd %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X86-LABEL: extract_i32_3:
; SSE41-X86: # %bb.0:
; SSE41-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X86-NEXT: extractps $3, %xmm0, (%eax)
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: extract_i32_3:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: extractps $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i32_3:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vextractps $3, %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i32_3:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vextractps $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <4 x i32> %foo, i32 3
store i32 %vecext, ptr %dst, align 1
ret void
}
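; Stores of an extracted i64 lane: lane 0 is a movlps store. Lane 1 needs a
; shuffle plus movq/movlps on SSE2 and on all 32-bit targets (pextrq needs a
; 64-bit operand size); SSE4.1 and AVX on x86-64 use (v)pextrq to memory.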
define void @extract_i64_0(ptr nocapture %dst, <2 x i64> %foo) nounwind {
; SSE-X86-LABEL: extract_i64_0:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT: movlps %xmm0, (%eax)
; SSE-X86-NEXT: retl
;
; SSE-X64-LABEL: extract_i64_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movlps %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i64_0:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vmovlps %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i64_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x i64> %foo, i32 0
store i64 %vecext, ptr %dst, align 1
ret void
}
define void @extract_i64_1(ptr nocapture %dst, <2 x i64> %foo) nounwind {
; SSE-X86-LABEL: extract_i64_1:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-X86-NEXT: movq %xmm0, (%eax)
; SSE-X86-NEXT: retl
;
; SSE2-X64-LABEL: extract_i64_1:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-X64-NEXT: movq %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X64-LABEL: extract_i64_1:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrq $1, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_i64_1:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-X86-NEXT: vmovlps %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_i64_1:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrq $1, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x i64> %foo, i32 1
store i64 %vecext, ptr %dst, align 1
ret void
}
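; Stores of an extracted float lane mirror the i32 cases: movss for lane 0,
; shufps+movss on SSE2 for higher lanes, and (v)extractps on SSE4.1/AVX.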
define void @extract_f32_0(ptr nocapture %dst, <4 x float> %foo) nounwind {
; SSE-X86-LABEL: extract_f32_0:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT: movss %xmm0, (%eax)
; SSE-X86-NEXT: retl
;
; SSE-X64-LABEL: extract_f32_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movss %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_f32_0:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vmovss %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_f32_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <4 x float> %foo, i32 0
store float %vecext, ptr %dst, align 1
ret void
}
define void @extract_f32_3(ptr nocapture %dst, <4 x float> %foo) nounwind {
; SSE2-X86-LABEL: extract_f32_3:
; SSE2-X86: # %bb.0:
; SSE2-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-X86-NEXT: movss %xmm0, (%eax)
; SSE2-X86-NEXT: retl
;
; SSE2-X64-LABEL: extract_f32_3:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-X64-NEXT: movss %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X86-LABEL: extract_f32_3:
; SSE41-X86: # %bb.0:
; SSE41-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X86-NEXT: extractps $3, %xmm0, (%eax)
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: extract_f32_3:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: extractps $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_f32_3:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vextractps $3, %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_f32_3:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vextractps $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <4 x float> %foo, i32 3
store float %vecext, ptr %dst, align 1
ret void
}
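; Stores of an extracted double lane never need a shuffle: movlps stores the
; low element and movhps the high one.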
define void @extract_f64_0(ptr nocapture %dst, <2 x double> %foo) nounwind {
; SSE-X86-LABEL: extract_f64_0:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT: movlps %xmm0, (%eax)
; SSE-X86-NEXT: retl
;
; SSE-X64-LABEL: extract_f64_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movlps %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_f64_0:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vmovlps %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_f64_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x double> %foo, i32 0
store double %vecext, ptr %dst, align 1
ret void
}
define void @extract_f64_1(ptr nocapture %dst, <2 x double> %foo) nounwind {
; SSE-X86-LABEL: extract_f64_1:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT: movhps %xmm0, (%eax)
; SSE-X86-NEXT: retl
;
; SSE-X64-LABEL: extract_f64_1:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movhps %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_f64_1:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vmovhps %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_f64_1:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovhps %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x double> %foo, i32 1
store double %vecext, ptr %dst, align 1
ret void
}
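; On 32-bit targets the fp128 element is passed on the stack: the SSE
; lowering copies it through four 32-bit GPR moves, while AVX reloads and
; stores it with vmovups. On x86-64 each fp128 element is passed in its own
; XMM register, so the store is a plain movups of %xmm0 or %xmm1.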
define void @extract_f128_0(ptr nocapture %dst, <2 x fp128> %foo) nounwind {
; SSE-X86-LABEL: extract_f128_0:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: pushl %edi
; SSE-X86-NEXT: pushl %esi
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; SSE-X86-NEXT: movl %esi, 12(%edi)
; SSE-X86-NEXT: movl %edx, 8(%edi)
; SSE-X86-NEXT: movl %ecx, 4(%edi)
; SSE-X86-NEXT: movl %eax, (%edi)
; SSE-X86-NEXT: popl %esi
; SSE-X86-NEXT: popl %edi
; SSE-X86-NEXT: retl
;
; SSE-X64-LABEL: extract_f128_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movups %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_f128_0:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vmovups %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_f128_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovups %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x fp128> %foo, i32 0
store fp128 %vecext, ptr %dst, align 1
ret void
}
define void @extract_f128_1(ptr nocapture %dst, <2 x fp128> %foo) nounwind {
; SSE-X86-LABEL: extract_f128_1:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: pushl %edi
; SSE-X86-NEXT: pushl %esi
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; SSE-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; SSE-X86-NEXT: movl %esi, 12(%edi)
; SSE-X86-NEXT: movl %edx, 8(%edi)
; SSE-X86-NEXT: movl %ecx, 4(%edi)
; SSE-X86-NEXT: movl %eax, (%edi)
; SSE-X86-NEXT: popl %esi
; SSE-X86-NEXT: popl %edi
; SSE-X86-NEXT: retl
;
; SSE-X64-LABEL: extract_f128_1:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movups %xmm1, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X86-LABEL: extract_f128_1:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT: vmovups %xmm0, (%eax)
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: extract_f128_1:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovups %xmm1, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x fp128> %foo, i32 1
store fp128 %vecext, ptr %dst, align 1
ret void
}
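; Out-of-range extractelement indices yield an undefined result, so the
; store is folded away and each function reduces to a bare return.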
define void @extract_i8_undef(ptr nocapture %dst, <16 x i8> %foo) nounwind {
; X86-LABEL: extract_i8_undef:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: extract_i8_undef:
; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 16 ; undef
store i8 %vecext, ptr %dst, align 1
ret void
}
define void @extract_i16_undef(ptr nocapture %dst, <8 x i16> %foo) nounwind {
; X86-LABEL: extract_i16_undef:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: extract_i16_undef:
; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <8 x i16> %foo, i32 9 ; undef
store i16 %vecext, ptr %dst, align 1
ret void
}
define void @extract_i32_undef(ptr nocapture %dst, <4 x i32> %foo) nounwind {
; X86-LABEL: extract_i32_undef:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: extract_i32_undef:
; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %foo, i32 6 ; undef
store i32 %vecext, ptr %dst, align 1
ret void
}
define void @extract_i64_undef(ptr nocapture %dst, <2 x i64> %foo) nounwind {
; X86-LABEL: extract_i64_undef:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: extract_i64_undef:
; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <2 x i64> %foo, i32 2 ; undef
store i64 %vecext, ptr %dst, align 1
ret void
}
define void @extract_f32_undef(ptr nocapture %dst, <4 x float> %foo) nounwind {
; X86-LABEL: extract_f32_undef:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: extract_f32_undef:
; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <4 x float> %foo, i32 6 ; undef
store float %vecext, ptr %dst, align 1
ret void
}
define void @extract_f64_undef(ptr nocapture %dst, <2 x double> %foo) nounwind {
; X86-LABEL: extract_f64_undef:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: extract_f64_undef:
; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <2 x double> %foo, i32 2 ; undef
store double %vecext, ptr %dst, align 1
ret void
}
define void @extract_f128_undef(ptr nocapture %dst, <2 x fp128> %foo) nounwind {
; X86-LABEL: extract_f128_undef:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: extract_f128_undef:
; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <2 x fp128> %foo, i32 2 ; undef
store fp128 %vecext, ptr %dst, align 1
ret void
}