
The i386 psABI specifies that `__float128` has 16-byte alignment and must be passed on the stack; however, LLVM currently stores it in a stack slot at an offset of 4, i.e. with only 4-byte alignment. Add a custom lowering to correct this alignment to 16 bytes. i386 does not specify an `__int128`, but it seems reasonable to keep the same behavior as `__float128`, so this is changed as well; there also isn't a good way to distinguish whether a set of four registers came from an integer or a float. The main test demonstrating this change is `store_perturbed` in `llvm/test/CodeGen/X86/i128-fp128-abi.ll`.

Referenced ABI: https://gitlab.com/x86-psABIs/i386-ABI/-/wikis/uploads/14c05f1b1e156e0e46b61bfa7c1df1e2/intel386-psABI-2020-08-07.pdf

Fixes: https://github.com/llvm/llvm-project/issues/77401
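As a rough illustration of the kind of IR this lowering affects (the function below is hypothetical, not taken from the patch's tests), an `fp128` value on i386 must land in a 16-byte-aligned location per the psABI; before this change the backend could place such a value in a stack slot with only 4-byte alignment:

```llvm
; Sketch only: with the fix, llc -mtriple=i686-unknown-unknown should realign
; the frame (e.g. andl $-16, %esp) before using a stack slot for the fp128,
; much like the scmp.8.128 X86 output below does for an i128 argument.
define void @store_f128(ptr %dst, fp128 %val) nounwind {
  ; The psABI requires 16-byte alignment for __float128 objects.
  store fp128 %val, ptr %dst, align 16
  ret void
}
```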
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefixes=X64,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=X64,SSE,SSE4
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=X64,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X64,AVX,AVX512
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86

define i8 @scmp.8.8(i8 %x, i8 %y) nounwind {
; X64-LABEL: scmp.8.8:
; X64: # %bb.0:
; X64-NEXT: cmpb %sil, %dil
; X64-NEXT: setl %cl
; X64-NEXT: setg %al
; X64-NEXT: subb %cl, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp.8.8:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %cl
; X86-NEXT: setg %al
; X86-NEXT: subb %cl, %al
; X86-NEXT: retl
  %1 = call i8 @llvm.scmp(i8 %x, i8 %y)
  ret i8 %1
}

define i8 @scmp.8.16(i16 %x, i16 %y) nounwind {
; X64-LABEL: scmp.8.16:
; X64: # %bb.0:
; X64-NEXT: cmpw %si, %di
; X64-NEXT: setl %cl
; X64-NEXT: setg %al
; X64-NEXT: subb %cl, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp.8.16:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpw {{[0-9]+}}(%esp), %ax
; X86-NEXT: setl %cl
; X86-NEXT: setg %al
; X86-NEXT: subb %cl, %al
; X86-NEXT: retl
  %1 = call i8 @llvm.scmp(i16 %x, i16 %y)
  ret i8 %1
}

define i8 @scmp.8.32(i32 %x, i32 %y) nounwind {
; X64-LABEL: scmp.8.32:
; X64: # %bb.0:
; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: setl %cl
; X64-NEXT: setg %al
; X64-NEXT: subb %cl, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp.8.32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; X86-NEXT: setl %cl
; X86-NEXT: setg %al
; X86-NEXT: subb %cl, %al
; X86-NEXT: retl
  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
  ret i8 %1
}

define i8 @scmp.8.64(i64 %x, i64 %y) nounwind {
; X64-LABEL: scmp.8.64:
; X64: # %bb.0:
; X64-NEXT: cmpq %rsi, %rdi
; X64-NEXT: setl %cl
; X64-NEXT: setg %al
; X64-NEXT: subb %cl, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp.8.64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: cmpl %eax, %edx
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %ecx, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edx, %eax
; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: setl %al
; X86-NEXT: subb %bl, %al
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %1 = call i8 @llvm.scmp(i64 %x, i64 %y)
  ret i8 %1
}

define i8 @scmp.8.128(i128 %x, i128 %y) nounwind {
; X64-LABEL: scmp.8.128:
; X64: # %bb.0:
; X64-NEXT: cmpq %rdx, %rdi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: sbbq %rcx, %rax
; X64-NEXT: setl %r8b
; X64-NEXT: cmpq %rdi, %rdx
; X64-NEXT: sbbq %rsi, %rcx
; X64-NEXT: setl %al
; X64-NEXT: subb %r8b, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp.8.128:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 24(%ebp), %ecx
; X86-NEXT: movl 28(%ebp), %eax
; X86-NEXT: movl 12(%ebp), %edi
; X86-NEXT: cmpl %ecx, 8(%ebp)
; X86-NEXT: sbbl %eax, %edi
; X86-NEXT: movl 32(%ebp), %edi
; X86-NEXT: movl 16(%ebp), %ebx
; X86-NEXT: sbbl %edi, %ebx
; X86-NEXT: movl 36(%ebp), %ebx
; X86-NEXT: movl 20(%ebp), %edx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: sbbl %ebx, %ecx
; X86-NEXT: setl %cl
; X86-NEXT: movl 24(%ebp), %esi
; X86-NEXT: cmpl 8(%ebp), %esi
; X86-NEXT: sbbl 12(%ebp), %eax
; X86-NEXT: sbbl 16(%ebp), %edi
; X86-NEXT: sbbl %edx, %ebx
; X86-NEXT: setl %al
; X86-NEXT: subb %cl, %al
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
  %1 = call i8 @llvm.scmp(i128 %x, i128 %y)
  ret i8 %1
}

define i32 @scmp.32.32(i32 %x, i32 %y) nounwind {
; X64-LABEL: scmp.32.32:
; X64: # %bb.0:
; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: setl %al
; X64-NEXT: setg %cl
; X64-NEXT: subb %al, %cl
; X64-NEXT: movsbl %cl, %eax
; X64-NEXT: retq
;
; X86-LABEL: scmp.32.32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movsbl %cl, %eax
; X86-NEXT: retl
  %1 = call i32 @llvm.scmp(i32 %x, i32 %y)
  ret i32 %1
}

define i32 @scmp.32.64(i64 %x, i64 %y) nounwind {
; X64-LABEL: scmp.32.64:
; X64: # %bb.0:
; X64-NEXT: cmpq %rsi, %rdi
; X64-NEXT: setl %al
; X64-NEXT: setg %cl
; X64-NEXT: subb %al, %cl
; X64-NEXT: movsbl %cl, %eax
; X64-NEXT: retq
;
; X86-LABEL: scmp.32.64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: cmpl %eax, %edx
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %ecx, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edx, %eax
; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: setl %al
; X86-NEXT: subb %bl, %al
; X86-NEXT: movsbl %al, %eax
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %1 = call i32 @llvm.scmp(i64 %x, i64 %y)
  ret i32 %1
}

define i64 @scmp.64.64(i64 %x, i64 %y) nounwind {
; X64-LABEL: scmp.64.64:
; X64: # %bb.0:
; X64-NEXT: cmpq %rsi, %rdi
; X64-NEXT: setl %al
; X64-NEXT: setg %cl
; X64-NEXT: subb %al, %cl
; X64-NEXT: movsbq %cl, %rax
; X64-NEXT: retq
;
; X86-LABEL: scmp.64.64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: cmpl %eax, %edx
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %ecx, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edx, %eax
; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: setl %al
; X86-NEXT: subb %bl, %al
; X86-NEXT: movsbl %al, %eax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: sarl $31, %edx
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %1 = call i64 @llvm.scmp(i64 %x, i64 %y)
  ret i64 %1
}

define i4 @scmp_narrow_result(i32 %x, i32 %y) nounwind {
; X64-LABEL: scmp_narrow_result:
; X64: # %bb.0:
; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: setl %cl
; X64-NEXT: setg %al
; X64-NEXT: subb %cl, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp_narrow_result:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; X86-NEXT: setl %cl
; X86-NEXT: setg %al
; X86-NEXT: subb %cl, %al
; X86-NEXT: retl
  %1 = call i4 @llvm.scmp(i32 %x, i32 %y)
  ret i4 %1
}

define i8 @scmp_narrow_op(i62 %x, i62 %y) nounwind {
; X64-LABEL: scmp_narrow_op:
; X64: # %bb.0:
; X64-NEXT: shlq $2, %rsi
; X64-NEXT: sarq $2, %rsi
; X64-NEXT: shlq $2, %rdi
; X64-NEXT: sarq $2, %rdi
; X64-NEXT: cmpq %rsi, %rdi
; X64-NEXT: setl %cl
; X64-NEXT: setg %al
; X64-NEXT: subb %cl, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp_narrow_op:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: sarl $2, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shll $2, %esi
; X86-NEXT: sarl $2, %esi
; X86-NEXT: cmpl %ecx, %edx
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %eax, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edx, %ecx
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: setl %al
; X86-NEXT: subb %bl, %al
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %1 = call i8 @llvm.scmp(i62 %x, i62 %y)
  ret i8 %1
}

define i141 @scmp_wide_result(i32 %x, i32 %y) nounwind {
; X64-LABEL: scmp_wide_result:
; X64: # %bb.0:
; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: setl %al
; X64-NEXT: setg %cl
; X64-NEXT: subb %al, %cl
; X64-NEXT: movsbq %cl, %rax
; X64-NEXT: movq %rax, %rdx
; X64-NEXT: sarq $63, %rdx
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: andl $8191, %ecx # imm = 0x1FFF
; X64-NEXT: retq
;
; X86-LABEL: scmp_wide_result:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: setl %cl
; X86-NEXT: setg %dl
; X86-NEXT: subb %cl, %dl
; X86-NEXT: movsbl %dl, %ecx
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: sarl $31, %ecx
; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: movl %ecx, 4(%eax)
; X86-NEXT: andl $8191, %ecx # imm = 0x1FFF
; X86-NEXT: movw %cx, 16(%eax)
; X86-NEXT: retl $4
  %1 = call i141 @llvm.scmp(i32 %x, i32 %y)
  ret i141 %1
}

define i8 @scmp_wide_op(i109 %x, i109 %y) nounwind {
; X64-LABEL: scmp_wide_op:
; X64: # %bb.0:
; X64-NEXT: shlq $19, %rcx
; X64-NEXT: sarq $19, %rcx
; X64-NEXT: shlq $19, %rsi
; X64-NEXT: sarq $19, %rsi
; X64-NEXT: cmpq %rdx, %rdi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: sbbq %rcx, %rax
; X64-NEXT: setl %r8b
; X64-NEXT: cmpq %rdi, %rdx
; X64-NEXT: sbbq %rsi, %rcx
; X64-NEXT: setl %al
; X64-NEXT: subb %r8b, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp_wide_op:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $19, %eax
; X86-NEXT: sarl $19, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: shll $19, %ecx
; X86-NEXT: sarl $19, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: cmpl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: sbbl %edx, %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, %esi
; X86-NEXT: sbbl %ebp, %esi
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: sbbl %eax, %esi
; X86-NEXT: setl %bl
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi
; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx
; X86-NEXT: sbbl %edi, %ebp
; X86-NEXT: sbbl %ecx, %eax
; X86-NEXT: setl %al
; X86-NEXT: subb %bl, %al
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
  %1 = call i8 @llvm.scmp(i109 %x, i109 %y)
  ret i8 %1
}

define i41 @scmp_uncommon_types(i7 %x, i7 %y) nounwind {
; X64-LABEL: scmp_uncommon_types:
; X64: # %bb.0:
; X64-NEXT: addb %sil, %sil
; X64-NEXT: sarb %sil
; X64-NEXT: addb %dil, %dil
; X64-NEXT: sarb %dil
; X64-NEXT: cmpb %sil, %dil
; X64-NEXT: setl %al
; X64-NEXT: setg %cl
; X64-NEXT: subb %al, %cl
; X64-NEXT: movsbq %cl, %rax
; X64-NEXT: retq
;
; X86-LABEL: scmp_uncommon_types:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %al, %al
; X86-NEXT: sarb %al
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: addb %cl, %cl
; X86-NEXT: sarb %cl
; X86-NEXT: cmpb %al, %cl
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movsbl %cl, %eax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: sarl $31, %edx
; X86-NEXT: retl
  %1 = call i41 @llvm.scmp(i7 %x, i7 %y)
  ret i41 %1
}

define <4 x i32> @scmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE-LABEL: scmp_normal_vectors:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pcmpgtd %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: scmp_normal_vectors:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: scmp_normal_vectors:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k2
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1]
; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1}
; AVX512-NEXT: retq
;
; X86-LABEL: scmp_normal_vectors:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edx
; X86-NEXT: setl %dl
; X86-NEXT: setg %dh
; X86-NEXT: subb %dl, %dh
; X86-NEXT: movsbl %dh, %edx
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edi
; X86-NEXT: setl %bl
; X86-NEXT: setg %bh
; X86-NEXT: subb %bl, %bh
; X86-NEXT: movsbl %bh, %edi
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi
; X86-NEXT: setl %bl
; X86-NEXT: setg %bh
; X86-NEXT: subb %bl, %bh
; X86-NEXT: movsbl %bh, %esi
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: setl %cl
; X86-NEXT: setg %ch
; X86-NEXT: subb %cl, %ch
; X86-NEXT: movsbl %ch, %ecx
; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: movl %esi, 8(%eax)
; X86-NEXT: movl %edi, 4(%eax)
; X86-NEXT: movl %edx, (%eax)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl $4
  %1 = call <4 x i32> @llvm.scmp(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %1
}

define <4 x i8> @scmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-LABEL: scmp_narrow_vec_result:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: movd %xmm0, %ecx
; SSE2-NEXT: cmpl %eax, %ecx
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; SSE2-NEXT: movd %xmm2, %ecx
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
; SSE2-NEXT: movd %xmm2, %edx
; SSE2-NEXT: cmpl %ecx, %edx
; SSE2-NEXT: setl %cl
; SSE2-NEXT: setg %dl
; SSE2-NEXT: subb %cl, %dl
; SSE2-NEXT: movzbl %dl, %ecx
; SSE2-NEXT: shll $8, %ecx
; SSE2-NEXT: orl %eax, %ecx
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: movd %xmm2, %edx
; SSE2-NEXT: cmpl %eax, %edx
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %dl
; SSE2-NEXT: subb %al, %dl
; SSE2-NEXT: movzbl %dl, %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: orl %ecx, %eax
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE2-NEXT: movd %xmm1, %ecx
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: movd %xmm0, %edx
; SSE2-NEXT: cmpl %ecx, %edx
; SSE2-NEXT: setl %cl
; SSE2-NEXT: setg %dl
; SSE2-NEXT: subb %cl, %dl
; SSE2-NEXT: movzbl %dl, %ecx
; SSE2-NEXT: shll $24, %ecx
; SSE2-NEXT: orl %eax, %ecx
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: scmp_narrow_vec_result:
; SSE4: # %bb.0:
; SSE4-NEXT: pextrd $1, %xmm1, %eax
; SSE4-NEXT: pextrd $1, %xmm0, %ecx
; SSE4-NEXT: cmpl %eax, %ecx
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: movd %xmm1, %ecx
; SSE4-NEXT: movd %xmm0, %edx
; SSE4-NEXT: cmpl %ecx, %edx
; SSE4-NEXT: setl %cl
; SSE4-NEXT: setg %dl
; SSE4-NEXT: subb %cl, %dl
; SSE4-NEXT: movzbl %dl, %ecx
; SSE4-NEXT: movd %ecx, %xmm2
; SSE4-NEXT: pinsrb $1, %eax, %xmm2
; SSE4-NEXT: pextrd $2, %xmm1, %eax
; SSE4-NEXT: pextrd $2, %xmm0, %ecx
; SSE4-NEXT: cmpl %eax, %ecx
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $2, %eax, %xmm2
; SSE4-NEXT: pextrd $3, %xmm1, %eax
; SSE4-NEXT: pextrd $3, %xmm0, %ecx
; SSE4-NEXT: cmpl %eax, %ecx
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $3, %eax, %xmm2
; SSE4-NEXT: movdqa %xmm2, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: scmp_narrow_vec_result:
; AVX: # %bb.0:
; AVX-NEXT: vpextrd $1, %xmm1, %eax
; AVX-NEXT: vpextrd $1, %xmm0, %ecx
; AVX-NEXT: cmpl %eax, %ecx
; AVX-NEXT: setl %al
; AVX-NEXT: setg %cl
; AVX-NEXT: subb %al, %cl
; AVX-NEXT: vmovd %xmm1, %eax
; AVX-NEXT: vmovd %xmm0, %edx
; AVX-NEXT: cmpl %eax, %edx
; AVX-NEXT: setl %al
; AVX-NEXT: setg %dl
; AVX-NEXT: subb %al, %dl
; AVX-NEXT: vmovd %edx, %xmm2
; AVX-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
; AVX-NEXT: vpextrd $2, %xmm1, %eax
; AVX-NEXT: vpextrd $2, %xmm0, %ecx
; AVX-NEXT: cmpl %eax, %ecx
; AVX-NEXT: setl %al
; AVX-NEXT: setg %cl
; AVX-NEXT: subb %al, %cl
; AVX-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2
; AVX-NEXT: vpextrd $3, %xmm1, %eax
; AVX-NEXT: vpextrd $3, %xmm0, %ecx
; AVX-NEXT: cmpl %eax, %ecx
; AVX-NEXT: setl %al
; AVX-NEXT: setg %cl
; AVX-NEXT: subb %al, %cl
; AVX-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm0
; AVX-NEXT: retq
;
; X86-LABEL: scmp_narrow_vec_result:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: setl %ch
; X86-NEXT: setg %cl
; X86-NEXT: subb %ch, %cl
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edi
; X86-NEXT: setl %ch
; X86-NEXT: setg %bl
; X86-NEXT: subb %ch, %bl
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi
; X86-NEXT: setl %ch
; X86-NEXT: setg %bh
; X86-NEXT: subb %ch, %bh
; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edx
; X86-NEXT: setl %dl
; X86-NEXT: setg %ch
; X86-NEXT: subb %dl, %ch
; X86-NEXT: movb %ch, 3(%eax)
; X86-NEXT: movb %bh, 2(%eax)
; X86-NEXT: movb %bl, 1(%eax)
; X86-NEXT: movb %cl, (%eax)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl $4
  %1 = call <4 x i8> @llvm.scmp(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i8> %1
}

define <4 x i32> @scmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind {
; SSE2-LABEL: scmp_narrow_vec_op:
; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $24, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: psubd %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: scmp_narrow_vec_op:
; SSE4: # %bb.0:
; SSE4-NEXT: pmovsxbd %xmm1, %xmm1
; SSE4-NEXT: pmovsxbd %xmm0, %xmm0
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: pcmpgtd %xmm1, %xmm2
; SSE4-NEXT: pcmpgtd %xmm0, %xmm1
; SSE4-NEXT: psubd %xmm2, %xmm1
; SSE4-NEXT: movdqa %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX2-LABEL: scmp_narrow_vec_op:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX2-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: scmp_narrow_vec_op:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX512-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k2
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1]
; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1}
; AVX512-NEXT: retq
;
; X86-LABEL: scmp_narrow_vec_op:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movb {{[0-9]+}}(%esp), %ch
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %dl
; X86-NEXT: setl %dl
; X86-NEXT: setg %dh
; X86-NEXT: subb %dl, %dh
; X86-NEXT: movsbl %dh, %edx
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %bl
; X86-NEXT: setl %bl
; X86-NEXT: setg %bh
; X86-NEXT: subb %bl, %bh
; X86-NEXT: movsbl %bh, %esi
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %ch
; X86-NEXT: setl %ch
; X86-NEXT: setg %bl
; X86-NEXT: subb %ch, %bl
; X86-NEXT: movsbl %bl, %edi
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %cl
; X86-NEXT: setl %cl
; X86-NEXT: setg %ch
; X86-NEXT: subb %cl, %ch
; X86-NEXT: movsbl %ch, %ecx
; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: movl %edi, 8(%eax)
; X86-NEXT: movl %esi, 4(%eax)
; X86-NEXT: movl %edx, (%eax)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl $4
  %1 = call <4 x i32> @llvm.scmp(<4 x i8> %x, <4 x i8> %y)
  ret <4 x i32> %1
}

define <16 x i32> @scmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind {
; SSE2-LABEL: scmp_wide_vec_result:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; SSE2-NEXT: psrad $24, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
; SSE2-NEXT: pcmpgtd %xmm5, %xmm0
; SSE2-NEXT: psubd %xmm6, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrad $24, %xmm1
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrad $24, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
; SSE2-NEXT: psubd %xmm5, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE2-NEXT: psrad $24, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
; SSE2-NEXT: psrad $24, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
; SSE2-NEXT: psubd %xmm6, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: psrad $24, %xmm3
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
; SSE2-NEXT: psrad $24, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
; SSE2-NEXT: psubd %xmm5, %xmm3
; SSE2-NEXT: retq
;
; SSE4-LABEL: scmp_wide_vec_result:
; SSE4: # %bb.0:
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: pmovsxbd %xmm1, %xmm0
; SSE4-NEXT: pmovsxbd %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm2, %xmm3
; SSE4-NEXT: pcmpgtd %xmm0, %xmm3
; SSE4-NEXT: pcmpgtd %xmm2, %xmm0
; SSE4-NEXT: psubd %xmm3, %xmm0
; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; SSE4-NEXT: pmovsxbd %xmm2, %xmm5
; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,1,1]
; SSE4-NEXT: pmovsxbd %xmm2, %xmm2
; SSE4-NEXT: movdqa %xmm2, %xmm3
; SSE4-NEXT: pcmpgtd %xmm5, %xmm3
; SSE4-NEXT: pcmpgtd %xmm2, %xmm5
; SSE4-NEXT: psubd %xmm3, %xmm5
; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE4-NEXT: pmovsxbd %xmm2, %xmm2
; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,3,2,3]
; SSE4-NEXT: pmovsxbd %xmm3, %xmm3
; SSE4-NEXT: movdqa %xmm3, %xmm6
; SSE4-NEXT: pcmpgtd %xmm2, %xmm6
; SSE4-NEXT: pcmpgtd %xmm3, %xmm2
; SSE4-NEXT: psubd %xmm6, %xmm2
; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE4-NEXT: pmovsxbd %xmm1, %xmm3
; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
; SSE4-NEXT: pmovsxbd %xmm1, %xmm1
; SSE4-NEXT: movdqa %xmm1, %xmm4
; SSE4-NEXT: pcmpgtd %xmm3, %xmm4
; SSE4-NEXT: pcmpgtd %xmm1, %xmm3
; SSE4-NEXT: psubd %xmm4, %xmm3
; SSE4-NEXT: movdqa %xmm5, %xmm1
; SSE4-NEXT: retq
;
; AVX2-LABEL: scmp_wide_vec_result:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm2
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm3
; AVX2-NEXT: vpcmpgtd %ymm2, %ymm3, %ymm4
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm3
; AVX2-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm1
; AVX2-NEXT: vmovdqa %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: scmp_wide_vec_result:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k2
; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm0 {%k2} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512-NEXT: retq
;
; X86-LABEL: scmp_wide_vec_result:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $16, %esp
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movb {{[0-9]+}}(%esp), %ah
; X86-NEXT: movb {{[0-9]+}}(%esp), %ch
; X86-NEXT: movb {{[0-9]+}}(%esp), %dh
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movb {{[0-9]+}}(%esp), %bh
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %bh
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %bl
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %dh
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %ch
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %ah
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %dl
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %bh
; X86-NEXT: subb %al, %bh
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %bl
; X86-NEXT: subb %al, %bl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %dh
; X86-NEXT: subb %al, %dh
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %ebp
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %edi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %ah
; X86-NEXT: subb %al, %ah
; X86-NEXT: movsbl %ah, %esi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %ecx, 60(%eax)
; X86-NEXT: movl %esi, 56(%eax)
; X86-NEXT: movl %edi, 52(%eax)
; X86-NEXT: movl %ebp, 48(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 44(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 40(%eax)
; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X86-NEXT: movsbl %dh, %edx
; X86-NEXT: movl %edx, 36(%eax)
; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
; X86-NEXT: movsbl %bl, %esi
; X86-NEXT: movl %esi, 32(%eax)
; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X86-NEXT: movsbl %bh, %edi
; X86-NEXT: movl %edi, 28(%eax)
; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload
; X86-NEXT: movl %ebx, 24(%eax)
; X86-NEXT: movl %edi, 20(%eax)
; X86-NEXT: movl %esi, 16(%eax)
; X86-NEXT: movl %edx, 12(%eax)
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X86-NEXT: movl %ecx, 4(%eax)
; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: addl $16, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
  %1 = call <16 x i32> @llvm.scmp(<16 x i8> %x, <16 x i8> %y)
  ret <16 x i32> %1
}

define <16 x i8> @scmp_wide_vec_op(<16 x i64> %x, <16 x i64> %y) nounwind {
; SSE2-LABEL: scmp_wide_vec_op:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm7, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
; SSE2-NEXT: movq %xmm7, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm7
; SSE2-NEXT: movq %xmm6, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
; SSE2-NEXT: movq %xmm6, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
; SSE2-NEXT: movq %xmm5, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
; SSE2-NEXT: movq %xmm5, %rcx
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: movd %eax, %xmm6
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movq %xmm4, %rcx
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: movd %eax, %xmm8
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
; SSE2-NEXT: movq %xmm4, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm4
; SSE2-NEXT: movq %xmm3, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; SSE2-NEXT: movq %xmm3, %rcx
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movq %xmm2, %rcx
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSE2-NEXT: movq %xmm2, %rcx
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %dl
; SSE2-NEXT: subb %al, %dl
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; SSE2-NEXT: movzbl %dl, %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE2-NEXT: movq %xmm1, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT: movq %xmm1, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %cl
; SSE2-NEXT: subb %al, %cl
; SSE2-NEXT: movzbl %cl, %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: scmp_wide_vec_op:
; SSE4: # %bb.0:
; SSE4-NEXT: pextrq $1, %xmm0, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: movq %xmm0, %rcx
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
; SSE4-NEXT: setl %cl
; SSE4-NEXT: setg %dl
; SSE4-NEXT: subb %cl, %dl
; SSE4-NEXT: movzbl %dl, %ecx
; SSE4-NEXT: movd %ecx, %xmm0
; SSE4-NEXT: pinsrb $1, %eax, %xmm0
; SSE4-NEXT: movq %xmm1, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $2, %eax, %xmm0
; SSE4-NEXT: pextrq $1, %xmm1, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $3, %eax, %xmm0
; SSE4-NEXT: movq %xmm2, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $4, %eax, %xmm0
; SSE4-NEXT: pextrq $1, %xmm2, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $5, %eax, %xmm0
; SSE4-NEXT: movq %xmm3, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $6, %eax, %xmm0
; SSE4-NEXT: pextrq $1, %xmm3, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $7, %eax, %xmm0
; SSE4-NEXT: movq %xmm4, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $8, %eax, %xmm0
; SSE4-NEXT: pextrq $1, %xmm4, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $9, %eax, %xmm0
; SSE4-NEXT: movq %xmm5, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $10, %eax, %xmm0
; SSE4-NEXT: pextrq $1, %xmm5, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $11, %eax, %xmm0
; SSE4-NEXT: movq %xmm6, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $12, %eax, %xmm0
; SSE4-NEXT: pextrq $1, %xmm6, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $13, %eax, %xmm0
; SSE4-NEXT: movq %xmm7, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $14, %eax, %xmm0
; SSE4-NEXT: pextrq $1, %xmm7, %rax
; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: pinsrb $15, %eax, %xmm0
; SSE4-NEXT: retq
;
; AVX2-LABEL: scmp_wide_vec_op:
; AVX2: # %bb.0:
; AVX2-NEXT: vpextrq $1, %xmm4, %rax
; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: vmovq %xmm0, %rdx
; AVX2-NEXT: cmpq %rax, %rdx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %dl
; AVX2-NEXT: subb %al, %dl
; AVX2-NEXT: vmovd %edx, %xmm8
; AVX2-NEXT: vpinsrb $1, %ecx, %xmm8, %xmm8
; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm4
; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $2, %ecx, %xmm8, %xmm8
; AVX2-NEXT: vpextrq $1, %xmm4, %rax
; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $3, %ecx, %xmm8, %xmm0
; AVX2-NEXT: vmovq %xmm5, %rax
; AVX2-NEXT: vmovq %xmm1, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm5, %rax
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm4
; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX2-NEXT: vmovq %xmm1, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm4, %rax
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm6, %rax
; AVX2-NEXT: vmovq %xmm2, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm6, %rax
; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm1
; AVX2-NEXT: vmovq %xmm1, %rax
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX2-NEXT: vmovq %xmm2, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm7, %rax
; AVX2-NEXT: vmovq %xmm3, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm7, %rax
; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm1
; AVX2-NEXT: vmovq %xmm1, %rax
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
; AVX2-NEXT: vmovq %xmm2, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
; AVX2-NEXT: cmpq %rax, %rcx
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vpinsrb $15, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: scmp_wide_vec_op:
; AVX512: # %bb.0:
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: vmovq %xmm0, %rdx
; AVX512-NEXT: cmpq %rax, %rdx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %dl
; AVX512-NEXT: subb %al, %dl
; AVX512-NEXT: vmovd %edx, %xmm4
; AVX512-NEXT: vpinsrb $1, %ecx, %xmm4, %xmm4
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm5
; AVX512-NEXT: vmovq %xmm5, %rax
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm6
; AVX512-NEXT: vmovq %xmm6, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $2, %ecx, %xmm4, %xmm4
; AVX512-NEXT: vpextrq $1, %xmm5, %rax
; AVX512-NEXT: vpextrq $1, %xmm6, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $3, %ecx, %xmm4, %xmm4
; AVX512-NEXT: vextracti32x4 $2, %zmm2, %xmm5
; AVX512-NEXT: vmovq %xmm5, %rax
; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm6
; AVX512-NEXT: vmovq %xmm6, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $4, %ecx, %xmm4, %xmm4
; AVX512-NEXT: vpextrq $1, %xmm5, %rax
; AVX512-NEXT: vpextrq $1, %xmm6, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $5, %ecx, %xmm4, %xmm4
; AVX512-NEXT: vextracti32x4 $3, %zmm2, %xmm2
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $6, %ecx, %xmm4, %xmm4
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $7, %ecx, %xmm4, %xmm0
; AVX512-NEXT: vmovq %xmm3, %rax
; AVX512-NEXT: vmovq %xmm1, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
; AVX512-NEXT: vpextrq $1, %xmm3, %rax
; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm2
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512-NEXT: vmovq %xmm4, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: vpextrq $1, %xmm4, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
; AVX512-NEXT: vextracti32x4 $2, %zmm3, %xmm2
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm4
; AVX512-NEXT: vmovq %xmm4, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: vpextrq $1, %xmm4, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
; AVX512-NEXT: vextracti32x4 $3, %zmm3, %xmm2
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vmovq %xmm1, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
; AVX512-NEXT: cmpq %rax, %rcx
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: vpinsrb $15, %ecx, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; X86-LABEL: scmp_wide_vec_op:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: cmpl %edx, %edi
; X86-NEXT: movl %ebx, %ebp
; X86-NEXT: sbbl %esi, %ebp
; X86-NEXT: setl %al
; X86-NEXT: cmpl %edi, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: sbbl %ebx, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: setl %ah
; X86-NEXT: subb %al, %ah
; X86-NEXT: movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpl %ecx, %ebp
; X86-NEXT: movl %ebx, %eax
; X86-NEXT: sbbl %edx, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: setl %al
; X86-NEXT: cmpl %ebp, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl %ebx, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: setl %ah
; X86-NEXT: subb %al, %ah
; X86-NEXT: movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpl %edi, %ecx
; X86-NEXT: movl %edx, %eax
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: setl %al
; X86-NEXT: cmpl %ecx, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: sbbl %edx, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: setl %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: cmpl %ebp, %edi
; X86-NEXT: movl %esi, %eax
; X86-NEXT: sbbl %ecx, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edi, %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: setl %cl
; X86-NEXT: subb %bl, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpl %edx, %edi
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: sbbl %eax, %ecx
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edi, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: setl %bh
; X86-NEXT: subb %bl, %bh
; X86-NEXT: movb %bh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpl %edx, %eax
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %ecx, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %eax, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: setl %bh
; X86-NEXT: subb %bl, %bh
; X86-NEXT: movb %bh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: cmpl %edx, %ecx
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %eax, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %ecx, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: setl %bh
; X86-NEXT: subb %bl, %bh
; X86-NEXT: movb %bh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpl %ecx, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %eax, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: setl %dl
; X86-NEXT: subb %bl, %dl
; X86-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: cmpl %ecx, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %eax, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edx, %ecx
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: setl %dl
; X86-NEXT: subb %bl, %dl
; X86-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %edx, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %ecx, %eax
; X86-NEXT: sbbl %esi, %edx
; X86-NEXT: setl %al
; X86-NEXT: subb %bl, %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl %ebp, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %edx, %edi
; X86-NEXT: setl %al
; X86-NEXT: cmpl %ecx, %ebp
; X86-NEXT: sbbl %esi, %edx
; X86-NEXT: setl %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl %ebp, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %edx, %edi
; X86-NEXT: setl %al
; X86-NEXT: cmpl %ecx, %ebp
; X86-NEXT: sbbl %esi, %edx
; X86-NEXT: setl %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movb %cl, (%esp) # 1-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, %ebp
; X86-NEXT: sbbl %esi, %ebp
; X86-NEXT: setl %dl
; X86-NEXT: cmpl %ecx, %eax
; X86-NEXT: sbbl %edi, %esi
; X86-NEXT: setl %ch
; X86-NEXT: subb %dl, %ch
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: cmpl %edx, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ebp
; X86-NEXT: sbbl %edi, %ebp
; X86-NEXT: setl %cl
|
|
; X86-NEXT: cmpl %esi, %edx
|
|
; X86-NEXT: sbbl %eax, %edi
|
|
; X86-NEXT: setl %dl
|
|
; X86-NEXT: subb %cl, %dl
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X86-NEXT: cmpl %ebx, %esi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl %eax, %ebp
|
|
; X86-NEXT: sbbl %edi, %ebp
|
|
; X86-NEXT: setl %dh
|
|
; X86-NEXT: cmpl %esi, %ebx
|
|
; X86-NEXT: sbbl %eax, %edi
|
|
; X86-NEXT: setl %cl
|
|
; X86-NEXT: subb %dh, %cl
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X86-NEXT: cmpl %eax, %esi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
|
|
; X86-NEXT: movl %ebx, %ebp
|
|
; X86-NEXT: sbbl %edi, %ebp
|
|
; X86-NEXT: setl %dh
|
|
; X86-NEXT: cmpl %esi, %eax
|
|
; X86-NEXT: sbbl %ebx, %edi
|
|
; X86-NEXT: setl %bl
|
|
; X86-NEXT: subb %dh, %bl
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: movb %bl, 15(%eax)
|
|
; X86-NEXT: movb %cl, 14(%eax)
|
|
; X86-NEXT: movb %dl, 13(%eax)
|
|
; X86-NEXT: movb %ch, 12(%eax)
|
|
; X86-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 11(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 10(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 9(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 8(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 7(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 6(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 5(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 4(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 3(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 2(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, 1(%eax)
|
|
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X86-NEXT: movb %cl, (%eax)
|
|
; X86-NEXT: addl $12, %esp
|
|
; X86-NEXT: popl %esi
|
|
; X86-NEXT: popl %edi
|
|
; X86-NEXT: popl %ebx
|
|
; X86-NEXT: popl %ebp
|
|
; X86-NEXT: retl $4
|
|
%1 = call <16 x i8> @llvm.scmp(<16 x i64> %x, <16 x i64> %y)
|
|
ret <16 x i8> %1
|
|
}
|
|
|
|
define <7 x i117> @scmp_uncommon_vectors(<7 x i7> %x, <7 x i7> %y) nounwind {
; SSE2-LABEL: scmp_uncommon_vectors:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %r13
; SSE2-NEXT: pushq %r12
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movq %rdi, %rax
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r14d
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
; SSE2-NEXT: addb %r15b, %r15b
; SSE2-NEXT: sarb %r15b
; SSE2-NEXT: addb %sil, %sil
; SSE2-NEXT: sarb %sil
; SSE2-NEXT: cmpb %r15b, %sil
; SSE2-NEXT: setl %sil
; SSE2-NEXT: setg %r15b
; SSE2-NEXT: subb %sil, %r15b
; SSE2-NEXT: movsbq %r15b, %rsi
; SSE2-NEXT: movq %rsi, (%rax)
; SSE2-NEXT: movq %rsi, %xmm0
; SSE2-NEXT: sarq $63, %rsi
; SSE2-NEXT: addb %r14b, %r14b
; SSE2-NEXT: sarb %r14b
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
; SSE2-NEXT: addb %r15b, %r15b
; SSE2-NEXT: sarb %r15b
; SSE2-NEXT: cmpb %r14b, %r15b
; SSE2-NEXT: setl %r14b
; SSE2-NEXT: setg %r15b
; SSE2-NEXT: subb %r14b, %r15b
; SSE2-NEXT: movsbq %r15b, %r14
; SSE2-NEXT: movq %r14, %r15
; SSE2-NEXT: sarq $63, %r15
; SSE2-NEXT: addb %bpl, %bpl
; SSE2-NEXT: sarb %bpl
; SSE2-NEXT: addb %dl, %dl
; SSE2-NEXT: sarb %dl
; SSE2-NEXT: cmpb %bpl, %dl
; SSE2-NEXT: setl %dl
; SSE2-NEXT: setg %bpl
; SSE2-NEXT: subb %dl, %bpl
; SSE2-NEXT: movsbq %bpl, %rdx
; SSE2-NEXT: movq %rdx, %r12
; SSE2-NEXT: sarq $63, %r12
; SSE2-NEXT: addb %bl, %bl
; SSE2-NEXT: sarb %bl
; SSE2-NEXT: addb %cl, %cl
; SSE2-NEXT: sarb %cl
; SSE2-NEXT: cmpb %bl, %cl
; SSE2-NEXT: setl %cl
; SSE2-NEXT: setg %bl
; SSE2-NEXT: subb %cl, %bl
; SSE2-NEXT: movsbq %bl, %rbx
; SSE2-NEXT: movq %rbx, %rcx
; SSE2-NEXT: sarq $63, %rcx
; SSE2-NEXT: addb %r11b, %r11b
; SSE2-NEXT: sarb %r11b
; SSE2-NEXT: addb %r8b, %r8b
; SSE2-NEXT: sarb %r8b
; SSE2-NEXT: cmpb %r11b, %r8b
; SSE2-NEXT: setl %r8b
; SSE2-NEXT: setg %r11b
; SSE2-NEXT: subb %r8b, %r11b
; SSE2-NEXT: movsbq %r11b, %r8
; SSE2-NEXT: movq %r8, %r11
; SSE2-NEXT: sarq $63, %r11
; SSE2-NEXT: addb %r10b, %r10b
; SSE2-NEXT: sarb %r10b
; SSE2-NEXT: addb %r9b, %r9b
; SSE2-NEXT: sarb %r9b
; SSE2-NEXT: cmpb %r10b, %r9b
; SSE2-NEXT: setl %r9b
; SSE2-NEXT: setg %r10b
; SSE2-NEXT: subb %r9b, %r10b
; SSE2-NEXT: movsbq %r10b, %r9
; SSE2-NEXT: movq %r9, %r10
; SSE2-NEXT: sarq $63, %r10
; SSE2-NEXT: addb %dil, %dil
; SSE2-NEXT: sarb %dil
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
; SSE2-NEXT: addb %bpl, %bpl
; SSE2-NEXT: sarb %bpl
; SSE2-NEXT: cmpb %dil, %bpl
; SSE2-NEXT: setl %dil
; SSE2-NEXT: setg %bpl
; SSE2-NEXT: subb %dil, %bpl
; SSE2-NEXT: movsbq %bpl, %rdi
; SSE2-NEXT: movq %rdi, %r13
; SSE2-NEXT: sarq $63, %r13
; SSE2-NEXT: movl %r13d, 96(%rax)
; SSE2-NEXT: movabsq $2251799813685247, %rbp # imm = 0x7FFFFFFFFFFFF
; SSE2-NEXT: andq %r13, %rbp
; SSE2-NEXT: shldq $62, %rdi, %r13
; SSE2-NEXT: movq %r13, 88(%rax)
; SSE2-NEXT: movq %r10, %r13
; SSE2-NEXT: shldq $20, %r9, %r13
; SSE2-NEXT: movq %r13, 64(%rax)
; SSE2-NEXT: movq %r11, %r13
; SSE2-NEXT: shldq $31, %r8, %r13
; SSE2-NEXT: movq %r13, 48(%rax)
; SSE2-NEXT: movq %rcx, %r13
; SSE2-NEXT: shldq $42, %rbx, %r13
; SSE2-NEXT: movq %r13, 32(%rax)
; SSE2-NEXT: movabsq $9007199254738944, %r13 # imm = 0x1FFFFFFFFFF800
; SSE2-NEXT: andq %r12, %r13
; SSE2-NEXT: shldq $53, %rdx, %r12
; SSE2-NEXT: movq %r12, 16(%rax)
; SSE2-NEXT: movq %rbp, %r12
; SSE2-NEXT: shrq $48, %r12
; SSE2-NEXT: movb %r12b, 102(%rax)
; SSE2-NEXT: shrq $32, %rbp
; SSE2-NEXT: movw %bp, 100(%rax)
; SSE2-NEXT: movabsq $9007199254740991, %r12 # imm = 0x1FFFFFFFFFFFFF
; SSE2-NEXT: andq %r12, %r15
; SSE2-NEXT: shldq $9, %r14, %r15
; SSE2-NEXT: shlq $62, %rdi
; SSE2-NEXT: orq %r15, %rdi
; SSE2-NEXT: movq %rdi, 80(%rax)
; SSE2-NEXT: shlq $42, %rbx
; SSE2-NEXT: shrq $11, %r13
; SSE2-NEXT: orq %rbx, %r13
; SSE2-NEXT: movq %r13, 24(%rax)
; SSE2-NEXT: shlq $9, %r14
; SSE2-NEXT: andl $511, %r10d # imm = 0x1FF
; SSE2-NEXT: orq %r14, %r10
; SSE2-NEXT: movq %r10, 72(%rax)
; SSE2-NEXT: shlq $20, %r9
; SSE2-NEXT: andl $1048575, %r11d # imm = 0xFFFFF
; SSE2-NEXT: orq %r9, %r11
; SSE2-NEXT: movq %r11, 56(%rax)
; SSE2-NEXT: shlq $31, %r8
; SSE2-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
; SSE2-NEXT: orq %r8, %rcx
; SSE2-NEXT: movq %rcx, 40(%rax)
; SSE2-NEXT: movq %rsi, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: movq %xmm0, %rcx
; SSE2-NEXT: andq %r12, %rcx
; SSE2-NEXT: shlq $53, %rdx
; SSE2-NEXT: orq %rcx, %rdx
; SSE2-NEXT: movq %rdx, 8(%rax)
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %r12
; SSE2-NEXT: popq %r13
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %r15
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; SSE4-LABEL: scmp_uncommon_vectors:
; SSE4: # %bb.0:
; SSE4-NEXT: pushq %rbp
; SSE4-NEXT: pushq %r15
; SSE4-NEXT: pushq %r14
; SSE4-NEXT: pushq %r13
; SSE4-NEXT: pushq %r12
; SSE4-NEXT: pushq %rbx
; SSE4-NEXT: movq %rdi, %rax
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r14d
; SSE4-NEXT: addb %r14b, %r14b
; SSE4-NEXT: sarb %r14b
; SSE4-NEXT: addb %sil, %sil
; SSE4-NEXT: sarb %sil
; SSE4-NEXT: cmpb %r14b, %sil
; SSE4-NEXT: setl %sil
; SSE4-NEXT: setg %r14b
; SSE4-NEXT: subb %sil, %r14b
; SSE4-NEXT: movsbq %r14b, %r14
; SSE4-NEXT: movq %r14, (%rax)
; SSE4-NEXT: sarq $63, %r14
; SSE4-NEXT: addb %r15b, %r15b
; SSE4-NEXT: sarb %r15b
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
; SSE4-NEXT: addb %sil, %sil
; SSE4-NEXT: sarb %sil
; SSE4-NEXT: cmpb %r15b, %sil
; SSE4-NEXT: setl %sil
; SSE4-NEXT: setg %r15b
; SSE4-NEXT: subb %sil, %r15b
; SSE4-NEXT: movsbq %r15b, %rsi
; SSE4-NEXT: movq %rsi, %r15
; SSE4-NEXT: sarq $63, %r15
; SSE4-NEXT: addb %bpl, %bpl
; SSE4-NEXT: sarb %bpl
; SSE4-NEXT: addb %dl, %dl
; SSE4-NEXT: sarb %dl
; SSE4-NEXT: cmpb %bpl, %dl
; SSE4-NEXT: setl %dl
; SSE4-NEXT: setg %bpl
; SSE4-NEXT: subb %dl, %bpl
; SSE4-NEXT: movsbq %bpl, %r12
; SSE4-NEXT: movq %r12, %r13
; SSE4-NEXT: sarq $63, %r13
; SSE4-NEXT: addb %bl, %bl
; SSE4-NEXT: sarb %bl
; SSE4-NEXT: addb %cl, %cl
; SSE4-NEXT: sarb %cl
; SSE4-NEXT: cmpb %bl, %cl
; SSE4-NEXT: setl %cl
; SSE4-NEXT: setg %dl
; SSE4-NEXT: subb %cl, %dl
; SSE4-NEXT: movsbq %dl, %rbx
; SSE4-NEXT: movq %rbx, %rcx
; SSE4-NEXT: sarq $63, %rcx
; SSE4-NEXT: addb %r11b, %r11b
; SSE4-NEXT: sarb %r11b
; SSE4-NEXT: addb %r8b, %r8b
; SSE4-NEXT: sarb %r8b
; SSE4-NEXT: cmpb %r11b, %r8b
; SSE4-NEXT: setl %dl
; SSE4-NEXT: setg %r8b
; SSE4-NEXT: subb %dl, %r8b
; SSE4-NEXT: movsbq %r8b, %rdx
; SSE4-NEXT: movq %rdx, %r8
; SSE4-NEXT: sarq $63, %r8
; SSE4-NEXT: addb %r10b, %r10b
; SSE4-NEXT: sarb %r10b
; SSE4-NEXT: addb %r9b, %r9b
; SSE4-NEXT: sarb %r9b
; SSE4-NEXT: cmpb %r10b, %r9b
; SSE4-NEXT: setl %r9b
; SSE4-NEXT: setg %r10b
; SSE4-NEXT: subb %r9b, %r10b
; SSE4-NEXT: movsbq %r10b, %r9
; SSE4-NEXT: movq %r9, %r10
; SSE4-NEXT: sarq $63, %r10
; SSE4-NEXT: addb %dil, %dil
; SSE4-NEXT: sarb %dil
; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
; SSE4-NEXT: addb %r11b, %r11b
; SSE4-NEXT: sarb %r11b
; SSE4-NEXT: cmpb %dil, %r11b
; SSE4-NEXT: setl %dil
; SSE4-NEXT: setg %r11b
; SSE4-NEXT: subb %dil, %r11b
; SSE4-NEXT: movsbq %r11b, %rdi
; SSE4-NEXT: movq %rdi, %rbp
; SSE4-NEXT: sarq $63, %rbp
; SSE4-NEXT: movl %ebp, 96(%rax)
; SSE4-NEXT: movabsq $2251799813685247, %r11 # imm = 0x7FFFFFFFFFFFF
; SSE4-NEXT: andq %rbp, %r11
; SSE4-NEXT: shldq $62, %rdi, %rbp
; SSE4-NEXT: movq %rbp, 88(%rax)
; SSE4-NEXT: movq %r10, %rbp
; SSE4-NEXT: shldq $20, %r9, %rbp
; SSE4-NEXT: movq %rbp, 64(%rax)
; SSE4-NEXT: movq %r8, %rbp
; SSE4-NEXT: shldq $31, %rdx, %rbp
; SSE4-NEXT: movq %rbp, 48(%rax)
; SSE4-NEXT: movq %rcx, %rbp
; SSE4-NEXT: shldq $42, %rbx, %rbp
; SSE4-NEXT: movq %rbp, 32(%rax)
; SSE4-NEXT: movabsq $9007199254738944, %rbp # imm = 0x1FFFFFFFFFF800
; SSE4-NEXT: andq %r13, %rbp
; SSE4-NEXT: shldq $53, %r12, %r13
; SSE4-NEXT: movq %r13, 16(%rax)
; SSE4-NEXT: movq %r11, %r13
; SSE4-NEXT: shrq $48, %r13
; SSE4-NEXT: movb %r13b, 102(%rax)
; SSE4-NEXT: shrq $32, %r11
; SSE4-NEXT: movw %r11w, 100(%rax)
; SSE4-NEXT: movabsq $9007199254740991, %r11 # imm = 0x1FFFFFFFFFFFFF
; SSE4-NEXT: andq %r11, %r15
; SSE4-NEXT: shldq $9, %rsi, %r15
; SSE4-NEXT: shlq $62, %rdi
; SSE4-NEXT: orq %r15, %rdi
; SSE4-NEXT: movq %rdi, 80(%rax)
; SSE4-NEXT: andq %r11, %r14
; SSE4-NEXT: shlq $53, %r12
; SSE4-NEXT: orq %r14, %r12
; SSE4-NEXT: movq %r12, 8(%rax)
; SSE4-NEXT: shlq $42, %rbx
; SSE4-NEXT: shrq $11, %rbp
; SSE4-NEXT: orq %rbx, %rbp
; SSE4-NEXT: movq %rbp, 24(%rax)
; SSE4-NEXT: shlq $9, %rsi
; SSE4-NEXT: andl $511, %r10d # imm = 0x1FF
; SSE4-NEXT: orq %rsi, %r10
; SSE4-NEXT: movq %r10, 72(%rax)
; SSE4-NEXT: shlq $20, %r9
; SSE4-NEXT: andl $1048575, %r8d # imm = 0xFFFFF
; SSE4-NEXT: orq %r9, %r8
; SSE4-NEXT: movq %r8, 56(%rax)
; SSE4-NEXT: shlq $31, %rdx
; SSE4-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
; SSE4-NEXT: orq %rdx, %rcx
; SSE4-NEXT: movq %rcx, 40(%rax)
; SSE4-NEXT: popq %rbx
; SSE4-NEXT: popq %r12
; SSE4-NEXT: popq %r13
; SSE4-NEXT: popq %r14
; SSE4-NEXT: popq %r15
; SSE4-NEXT: popq %rbp
; SSE4-NEXT: retq
;
; AVX-LABEL: scmp_uncommon_vectors:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: pushq %r15
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %r13
; AVX-NEXT: pushq %r12
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movq %rdi, %rax
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r14d
; AVX-NEXT: addb %r14b, %r14b
; AVX-NEXT: sarb %r14b
; AVX-NEXT: addb %sil, %sil
; AVX-NEXT: sarb %sil
; AVX-NEXT: cmpb %r14b, %sil
; AVX-NEXT: setl %sil
; AVX-NEXT: setg %r14b
; AVX-NEXT: subb %sil, %r14b
; AVX-NEXT: movsbq %r14b, %r14
; AVX-NEXT: movq %r14, (%rax)
; AVX-NEXT: sarq $63, %r14
; AVX-NEXT: addb %r15b, %r15b
; AVX-NEXT: sarb %r15b
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
; AVX-NEXT: addb %sil, %sil
; AVX-NEXT: sarb %sil
; AVX-NEXT: cmpb %r15b, %sil
; AVX-NEXT: setl %sil
; AVX-NEXT: setg %r15b
; AVX-NEXT: subb %sil, %r15b
; AVX-NEXT: movsbq %r15b, %rsi
; AVX-NEXT: movq %rsi, %r12
; AVX-NEXT: sarq $63, %r12
; AVX-NEXT: addb %bpl, %bpl
; AVX-NEXT: sarb %bpl
; AVX-NEXT: addb %dl, %dl
; AVX-NEXT: sarb %dl
; AVX-NEXT: cmpb %bpl, %dl
; AVX-NEXT: setl %dl
; AVX-NEXT: setg %bpl
; AVX-NEXT: subb %dl, %bpl
; AVX-NEXT: movsbq %bpl, %r15
; AVX-NEXT: movq %r15, %r13
; AVX-NEXT: sarq $63, %r13
; AVX-NEXT: addb %bl, %bl
; AVX-NEXT: sarb %bl
; AVX-NEXT: addb %cl, %cl
; AVX-NEXT: sarb %cl
; AVX-NEXT: cmpb %bl, %cl
; AVX-NEXT: setl %cl
; AVX-NEXT: setg %dl
; AVX-NEXT: subb %cl, %dl
; AVX-NEXT: movsbq %dl, %rbx
; AVX-NEXT: movq %rbx, %rcx
; AVX-NEXT: sarq $63, %rcx
; AVX-NEXT: addb %r11b, %r11b
; AVX-NEXT: sarb %r11b
; AVX-NEXT: addb %r8b, %r8b
; AVX-NEXT: sarb %r8b
; AVX-NEXT: cmpb %r11b, %r8b
; AVX-NEXT: setl %dl
; AVX-NEXT: setg %r8b
; AVX-NEXT: subb %dl, %r8b
; AVX-NEXT: movsbq %r8b, %rdx
; AVX-NEXT: movq %rdx, %r8
; AVX-NEXT: sarq $63, %r8
; AVX-NEXT: addb %r10b, %r10b
; AVX-NEXT: sarb %r10b
; AVX-NEXT: addb %r9b, %r9b
; AVX-NEXT: sarb %r9b
; AVX-NEXT: cmpb %r10b, %r9b
; AVX-NEXT: setl %r9b
; AVX-NEXT: setg %r10b
; AVX-NEXT: subb %r9b, %r10b
; AVX-NEXT: movsbq %r10b, %r9
; AVX-NEXT: movq %r9, %r10
; AVX-NEXT: sarq $63, %r10
; AVX-NEXT: addb %dil, %dil
; AVX-NEXT: sarb %dil
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
; AVX-NEXT: addb %r11b, %r11b
; AVX-NEXT: sarb %r11b
; AVX-NEXT: cmpb %dil, %r11b
; AVX-NEXT: setl %dil
; AVX-NEXT: setg %r11b
; AVX-NEXT: subb %dil, %r11b
; AVX-NEXT: movsbq %r11b, %rdi
; AVX-NEXT: movq %rdi, %rbp
; AVX-NEXT: sarq $63, %rbp
; AVX-NEXT: movl %ebp, 96(%rax)
; AVX-NEXT: movb $51, %r11b
; AVX-NEXT: bzhiq %r11, %rbp, %r11
; AVX-NEXT: shldq $62, %rdi, %rbp
; AVX-NEXT: movq %rbp, 88(%rax)
; AVX-NEXT: movq %r10, %rbp
; AVX-NEXT: shldq $20, %r9, %rbp
; AVX-NEXT: movq %rbp, 64(%rax)
; AVX-NEXT: movq %r8, %rbp
; AVX-NEXT: shldq $31, %rdx, %rbp
; AVX-NEXT: movq %rbp, 48(%rax)
; AVX-NEXT: movq %rcx, %rbp
; AVX-NEXT: shldq $42, %rbx, %rbp
; AVX-NEXT: movq %rbp, 32(%rax)
; AVX-NEXT: movb $42, %bpl
; AVX-NEXT: bzhiq %rbp, %r13, %rbp
; AVX-NEXT: shldq $53, %r15, %r13
; AVX-NEXT: movq %r13, 16(%rax)
; AVX-NEXT: movq %r11, %r13
; AVX-NEXT: shrq $48, %r13
; AVX-NEXT: movb %r13b, 102(%rax)
; AVX-NEXT: shrq $32, %r11
; AVX-NEXT: movw %r11w, 100(%rax)
; AVX-NEXT: movb $53, %r11b
; AVX-NEXT: bzhiq %r11, %r12, %r12
; AVX-NEXT: shldq $9, %rsi, %r12
; AVX-NEXT: shlq $62, %rdi
; AVX-NEXT: orq %r12, %rdi
; AVX-NEXT: movq %rdi, 80(%rax)
; AVX-NEXT: shlq $42, %rbx
; AVX-NEXT: orq %rbp, %rbx
; AVX-NEXT: movq %rbx, 24(%rax)
; AVX-NEXT: bzhiq %r11, %r14, %rdi
; AVX-NEXT: shlq $53, %r15
; AVX-NEXT: orq %rdi, %r15
; AVX-NEXT: movq %r15, 8(%rax)
; AVX-NEXT: shlq $9, %rsi
; AVX-NEXT: andl $511, %r10d # imm = 0x1FF
; AVX-NEXT: orq %rsi, %r10
; AVX-NEXT: movq %r10, 72(%rax)
; AVX-NEXT: shlq $20, %r9
; AVX-NEXT: andl $1048575, %r8d # imm = 0xFFFFF
; AVX-NEXT: orq %r9, %r8
; AVX-NEXT: movq %r8, 56(%rax)
; AVX-NEXT: shlq $31, %rdx
; AVX-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rcx, 40(%rax)
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r12
; AVX-NEXT: popq %r13
; AVX-NEXT: popq %r14
; AVX-NEXT: popq %r15
; AVX-NEXT: popq %rbp
; AVX-NEXT: retq
;
; X86-LABEL: scmp_uncommon_vectors:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $52, %esp
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %al, %al
; X86-NEXT: sarb %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %al, %al
; X86-NEXT: sarb %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %al, %al
; X86-NEXT: sarb %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %al, %al
; X86-NEXT: sarb %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %al, %al
; X86-NEXT: sarb %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %al, %al
; X86-NEXT: sarb %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: movb {{[0-9]+}}(%esp), %dh
; X86-NEXT: addb %dh, %dh
; X86-NEXT: sarb %dh
; X86-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-NEXT: addb %dl, %dl
; X86-NEXT: sarb %dl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %al, %al
; X86-NEXT: sarb %al
; X86-NEXT: movb {{[0-9]+}}(%esp), %ah
; X86-NEXT: addb %ah, %ah
; X86-NEXT: sarb %ah
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: addb %cl, %cl
; X86-NEXT: sarb %cl
; X86-NEXT: movb {{[0-9]+}}(%esp), %ch
; X86-NEXT: addb %ch, %ch
; X86-NEXT: sarb %ch
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: addb %bl, %bl
; X86-NEXT: sarb %bl
; X86-NEXT: movb {{[0-9]+}}(%esp), %bh
; X86-NEXT: addb %bh, %bh
; X86-NEXT: sarb %bh
; X86-NEXT: cmpb %bl, %bh
; X86-NEXT: setl %bl
; X86-NEXT: setg %bh
; X86-NEXT: subb %bl, %bh
; X86-NEXT: movsbl %bh, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sarl $31, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: andl $2097151, %esi # imm = 0x1FFFFF
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpb %cl, %ch
; X86-NEXT: setl %cl
; X86-NEXT: setg %ch
; X86-NEXT: subb %cl, %ch
; X86-NEXT: movsbl %ch, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sarl $31, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: andl $2097151, %ecx # imm = 0x1FFFFF
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpb %al, %ah
; X86-NEXT: setl %al
; X86-NEXT: setg %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movsbl %cl, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %ecx, (%edi)
; X86-NEXT: sarl $31, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl $2097151, %eax # imm = 0x1FFFFF
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpb %dh, %dl
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %ebp
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sarl $31, %ebp
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT: cmpb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Folded Reload
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sarl $31, %esi
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT: cmpb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Folded Reload
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sarl $31, %eax
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
; X86-NEXT: cmpb {{[-0-9]+}}(%e{{[sb]}}p), %dl # 1-byte Folded Reload
; X86-NEXT: setl %dl
; X86-NEXT: setg %dh
; X86-NEXT: subb %dl, %dh
; X86-NEXT: movsbl %dh, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sarl $31, %ebx
; X86-NEXT: movl %ebx, 96(%edi)
; X86-NEXT: movl %ebx, 92(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 80(%edi)
; X86-NEXT: movl %eax, 68(%edi)
; X86-NEXT: movl %eax, 64(%edi)
; X86-NEXT: movl %esi, 52(%edi)
; X86-NEXT: movl %esi, 48(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 36(%edi)
; X86-NEXT: movl %ebp, 24(%edi)
; X86-NEXT: movl %ebp, 20(%edi)
; X86-NEXT: movl %ecx, 8(%edi)
; X86-NEXT: movl %ecx, 4(%edi)
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movw %cx, 100(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $30, %edx, %ecx
; X86-NEXT: movl %ecx, 88(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shldl $9, %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $9, %edx, %ecx
; X86-NEXT: movl %ecx, 76(%edi)
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $20, %edx, %ecx
; X86-NEXT: movl %ecx, 60(%edi)
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $31, %edx, %ecx
; X86-NEXT: movl %ecx, 44(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shldl $10, %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $10, %edx, %ecx
; X86-NEXT: movl %ecx, 32(%edi)
; X86-NEXT: movl %ebp, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: shldl $21, %ebx, %ecx
; X86-NEXT: movl %ecx, 16(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shll $9, %ecx
; X86-NEXT: andl $511, %eax # imm = 0x1FF
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: movl %eax, 72(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shll $20, %eax
; X86-NEXT: andl $1048575, %esi # imm = 0xFFFFF
; X86-NEXT: orl %eax, %esi
; X86-NEXT: movl %esi, 56(%edi)
; X86-NEXT: shll $10, %edx
; X86-NEXT: andl $1023, %ebp # imm = 0x3FF
; X86-NEXT: orl %edx, %ebp
; X86-NEXT: movl %ebp, 28(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shll $21, %eax
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, 12(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: andl $7, %eax
; X86-NEXT: movb %al, 102(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shll $30, %eax
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, 84(%edi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shll $31, %eax
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, 40(%edi)
; X86-NEXT: movl %edi, %eax
; X86-NEXT: addl $52, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
  %1 = call <7 x i117> @llvm.scmp(<7 x i7> %x, <7 x i7> %y)
  ret <7 x i117> %1
}

define <1 x i3> @scmp_scalarize(<1 x i33> %x, <1 x i33> %y) nounwind {
; X64-LABEL: scmp_scalarize:
; X64: # %bb.0:
; X64-NEXT: shlq $31, %rsi
; X64-NEXT: sarq $31, %rsi
; X64-NEXT: shlq $31, %rdi
; X64-NEXT: sarq $31, %rdi
; X64-NEXT: cmpq %rsi, %rdi
; X64-NEXT: setl %cl
; X64-NEXT: setg %al
; X64-NEXT: subb %cl, %al
; X64-NEXT: retq
;
; X86-LABEL: scmp_scalarize:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: andl $1, %eax
; X86-NEXT: negl %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: andl $1, %esi
; X86-NEXT: negl %esi
; X86-NEXT: cmpl %ecx, %edx
; X86-NEXT: movl %esi, %edi
; X86-NEXT: sbbl %eax, %edi
; X86-NEXT: setl %bl
; X86-NEXT: cmpl %edx, %ecx
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: setl %al
; X86-NEXT: subb %bl, %al
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %1 = call <1 x i3> @llvm.scmp(<1 x i33> %x, <1 x i33> %y)
  ret <1 x i3> %1
}

define <2 x i8> @scmp_bool_operands(<2 x i1> %x, <2 x i1> %y) nounwind {
; SSE2-LABEL: scmp_bool_operands:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: negb %al
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSE2-NEXT: andb $1, %dl
; SSE2-NEXT: negb %dl
; SSE2-NEXT: cmpb %al, %dl
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %dl
; SSE2-NEXT: subb %al, %dl
; SSE2-NEXT: movzbl %dl, %eax
; SSE2-NEXT: andb $1, %cl
; SSE2-NEXT: negb %cl
; SSE2-NEXT: andb $1, %sil
; SSE2-NEXT: negb %sil
; SSE2-NEXT: cmpb %cl, %sil
; SSE2-NEXT: setl %cl
; SSE2-NEXT: setg %dl
; SSE2-NEXT: subb %cl, %dl
; SSE2-NEXT: movzbl %dl, %ecx
; SSE2-NEXT: shll $8, %ecx
; SSE2-NEXT: orl %eax, %ecx
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: scmp_bool_operands:
; SSE4: # %bb.0:
; SSE4-NEXT: pextrb $8, %xmm1, %eax
; SSE4-NEXT: andb $1, %al
; SSE4-NEXT: negb %al
; SSE4-NEXT: pextrb $8, %xmm0, %ecx
; SSE4-NEXT: andb $1, %cl
; SSE4-NEXT: negb %cl
; SSE4-NEXT: cmpb %al, %cl
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movzbl %cl, %eax
; SSE4-NEXT: movd %xmm1, %ecx
; SSE4-NEXT: andb $1, %cl
; SSE4-NEXT: negb %cl
; SSE4-NEXT: movd %xmm0, %edx
; SSE4-NEXT: andb $1, %dl
; SSE4-NEXT: negb %dl
; SSE4-NEXT: cmpb %cl, %dl
; SSE4-NEXT: setl %cl
; SSE4-NEXT: setg %dl
; SSE4-NEXT: subb %cl, %dl
; SSE4-NEXT: movzbl %dl, %ecx
; SSE4-NEXT: movd %ecx, %xmm0
; SSE4-NEXT: pinsrb $1, %eax, %xmm0
; SSE4-NEXT: retq
;
; AVX2-LABEL: scmp_bool_operands:
; AVX2: # %bb.0:
; AVX2-NEXT: vpextrb $8, %xmm1, %eax
; AVX2-NEXT: andb $1, %al
; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
; AVX2-NEXT: negb %al
; AVX2-NEXT: andb $1, %cl
; AVX2-NEXT: negb %cl
; AVX2-NEXT: cmpb %al, %cl
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %cl
; AVX2-NEXT: subb %al, %cl
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: andb $1, %al
; AVX2-NEXT: negb %al
; AVX2-NEXT: vmovd %xmm0, %edx
; AVX2-NEXT: andb $1, %dl
; AVX2-NEXT: negb %dl
; AVX2-NEXT: cmpb %al, %dl
; AVX2-NEXT: setl %al
; AVX2-NEXT: setg %dl
; AVX2-NEXT: subb %al, %dl
; AVX2-NEXT: vmovd %edx, %xmm0
; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: scmp_bool_operands:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX512-NEXT: vpmovq2m %xmm0, %k0
; AVX512-NEXT: kshiftrb $1, %k0, %k1
; AVX512-NEXT: kmovd %k1, %eax
; AVX512-NEXT: vpsllq $63, %xmm1, %xmm0
; AVX512-NEXT: vpmovq2m %xmm0, %k1
; AVX512-NEXT: kshiftrb $1, %k1, %k2
; AVX512-NEXT: kmovd %k2, %ecx
; AVX512-NEXT: andb $1, %cl
; AVX512-NEXT: negb %cl
; AVX512-NEXT: andb $1, %al
; AVX512-NEXT: negb %al
; AVX512-NEXT: cmpb %cl, %al
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %cl
; AVX512-NEXT: subb %al, %cl
; AVX512-NEXT: kmovd %k1, %eax
; AVX512-NEXT: andb $1, %al
; AVX512-NEXT: negb %al
; AVX512-NEXT: kmovd %k0, %edx
; AVX512-NEXT: andb $1, %dl
; AVX512-NEXT: negb %dl
; AVX512-NEXT: cmpb %al, %dl
; AVX512-NEXT: setl %al
; AVX512-NEXT: setg %dl
; AVX512-NEXT: subb %al, %dl
; AVX512-NEXT: vmovd %edx, %xmm0
; AVX512-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X86-LABEL: scmp_bool_operands:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andb $1, %cl
; X86-NEXT: negb %cl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT: andb $1, %dl
; X86-NEXT: negb %dl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: andb $1, %al
; X86-NEXT: negb %al
; X86-NEXT: movb {{[0-9]+}}(%esp), %ah
; X86-NEXT: andb $1, %ah
; X86-NEXT: negb %ah
; X86-NEXT: cmpb %al, %ah
; X86-NEXT: setl %ah
; X86-NEXT: setg %al
; X86-NEXT: subb %ah, %al
; X86-NEXT: cmpb %cl, %dl
; X86-NEXT: setl %cl
; X86-NEXT: setg %dl
; X86-NEXT: subb %cl, %dl
; X86-NEXT: retl
  %1 = call <2 x i8> @llvm.scmp(<2 x i1> %x, <2 x i1> %y)
  ret <2 x i8> %1
}

define <2 x i16> @scmp_ret_wider_than_operands(<2 x i8> %x, <2 x i8> %y) nounwind {
; SSE2-LABEL: scmp_ret_wider_than_operands:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: movl %eax, %ecx
; SSE2-NEXT: shrl $8, %ecx
; SSE2-NEXT: movd %xmm0, %edx
; SSE2-NEXT: movl %edx, %esi
; SSE2-NEXT: shrl $8, %esi
; SSE2-NEXT: cmpb %cl, %sil
; SSE2-NEXT: setl %cl
; SSE2-NEXT: setg %sil
; SSE2-NEXT: subb %cl, %sil
; SSE2-NEXT: movsbl %sil, %ecx
; SSE2-NEXT: cmpb %al, %dl
; SSE2-NEXT: setl %al
; SSE2-NEXT: setg %dl
; SSE2-NEXT: subb %al, %dl
; SSE2-NEXT: movsbl %dl, %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pinsrw $1, %ecx, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: scmp_ret_wider_than_operands:
; SSE4: # %bb.0:
; SSE4-NEXT: pextrb $1, %xmm1, %eax
; SSE4-NEXT: pextrb $1, %xmm0, %ecx
; SSE4-NEXT: cmpb %al, %cl
; SSE4-NEXT: setl %al
; SSE4-NEXT: setg %cl
; SSE4-NEXT: subb %al, %cl
; SSE4-NEXT: movsbl %cl, %eax
; SSE4-NEXT: movd %xmm1, %ecx
; SSE4-NEXT: movd %xmm0, %edx
; SSE4-NEXT: cmpb %cl, %dl
; SSE4-NEXT: setl %cl
; SSE4-NEXT: setg %dl
; SSE4-NEXT: subb %cl, %dl
; SSE4-NEXT: movsbl %dl, %ecx
; SSE4-NEXT: movd %ecx, %xmm0
; SSE4-NEXT: pinsrw $1, %eax, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: scmp_ret_wider_than_operands:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $1, %xmm1, %eax
; AVX-NEXT: vpextrb $1, %xmm0, %ecx
; AVX-NEXT: cmpb %al, %cl
; AVX-NEXT: setl %al
; AVX-NEXT: setg %cl
; AVX-NEXT: subb %al, %cl
; AVX-NEXT: movsbl %cl, %eax
; AVX-NEXT: vmovd %xmm1, %ecx
; AVX-NEXT: vmovd %xmm0, %edx
; AVX-NEXT: cmpb %cl, %dl
; AVX-NEXT: setl %cl
; AVX-NEXT: setg %dl
; AVX-NEXT: subb %cl, %dl
; AVX-NEXT: movsbl %dl, %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
;
; X86-LABEL: scmp_ret_wider_than_operands:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al
; X86-NEXT: setl %al
; X86-NEXT: setg %dl
; X86-NEXT: subb %al, %dl
; X86-NEXT: movsbl %dl, %eax
; X86-NEXT: cmpb {{[0-9]+}}(%esp), %cl
; X86-NEXT: setl %cl
; X86-NEXT: setg %dl
; X86-NEXT: subb %cl, %dl
; X86-NEXT: movsbl %dl, %edx
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: # kill: def $dx killed $dx killed $edx
; X86-NEXT: retl
  %1 = call <2 x i16> @llvm.scmp(<2 x i8> %x, <2 x i8> %y)
  ret <2 x i16> %1
}