; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu -mattr=+f16c | FileCheck %s --check-prefix=F16C
; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu -mattr=+avx512fp16 | FileCheck %s --check-prefix=FP16
; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
; RUN: llc < %s -enable-legalize-types-checking -mtriple=i686-linux-gnu -mattr=sse2 | FileCheck %s --check-prefix=X86

; Check all soft floating point library function calls.

define void @test_half_ceil(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_ceil:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_ceil:
; FP16: # %bb.0:
; FP16-NEXT: vrndscalesh $10, %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_ceil:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq ceilf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_ceil:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll ceilf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.ceil.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_copysign(half %a0, half %a1, ptr %p0) nounwind {
; F16C-LABEL: test_half_copysign:
; F16C: # %bb.0:
; F16C-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; F16C-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; F16C-NEXT: vpor %xmm1, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_copysign:
; FP16: # %bb.0:
; FP16-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; FP16-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; FP16-NEXT: vpbroadcastw {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
; FP16-NEXT: vpternlogd {{.*#+}} zmm0 = zmm1 ^ (zmm2 & (zmm0 ^ zmm1))
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: vzeroupper
; FP16-NEXT: retq
;
; X64-LABEL: test_half_copysign:
; X64: # %bb.0:
; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rdi)
; X64-NEXT: retq
;
; X86-LABEL: test_half_copysign:
; X86: # %bb.0:
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: pextrw $0, %xmm0, %ecx
; X86-NEXT: movw %cx, (%eax)
; X86-NEXT: retl
%res = call half @llvm.copysign.half(half %a0, half %a1)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_cos(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_cos:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq cosf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_cos:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq cosf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_cos:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq cosf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_cos:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll cosf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.cos.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_exp(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_exp:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq expf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_exp:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq expf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_exp:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq expf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_exp:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll expf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.exp.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_exp2(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_exp2:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq exp2f@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_exp2:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq exp2f@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_exp2:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq exp2f@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_exp2:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll exp2f
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.exp2.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_exp10(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_exp10:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq exp10f@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_exp10:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq exp10f@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_exp10:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq exp10f@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_exp10:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll exp10f
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.exp10.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_fabs(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_fabs:
; F16C: # %bb.0:
; F16C-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_fabs:
; FP16: # %bb.0:
; FP16-NEXT: vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
; FP16-NEXT: vpand %xmm1, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_fabs:
; X64: # %bb.0:
; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rdi)
; X64-NEXT: retq
;
; X86-LABEL: test_half_fabs:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl $32767, %ecx # imm = 0x7FFF
; X86-NEXT: movw %cx, (%eax)
; X86-NEXT: retl
%res = call half @llvm.fabs.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_floor(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_floor:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_floor:
; FP16: # %bb.0:
; FP16-NEXT: vrndscalesh $9, %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_floor:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq floorf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_floor:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll floorf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.floor.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_fma(half %a0, half %a1, half %a2, ptr %p0) nounwind {
; F16C-LABEL: test_half_fma:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; F16C-NEXT: vcvtph2ps %xmm2, %xmm2
; F16C-NEXT: callq fmaf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_fma:
; FP16: # %bb.0:
; FP16-NEXT: vfmadd213sh %xmm2, %xmm1, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_fma:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: subq $16, %rsp
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: movss %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; X64-NEXT: # xmm1 = mem[0],zero,zero,zero
; X64-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Reload
; X64-NEXT: # xmm2 = mem[0],zero,zero,zero
; X64-NEXT: callq fmaf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: addq $16, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_fma:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $72, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll fmaf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $72, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.fma.half(half %a0, half %a1, half %a2)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_fneg(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_fneg:
; F16C: # %bb.0:
; F16C-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_fneg:
; FP16: # %bb.0:
; FP16-NEXT: vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; FP16-NEXT: vpxor %xmm1, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_fneg:
; X64: # %bb.0:
; X64-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rdi)
; X64-NEXT: retq
;
; X86-LABEL: test_half_fneg:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $32768, %ecx # imm = 0x8000
; X86-NEXT: xorl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movw %cx, (%eax)
; X86-NEXT: retl
%res = fneg half %a0
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_log(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_log:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq logf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_log:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq logf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_log:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq logf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_log:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll logf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.log.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_log2(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_log2:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq log2f@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_log2:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq log2f@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_log2:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq log2f@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_log2:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll log2f
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.log2.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_log10(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_log10:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq log10f@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_log10:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq log10f@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_log10:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq log10f@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_log10:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll log10f
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.log10.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_nearbyint(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_nearbyint:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_nearbyint:
; FP16: # %bb.0:
; FP16-NEXT: vrndscalesh $12, %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_nearbyint:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq nearbyintf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_nearbyint:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll nearbyintf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.nearbyint.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_pow(half %a0, half %a1, ptr %p0) nounwind {
; F16C-LABEL: test_half_pow:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; F16C-NEXT: callq powf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_pow:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: vcvtsh2ss %xmm1, %xmm1, %xmm1
; FP16-NEXT: callq powf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_pow:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: subq $16, %rsp
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; X64-NEXT: # xmm1 = mem[0],zero,zero,zero
; X64-NEXT: callq powf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: addq $16, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_pow:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $56, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll powf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $56, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.pow.half(half %a0, half %a1)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_powi(half %a0, i32 %a1, ptr %p0) nounwind {
; F16C-LABEL: test_half_powi:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rsi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq __powisf2@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_powi:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rsi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq __powisf2@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_powi:
; X64: # %bb.0:
; X64-NEXT: pushq %rbp
; X64-NEXT: pushq %rbx
; X64-NEXT: pushq %rax
; X64-NEXT: movq %rsi, %rbx
; X64-NEXT: movl %edi, %ebp
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: movl %ebp, %edi
; X64-NEXT: callq __powisf2@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: addq $8, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: popq %rbp
; X64-NEXT: retq
;
; X86-LABEL: test_half_powi:
; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $20, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __powisf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl
%res = call half @llvm.powi.half(half %a0, i32 %a1)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_rint(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_rint:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_rint:
; FP16: # %bb.0:
; FP16-NEXT: vrndscalesh $4, %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_rint:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq rintf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_rint:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll rintf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.rint.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_sin(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_sin:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq sinf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_sin:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq sinf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_sin:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq sinf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_sin:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll sinf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.sin.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_sqrt(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_sqrt:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_sqrt:
; FP16: # %bb.0:
; FP16-NEXT: vsqrtsh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_sqrt:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: sqrtss %xmm0, %xmm0
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_sqrt:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: sqrtss %xmm0, %xmm0
; X86-NEXT: movss %xmm0, (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.sqrt.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_tan(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_tan:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq tanf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_tan:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rbx
; FP16-NEXT: movq %rdi, %rbx
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq tanf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rbx)
; FP16-NEXT: popq %rbx
; FP16-NEXT: retq
;
; X64-LABEL: test_half_tan:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq tanf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_tan:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll tanf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.tan.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define void @test_half_trunc(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_trunc:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_trunc:
; FP16: # %bb.0:
; FP16-NEXT: vrndscalesh $11, %xmm0, %xmm0, %xmm0
; FP16-NEXT: vmovsh %xmm0, (%rdi)
; FP16-NEXT: retq
;
; X64-LABEL: test_half_trunc:
; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq truncf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: pextrw $0, %xmm0, %eax
; X64-NEXT: movw %ax, (%rbx)
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X86-LABEL: test_half_trunc:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll truncf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
%res = call half @llvm.trunc.half(half %a0)
store half %res, ptr %p0, align 2
ret void
}

define half @test_half_acos(half %a) nounwind {
; F16C-LABEL: test_half_acos:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq acosf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_acos:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq acosf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test_half_acos:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq acosf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test_half_acos:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll acosf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%x = call half @llvm.acos.f16(half %a)
ret half %x
}

define half @test_half_asin(half %a) nounwind {
; F16C-LABEL: test_half_asin:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq asinf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_asin:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq asinf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test_half_asin:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq asinf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test_half_asin:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll asinf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%x = call half @llvm.asin.f16(half %a)
ret half %x
}

define half @test_half_atan(half %a) nounwind {
; F16C-LABEL: test_half_atan:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq atanf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_atan:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq atanf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test_half_atan:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq atanf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test_half_atan:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll atanf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%x = call half @llvm.atan.f16(half %a)
ret half %x
}

define half @test_half_atan2(half %a, half %b) nounwind {
; F16C-LABEL: test_half_atan2:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; F16C-NEXT: callq atan2f@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_atan2:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: vcvtsh2ss %xmm1, %xmm1, %xmm1
; FP16-NEXT: callq atan2f@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test_half_atan2:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; X64-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; X64-NEXT: # xmm1 = mem[0],zero,zero,zero
; X64-NEXT: callq atan2f@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test_half_atan2:
; X86: # %bb.0:
; X86-NEXT: subl $60, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll atan2f
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $60, %esp
; X86-NEXT: retl
%x = call half @llvm.atan2.f16(half %a, half %b)
ret half %x
}

define half @test2_half_cos(half %Val) nounwind {
; F16C-LABEL: test2_half_cos:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq cosf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test2_half_cos:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq cosf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test2_half_cos:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq cosf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test2_half_cos:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll cosf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%res = call half @llvm.cos.f16(half %Val)
ret half %res
}

define half @test_half_cosh(half %a) nounwind {
; F16C-LABEL: test_half_cosh:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq coshf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_cosh:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq coshf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test_half_cosh:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq coshf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test_half_cosh:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll coshf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%x = call half @llvm.cosh.f16(half %a)
ret half %x
}

define half @test2_half_sin(half %Val) nounwind {
; F16C-LABEL: test2_half_sin:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq sinf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test2_half_sin:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq sinf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test2_half_sin:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq sinf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test2_half_sin:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll sinf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%res = call half @llvm.sin.f16(half %Val)
ret half %res
}

define half @test_half_sinh(half %a) nounwind {
; F16C-LABEL: test_half_sinh:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq sinhf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_sinh:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq sinhf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test_half_sinh:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq sinhf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test_half_sinh:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll sinhf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%x = call half @llvm.sinh.f16(half %a)
ret half %x
}

define half @test2_half_tan(half %a) nounwind {
; F16C-LABEL: test2_half_tan:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq tanf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test2_half_tan:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq tanf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test2_half_tan:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq tanf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test2_half_tan:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll tanf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%x = call half @llvm.tan.f16(half %a)
ret half %x
}

define half @test_half_tanh(half %a) nounwind {
; F16C-LABEL: test_half_tanh:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq tanhf@PLT
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; FP16-LABEL: test_half_tanh:
; FP16: # %bb.0:
; FP16-NEXT: pushq %rax
; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT: callq tanhf@PLT
; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT: popq %rax
; FP16-NEXT: retq
;
; X64-LABEL: test_half_tanh:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: callq tanhf@PLT
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: retq
;
; X86-LABEL: test_half_tanh:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: pextrw $0, %xmm0, %eax
; X86-NEXT: movw %ax, (%esp)
; X86-NEXT: calll __extendhfsf2
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll tanhf
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __truncsfhf2
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
%x = call half @llvm.tanh.f16(half %a)
ret half %x
}