
; When floating-point operations are legalized to operations of a higher
; precision (e.g. f16 fadd being legalized to f32 fadd) we get narrowing then
; widening operations between each operation. With the appropriate fast math
; flags (nnan ninf contract) we can eliminate these casts.

; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=arm -mattr=+vfp4d16sp,+fullfp16,-bf16 -stop-after=finalize-isel | FileCheck %s --check-prefixes=CHECK-NOBF16

; Check that the output instructions have the same fast math flags as the input
; fadd, even when bf16 is legalized to f32.
; FIXME: We should also test with +bf16, but it currently fails in instruction
; selection.
; No fast-math flags on the input fadd: the f32 VADDS carries none either.
define bfloat @normal_fadd(bfloat %x, bfloat %y) {
; CHECK-NOBF16-LABEL: name: normal_fadd
; CHECK-NOBF16: bb.0.entry:
; CHECK-NOBF16-NEXT: liveins: $r0, $r1
; CHECK-NOBF16-NEXT: {{ $}}
; CHECK-NOBF16-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r1
; CHECK-NOBF16-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r0
; CHECK-NOBF16-NEXT: [[MOVsi:%[0-9]+]]:gpr = MOVsi [[COPY]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS:%[0-9]+]]:spr = VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRS]]
; CHECK-NOBF16-NEXT: BL &__truncsfbf2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp, implicit-def $r0
; CHECK-NOBF16-NEXT: ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: [[COPY2:%[0-9]+]]:rgpr = COPY $r0
; CHECK-NOBF16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRH:%[0-9]+]]:rgpr = VMOVRH killed [[VMOVHR]], 14, $noreg
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRH]]
; CHECK-NOBF16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
entry:
  %add = fadd bfloat %x, %y
  ret bfloat %add
}
; 'fast' implies all fast-math flags; they are all carried onto the f32 VADDS
; (nnan ninf nsz arcp contract afn reassoc).
define bfloat @fast_fadd(bfloat %x, bfloat %y) {
; CHECK-NOBF16-LABEL: name: fast_fadd
; CHECK-NOBF16: bb.0.entry:
; CHECK-NOBF16-NEXT: liveins: $r0, $r1
; CHECK-NOBF16-NEXT: {{ $}}
; CHECK-NOBF16-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r1
; CHECK-NOBF16-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r0
; CHECK-NOBF16-NEXT: [[MOVsi:%[0-9]+]]:gpr = MOVsi [[COPY]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRS]]
; CHECK-NOBF16-NEXT: BL &__truncsfbf2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp, implicit-def $r0
; CHECK-NOBF16-NEXT: ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: [[COPY2:%[0-9]+]]:rgpr = COPY $r0
; CHECK-NOBF16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRH:%[0-9]+]]:rgpr = VMOVRH killed [[VMOVHR]], 14, $noreg
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRH]]
; CHECK-NOBF16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
entry:
  %add = fadd fast bfloat %x, %y
  ret bfloat %add
}
; A single flag (ninf) is also preserved on the f32 VADDS.
define bfloat @ninf_fadd(bfloat %x, bfloat %y) {
; CHECK-NOBF16-LABEL: name: ninf_fadd
; CHECK-NOBF16: bb.0.entry:
; CHECK-NOBF16-NEXT: liveins: $r0, $r1
; CHECK-NOBF16-NEXT: {{ $}}
; CHECK-NOBF16-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r1
; CHECK-NOBF16-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r0
; CHECK-NOBF16-NEXT: [[MOVsi:%[0-9]+]]:gpr = MOVsi [[COPY]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRS]]
; CHECK-NOBF16-NEXT: BL &__truncsfbf2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp, implicit-def $r0
; CHECK-NOBF16-NEXT: ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: [[COPY2:%[0-9]+]]:rgpr = COPY $r0
; CHECK-NOBF16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRH:%[0-9]+]]:rgpr = VMOVRH killed [[VMOVHR]], 14, $noreg
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRH]]
; CHECK-NOBF16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
entry:
  %add = fadd ninf bfloat %x, %y
  ret bfloat %add
}
; Check that when we have the right fast math flags the converts in between the
; two fadds are removed.
; Without fast-math flags the narrow/widen between the two fadds must stay:
; note the two separate __truncsfbf2 calls and the MOVsi/VMOVSR re-widening
; of the first result before the second VADDS.
define bfloat @normal_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
; CHECK-NOBF16-LABEL: name: normal_fadd_sequence
; CHECK-NOBF16: bb.0.entry:
; CHECK-NOBF16-NEXT: liveins: $r0, $r1, $r2
; CHECK-NOBF16-NEXT: {{ $}}
; CHECK-NOBF16-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r2
; CHECK-NOBF16-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r1
; CHECK-NOBF16-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $r0
; CHECK-NOBF16-NEXT: [[MOVsi:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY2]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS:%[0-9]+]]:spr = VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRS]]
; CHECK-NOBF16-NEXT: BL &__truncsfbf2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp, implicit-def $r0
; CHECK-NOBF16-NEXT: ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $r0
; CHECK-NOBF16-NEXT: [[MOVsi2:%[0-9]+]]:gpr = MOVsi [[COPY]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR2:%[0-9]+]]:spr = VMOVSR killed [[MOVsi2]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi3:%[0-9]+]]:gpr = MOVsi [[COPY3]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR3:%[0-9]+]]:spr = VMOVSR killed [[MOVsi3]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS1:%[0-9]+]]:spr = VADDS killed [[VMOVSR3]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRS1:%[0-9]+]]:gpr = VMOVRS killed [[VADDS1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRS1]]
; CHECK-NOBF16-NEXT: BL &__truncsfbf2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp, implicit-def $r0
; CHECK-NOBF16-NEXT: ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: [[COPY4:%[0-9]+]]:rgpr = COPY $r0
; CHECK-NOBF16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY4]], 14, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRH:%[0-9]+]]:rgpr = VMOVRH killed [[VMOVHR]], 14, $noreg
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRH]]
; CHECK-NOBF16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
entry:
  %add1 = fadd bfloat %x, %y
  %add2 = fadd bfloat %add1, %z
  ret bfloat %add2
}
; With nnan ninf contract, the converts between the two fadds are eliminated:
; the second VADDS consumes [[VADDS]] directly and there is only one
; __truncsfbf2 call, at the very end.
define bfloat @nnan_ninf_contract_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
; CHECK-NOBF16-LABEL: name: nnan_ninf_contract_fadd_sequence
; CHECK-NOBF16: bb.0.entry:
; CHECK-NOBF16-NEXT: liveins: $r0, $r1, $r2
; CHECK-NOBF16-NEXT: {{ $}}
; CHECK-NOBF16-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r2
; CHECK-NOBF16-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r1
; CHECK-NOBF16-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $r0
; CHECK-NOBF16-NEXT: [[MOVsi:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY2]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi2:%[0-9]+]]:gpr = MOVsi [[COPY]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR2:%[0-9]+]]:spr = VMOVSR killed [[MOVsi2]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS1:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VADDS]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRS]]
; CHECK-NOBF16-NEXT: BL &__truncsfbf2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp, implicit-def $r0
; CHECK-NOBF16-NEXT: ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: [[COPY3:%[0-9]+]]:rgpr = COPY $r0
; CHECK-NOBF16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY3]], 14, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRH:%[0-9]+]]:rgpr = VMOVRH killed [[VMOVHR]], 14, $noreg
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRH]]
; CHECK-NOBF16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
entry:
  %add1 = fadd nnan ninf contract bfloat %x, %y
  %add2 = fadd nnan ninf contract bfloat %add1, %z
  ret bfloat %add2
}
; ninf alone is not sufficient to remove the intermediate converts: both
; fadds are still followed by their own __truncsfbf2 call, and the first
; result is re-widened (MOVsi3/VMOVSR3) before the second VADDS.
define bfloat @ninf_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
; CHECK-NOBF16-LABEL: name: ninf_fadd_sequence
; CHECK-NOBF16: bb.0.entry:
; CHECK-NOBF16-NEXT: liveins: $r0, $r1, $r2
; CHECK-NOBF16-NEXT: {{ $}}
; CHECK-NOBF16-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r2
; CHECK-NOBF16-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r1
; CHECK-NOBF16-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $r0
; CHECK-NOBF16-NEXT: [[MOVsi:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY2]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRS]]
; CHECK-NOBF16-NEXT: BL &__truncsfbf2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp, implicit-def $r0
; CHECK-NOBF16-NEXT: ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $r0
; CHECK-NOBF16-NEXT: [[MOVsi2:%[0-9]+]]:gpr = MOVsi [[COPY]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR2:%[0-9]+]]:spr = VMOVSR killed [[MOVsi2]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[MOVsi3:%[0-9]+]]:gpr = MOVsi [[COPY3]], 130, 14 /* CC::al */, $noreg, $noreg
; CHECK-NOBF16-NEXT: [[VMOVSR3:%[0-9]+]]:spr = VMOVSR killed [[MOVsi3]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VADDS1:%[0-9]+]]:spr = ninf VADDS killed [[VMOVSR3]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRS1:%[0-9]+]]:gpr = VMOVRS killed [[VADDS1]], 14 /* CC::al */, $noreg
; CHECK-NOBF16-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRS1]]
; CHECK-NOBF16-NEXT: BL &__truncsfbf2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp, implicit-def $r0
; CHECK-NOBF16-NEXT: ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
; CHECK-NOBF16-NEXT: [[COPY4:%[0-9]+]]:rgpr = COPY $r0
; CHECK-NOBF16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY4]], 14, $noreg
; CHECK-NOBF16-NEXT: [[VMOVRH:%[0-9]+]]:rgpr = VMOVRH killed [[VMOVHR]], 14, $noreg
; CHECK-NOBF16-NEXT: $r0 = COPY [[VMOVRH]]
; CHECK-NOBF16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
entry:
  %add1 = fadd ninf bfloat %x, %y
  %add2 = fadd ninf bfloat %add1, %z
  ret bfloat %add2
}