[RISCV] Adjust check lines to reduce duplication
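
With a shared prefix in both RUN lines (--check-prefixes=CHECK,RV32 and
--check-prefixes=CHECK,RV64), update_llc_test_checks.py emits a single
CHECK block for any function whose RV32 and RV64 output is identical, and
FileCheck verifies that block under both RUN lines; only functions whose
codegen differs keep separate RV32/RV64 blocks. As a minimal illustrative
sketch in the style of this test (the function @example is hypothetical,
not part of the file), a shared block looks like:

define float @example(float %a) {
; CHECK-LABEL: example:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fadd.s fa0, fa0, fa0
; CHECK-NEXT:    ret
  %r = fadd float %a, %a
  ret float %r
}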

Philip Reames 2023-09-25 11:25:26 -07:00 committed by Philip Reames
parent 0cff5805f5
commit d9942319d7


@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

 define <4 x i32> @add_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
 ; RV32-LABEL: add_constant_rhs:
@@ -259,43 +259,24 @@ define <4 x i32> @udiv_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
 }

 define <4 x float> @fadd_constant_rhs(float %a, float %b, float %c, float %d) {
-; RV32-LABEL: fadd_constant_rhs:
-; RV32: # %bb.0:
-; RV32-NEXT: lui a0, 269184
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa4, fa0, fa5
-; RV32-NEXT: lui a0, 269440
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fadd.s fa1, fa1, fa0
-; RV32-NEXT: lui a0, 262144
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fadd.s fa2, fa2, fa0
-; RV32-NEXT: fadd.s fa5, fa3, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vfslide1down.vf v8, v8, fa4
-; RV32-NEXT: vfslide1down.vf v8, v8, fa1
-; RV32-NEXT: vfslide1down.vf v8, v8, fa2
-; RV32-NEXT: vfslide1down.vf v8, v8, fa5
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fadd_constant_rhs:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a0, 269184
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa4, fa0, fa5
-; RV64-NEXT: lui a0, 269440
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fadd.s fa1, fa1, fa0
-; RV64-NEXT: lui a0, 262144
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fadd.s fa2, fa2, fa0
-; RV64-NEXT: fadd.s fa5, fa3, fa5
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vfslide1down.vf v8, v8, fa4
-; RV64-NEXT: vfslide1down.vf v8, v8, fa1
-; RV64-NEXT: vfslide1down.vf v8, v8, fa2
-; RV64-NEXT: vfslide1down.vf v8, v8, fa5
-; RV64-NEXT: ret
+; CHECK-LABEL: fadd_constant_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 269184
+; CHECK-NEXT: fmv.w.x fa5, a0
+; CHECK-NEXT: fadd.s fa4, fa0, fa5
+; CHECK-NEXT: lui a0, 269440
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: fadd.s fa1, fa1, fa0
+; CHECK-NEXT: lui a0, 262144
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: fadd.s fa2, fa2, fa0
+; CHECK-NEXT: fadd.s fa5, fa3, fa5
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa4
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa1
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa2
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
+; CHECK-NEXT: ret
 %e0 = fadd float %a, 23.0
 %e1 = fadd float %b, 25.0
 %e2 = fadd float %c, 2.0
@@ -308,43 +289,24 @@ define <4 x float> @fadd_constant_rhs(float %a, float %b, float %c, float %d) {
 }

 define <4 x float> @fdiv_constant_rhs(float %a, float %b, float %c, float %d) {
-; RV32-LABEL: fdiv_constant_rhs:
-; RV32: # %bb.0:
-; RV32-NEXT: lui a0, 269184
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa4, fa0, fa5
-; RV32-NEXT: lui a0, 269440
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fdiv.s fa1, fa1, fa0
-; RV32-NEXT: lui a0, 266752
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fdiv.s fa2, fa2, fa0
-; RV32-NEXT: fdiv.s fa5, fa3, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vfslide1down.vf v8, v8, fa4
-; RV32-NEXT: vfslide1down.vf v8, v8, fa1
-; RV32-NEXT: vfslide1down.vf v8, v8, fa2
-; RV32-NEXT: vfslide1down.vf v8, v8, fa5
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fdiv_constant_rhs:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a0, 269184
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa4, fa0, fa5
-; RV64-NEXT: lui a0, 269440
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fdiv.s fa1, fa1, fa0
-; RV64-NEXT: lui a0, 266752
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fdiv.s fa2, fa2, fa0
-; RV64-NEXT: fdiv.s fa5, fa3, fa5
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vfslide1down.vf v8, v8, fa4
-; RV64-NEXT: vfslide1down.vf v8, v8, fa1
-; RV64-NEXT: vfslide1down.vf v8, v8, fa2
-; RV64-NEXT: vfslide1down.vf v8, v8, fa5
-; RV64-NEXT: ret
+; CHECK-LABEL: fdiv_constant_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 269184
+; CHECK-NEXT: fmv.w.x fa5, a0
+; CHECK-NEXT: fdiv.s fa4, fa0, fa5
+; CHECK-NEXT: lui a0, 269440
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: fdiv.s fa1, fa1, fa0
+; CHECK-NEXT: lui a0, 266752
+; CHECK-NEXT: fmv.w.x fa0, a0
+; CHECK-NEXT: fdiv.s fa2, fa2, fa0
+; CHECK-NEXT: fdiv.s fa5, fa3, fa5
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa4
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa1
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa2
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
+; CHECK-NEXT: ret
 %e0 = fdiv float %a, 23.0
 %e1 = fdiv float %b, 25.0
 %e2 = fdiv float %c, 10.0