[RISCV] Add a negative case for strided load matching

This covers the bug identified in review of PR #65777.
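
For context, the new test exercises an index vector whose i8 elements wrap; a minimal sketch of the pattern, taken from the diff below, is:

    %ptrs = getelementptr inbounds i32, ptr %base, <4 x i8> <i8 0, i8 128, i8 0, i8 128>
    %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)

At i8 width the offsets look evenly strided, but GEP indices are signed, so i8 128 is -128 and the byte offset with i32 elements is -128 * 4 = -512 (hence the "li a1, -512" in the generated code). Matching this as a strided load at index type would therefore be incorrect.
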
Authored by Philip Reames on 2023-09-08 13:29:34 -07:00; committed by Philip Reames
parent d01f559ce9
commit 0a298277d3


@@ -13294,127 +13294,84 @@ define <4 x i32> @mgather_unit_stride_load_wide_idx(ptr %base) {
ret <4 x i32> %v
}
; TODO: Recognize as strided load with SEW=32
define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
; RV32-LABEL: mgather_strided_2xSEW:
; This looks like a strided load (at i8), but isn't at index type.
define <4 x i32> @mgather_narrow_edge_case(ptr %base) {
; RV32-LABEL: mgather_narrow_edge_case:
; RV32: # %bb.0:
; RV32-NEXT: lui a1, %hi(.LCPI106_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI106_0)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle32.v v10, (a1)
; RV32-NEXT: vluxei32.v v8, (a0), v10
; RV32-NEXT: li a1, -512
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.i v0, 5
; RV32-NEXT: vmv.v.x v8, a1
; RV32-NEXT: vmerge.vim v8, v8, 0, v0
; RV32-NEXT: vluxei32.v v8, (a0), v8
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_strided_2xSEW:
; RV64V-LABEL: mgather_narrow_edge_case:
; RV64V: # %bb.0:
; RV64V-NEXT: lui a1, %hi(.LCPI106_0)
; RV64V-NEXT: addi a1, a1, %lo(.LCPI106_0)
; RV64V-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64V-NEXT: vle64.v v12, (a1)
; RV64V-NEXT: vluxei64.v v8, (a0), v12
; RV64V-NEXT: li a1, -512
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT: vmv.v.x v8, a1
; RV64V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV64V-NEXT: vmv.v.i v0, 5
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT: vmerge.vim v10, v8, 0, v0
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_strided_2xSEW:
; RV64ZVE32F-LABEL: mgather_narrow_edge_case:
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmset.m v8
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: # implicit-def: $v8
; RV64ZVE32F-NEXT: beqz zero, .LBB106_9
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB106_10
; RV64ZVE32F-NEXT: .LBB106_2: # %else2
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: bnez a2, .LBB106_11
; RV64ZVE32F-NEXT: .LBB106_3: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: bnez a2, .LBB106_12
; RV64ZVE32F-NEXT: .LBB106_4: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: bnez a2, .LBB106_13
; RV64ZVE32F-NEXT: .LBB106_5: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB106_14
; RV64ZVE32F-NEXT: .LBB106_6: # %else14
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: bnez a2, .LBB106_15
; RV64ZVE32F-NEXT: .LBB106_7: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: bnez a1, .LBB106_16
; RV64ZVE32F-NEXT: .LBB106_8: # %else20
; RV64ZVE32F-NEXT: bnez zero, .LBB106_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vlse32.v v8, (a0), zero
; RV64ZVE32F-NEXT: .LBB106_2: # %else
; RV64ZVE32F-NEXT: andi a3, a1, 2
; RV64ZVE32F-NEXT: addi a2, a0, -512
; RV64ZVE32F-NEXT: bnez a3, .LBB106_6
; RV64ZVE32F-NEXT: # %bb.3: # %else2
; RV64ZVE32F-NEXT: andi a3, a1, 4
; RV64ZVE32F-NEXT: bnez a3, .LBB106_7
; RV64ZVE32F-NEXT: .LBB106_4: # %else5
; RV64ZVE32F-NEXT: andi a1, a1, 8
; RV64ZVE32F-NEXT: bnez a1, .LBB106_8
; RV64ZVE32F-NEXT: .LBB106_5: # %else8
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB106_9: # %cond.load
; RV64ZVE32F-NEXT: vlse16.v v8, (a0), zero
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB106_2
; RV64ZVE32F-NEXT: .LBB106_10: # %cond.load1
; RV64ZVE32F-NEXT: addi a2, a0, 2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
; RV64ZVE32F-NEXT: .LBB106_6: # %cond.load1
; RV64ZVE32F-NEXT: lw a3, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a3
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB106_3
; RV64ZVE32F-NEXT: .LBB106_11: # %cond.load4
; RV64ZVE32F-NEXT: addi a2, a0, 8
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB106_4
; RV64ZVE32F-NEXT: .LBB106_12: # %cond.load7
; RV64ZVE32F-NEXT: addi a2, a0, 10
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB106_5
; RV64ZVE32F-NEXT: .LBB106_13: # %cond.load10
; RV64ZVE32F-NEXT: addi a2, a0, 16
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB106_6
; RV64ZVE32F-NEXT: .LBB106_14: # %cond.load13
; RV64ZVE32F-NEXT: addi a2, a0, 18
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB106_7
; RV64ZVE32F-NEXT: .LBB106_15: # %cond.load16
; RV64ZVE32F-NEXT: addi a2, a0, 24
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB106_8
; RV64ZVE32F-NEXT: .LBB106_16: # %cond.load19
; RV64ZVE32F-NEXT: addi a0, a0, 26
; RV64ZVE32F-NEXT: lh a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: andi a3, a1, 4
; RV64ZVE32F-NEXT: beqz a3, .LBB106_4
; RV64ZVE32F-NEXT: .LBB106_7: # %cond.load4
; RV64ZVE32F-NEXT: lw a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a1, a1, 8
; RV64ZVE32F-NEXT: beqz a1, .LBB106_5
; RV64ZVE32F-NEXT: .LBB106_8: # %cond.load7
; RV64ZVE32F-NEXT: lw a0, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i16 0
%allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
%v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
ret <8 x i16> %v
%head = insertelement <4 x i1> poison, i1 true, i32 0
%allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%ptrs = getelementptr inbounds i32, ptr %base, <4 x i8> <i8 0, i8 128, i8 0, i8 128>
%v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
ret <4 x i32> %v
}
; TODO: Recognize as indexed load with SEW=32
define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV32-LABEL: mgather_gather_2xSEW:
; TODO: Recognize as strided load with SEW=32
define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
; RV32-LABEL: mgather_strided_2xSEW:
; RV32: # %bb.0:
; RV32-NEXT: lui a1, %hi(.LCPI107_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI107_0)
@@ -13423,7 +13380,7 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV32-NEXT: vluxei32.v v8, (a0), v10
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_gather_2xSEW:
; RV64V-LABEL: mgather_strided_2xSEW:
; RV64V: # %bb.0:
; RV64V-NEXT: lui a1, %hi(.LCPI107_0)
; RV64V-NEXT: addi a1, a1, %lo(.LCPI107_0)
@@ -13432,7 +13389,7 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64V-NEXT: vluxei64.v v8, (a0), v12
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_gather_2xSEW:
; RV64ZVE32F-LABEL: mgather_strided_2xSEW:
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmset.m v8
@@ -13476,7 +13433,7 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB107_3
; RV64ZVE32F-NEXT: .LBB107_11: # %cond.load4
; RV64ZVE32F-NEXT: addi a2, a0, 4
; RV64ZVE32F-NEXT: addi a2, a0, 8
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
@@ -13484,7 +13441,7 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB107_4
; RV64ZVE32F-NEXT: .LBB107_12: # %cond.load7
; RV64ZVE32F-NEXT: addi a2, a0, 6
; RV64ZVE32F-NEXT: addi a2, a0, 10
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
@@ -13508,7 +13465,7 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB107_7
; RV64ZVE32F-NEXT: .LBB107_15: # %cond.load16
; RV64ZVE32F-NEXT: addi a2, a0, 20
; RV64ZVE32F-NEXT: addi a2, a0, 24
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
@@ -13516,6 +13473,123 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB107_8
; RV64ZVE32F-NEXT: .LBB107_16: # %cond.load19
; RV64ZVE32F-NEXT: addi a0, a0, 26
; RV64ZVE32F-NEXT: lh a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
; RV64ZVE32F-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i16 0
%allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
%v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
ret <8 x i16> %v
}
; TODO: Recognize as indexed load with SEW=32
define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV32-LABEL: mgather_gather_2xSEW:
; RV32: # %bb.0:
; RV32-NEXT: lui a1, %hi(.LCPI108_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI108_0)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle32.v v10, (a1)
; RV32-NEXT: vluxei32.v v8, (a0), v10
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_gather_2xSEW:
; RV64V: # %bb.0:
; RV64V-NEXT: lui a1, %hi(.LCPI108_0)
; RV64V-NEXT: addi a1, a1, %lo(.LCPI108_0)
; RV64V-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64V-NEXT: vle64.v v12, (a1)
; RV64V-NEXT: vluxei64.v v8, (a0), v12
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_gather_2xSEW:
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmset.m v8
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: # implicit-def: $v8
; RV64ZVE32F-NEXT: beqz zero, .LBB108_9
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB108_10
; RV64ZVE32F-NEXT: .LBB108_2: # %else2
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: bnez a2, .LBB108_11
; RV64ZVE32F-NEXT: .LBB108_3: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: bnez a2, .LBB108_12
; RV64ZVE32F-NEXT: .LBB108_4: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: bnez a2, .LBB108_13
; RV64ZVE32F-NEXT: .LBB108_5: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB108_14
; RV64ZVE32F-NEXT: .LBB108_6: # %else14
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: bnez a2, .LBB108_15
; RV64ZVE32F-NEXT: .LBB108_7: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: bnez a1, .LBB108_16
; RV64ZVE32F-NEXT: .LBB108_8: # %else20
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB108_9: # %cond.load
; RV64ZVE32F-NEXT: vlse16.v v8, (a0), zero
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB108_2
; RV64ZVE32F-NEXT: .LBB108_10: # %cond.load1
; RV64ZVE32F-NEXT: addi a2, a0, 2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB108_3
; RV64ZVE32F-NEXT: .LBB108_11: # %cond.load4
; RV64ZVE32F-NEXT: addi a2, a0, 4
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB108_4
; RV64ZVE32F-NEXT: .LBB108_12: # %cond.load7
; RV64ZVE32F-NEXT: addi a2, a0, 6
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB108_5
; RV64ZVE32F-NEXT: .LBB108_13: # %cond.load10
; RV64ZVE32F-NEXT: addi a2, a0, 16
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB108_6
; RV64ZVE32F-NEXT: .LBB108_14: # %cond.load13
; RV64ZVE32F-NEXT: addi a2, a0, 18
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB108_7
; RV64ZVE32F-NEXT: .LBB108_15: # %cond.load16
; RV64ZVE32F-NEXT: addi a2, a0, 20
; RV64ZVE32F-NEXT: lh a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB108_8
; RV64ZVE32F-NEXT: .LBB108_16: # %cond.load19
; RV64ZVE32F-NEXT: addi a0, a0, 22
; RV64ZVE32F-NEXT: lh a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma