
When expanding a load into two loads, use nuw for the add that computes the offset from the base of the second load, because the original load doesn't straddle the address space. It turns out there's already a dedicated helper function for doing this, `getObjectPtrOffset`. This is in target-independent code; however, in practice it only seems to affect WebAssembly code, because WebAssembly load and store instructions' constant offsets don't perform wrapping, so constant folding often depends on the nuw flag being present. This was noticed in the development of #119204.
138 lines
5.1 KiB
LLVM
138 lines
5.1 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -verify-machineinstrs -mcpu=mvp -mattr=+multivalue -target-abi=experimental-mv | FileCheck %s --check-prefix=MULTIVALUE
; RUN: llc < %s -verify-machineinstrs -mcpu=mvp | FileCheck %s --check-prefix=NO_MULTIVALUE

; Test libcall signatures when multivalue is enabled and disabled

target triple = "wasm32-unknown-unknown"
; i128 sdiv lowers to a call to the __divti3 libcall. With multivalue, the
; i128 result comes back in two i64 return values; without it, the caller
; passes a stack-allocated sret pointer (arg 0) and copies the result out.
define i128 @multivalue_sdiv(i128 %a, i128 %b) {
; MULTIVALUE-LABEL: multivalue_sdiv:
; MULTIVALUE:         .functype multivalue_sdiv (i64, i64, i64, i64) -> (i64, i64)
; MULTIVALUE-NEXT:    # %bb.0:
; MULTIVALUE-NEXT:    local.get 0
; MULTIVALUE-NEXT:    local.get 1
; MULTIVALUE-NEXT:    local.get 2
; MULTIVALUE-NEXT:    local.get 3
; MULTIVALUE-NEXT:    call __divti3
; MULTIVALUE-NEXT:    # fallthrough-return
;
; NO_MULTIVALUE-LABEL: multivalue_sdiv:
; NO_MULTIVALUE:         .functype multivalue_sdiv (i32, i64, i64, i64, i64) -> ()
; NO_MULTIVALUE-NEXT:    .local i32
; NO_MULTIVALUE-NEXT:    # %bb.0:
; NO_MULTIVALUE-NEXT:    global.get __stack_pointer
; NO_MULTIVALUE-NEXT:    i32.const 16
; NO_MULTIVALUE-NEXT:    i32.sub
; NO_MULTIVALUE-NEXT:    local.tee 5
; NO_MULTIVALUE-NEXT:    global.set __stack_pointer
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    local.get 1
; NO_MULTIVALUE-NEXT:    local.get 2
; NO_MULTIVALUE-NEXT:    local.get 3
; NO_MULTIVALUE-NEXT:    local.get 4
; NO_MULTIVALUE-NEXT:    call __divti3
; NO_MULTIVALUE-NEXT:    local.get 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i64.load 8
; NO_MULTIVALUE-NEXT:    i64.store 8
; NO_MULTIVALUE-NEXT:    local.get 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i64.load 0
; NO_MULTIVALUE-NEXT:    i64.store 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i32.const 16
; NO_MULTIVALUE-NEXT:    i32.add
; NO_MULTIVALUE-NEXT:    global.set __stack_pointer
; NO_MULTIVALUE-NEXT:    # fallthrough-return
  %div = sdiv i128 %a, %b
  ret i128 %div
}
; fp128 fsub lowers to a call to the __subtf3 libcall; same multivalue vs.
; sret-pointer return-convention split as the sdiv test above.
define fp128 @multivalue_fsub(fp128 %a, fp128 %b) {
; MULTIVALUE-LABEL: multivalue_fsub:
; MULTIVALUE:         .functype multivalue_fsub (i64, i64, i64, i64) -> (i64, i64)
; MULTIVALUE-NEXT:    # %bb.0:
; MULTIVALUE-NEXT:    local.get 0
; MULTIVALUE-NEXT:    local.get 1
; MULTIVALUE-NEXT:    local.get 2
; MULTIVALUE-NEXT:    local.get 3
; MULTIVALUE-NEXT:    call __subtf3
; MULTIVALUE-NEXT:    # fallthrough-return
;
; NO_MULTIVALUE-LABEL: multivalue_fsub:
; NO_MULTIVALUE:         .functype multivalue_fsub (i32, i64, i64, i64, i64) -> ()
; NO_MULTIVALUE-NEXT:    .local i32
; NO_MULTIVALUE-NEXT:    # %bb.0:
; NO_MULTIVALUE-NEXT:    global.get __stack_pointer
; NO_MULTIVALUE-NEXT:    i32.const 16
; NO_MULTIVALUE-NEXT:    i32.sub
; NO_MULTIVALUE-NEXT:    local.tee 5
; NO_MULTIVALUE-NEXT:    global.set __stack_pointer
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    local.get 1
; NO_MULTIVALUE-NEXT:    local.get 2
; NO_MULTIVALUE-NEXT:    local.get 3
; NO_MULTIVALUE-NEXT:    local.get 4
; NO_MULTIVALUE-NEXT:    call __subtf3
; NO_MULTIVALUE-NEXT:    local.get 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i64.load 8
; NO_MULTIVALUE-NEXT:    i64.store 8
; NO_MULTIVALUE-NEXT:    local.get 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i64.load 0
; NO_MULTIVALUE-NEXT:    i64.store 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i32.const 16
; NO_MULTIVALUE-NEXT:    i32.add
; NO_MULTIVALUE-NEXT:    global.set __stack_pointer
; NO_MULTIVALUE-NEXT:    # fallthrough-return
  %sub = fsub fp128 %a, %b
  ret fp128 %sub
}
; i128 shift lowers to a call to the __ashlti3 libcall (the IR performs shl
; despite the function name; checks match the IR). The shift amount is
; truncated to i32 with i32.wrap_i64 before the call.
define i128 @multivalue_lshr(i128 %a, i128 %b) {
; MULTIVALUE-LABEL: multivalue_lshr:
; MULTIVALUE:         .functype multivalue_lshr (i64, i64, i64, i64) -> (i64, i64)
; MULTIVALUE-NEXT:    # %bb.0:
; MULTIVALUE-NEXT:    local.get 2
; MULTIVALUE-NEXT:    local.get 3
; MULTIVALUE-NEXT:    local.get 0
; MULTIVALUE-NEXT:    i32.wrap_i64
; MULTIVALUE-NEXT:    call __ashlti3
; MULTIVALUE-NEXT:    # fallthrough-return
;
; NO_MULTIVALUE-LABEL: multivalue_lshr:
; NO_MULTIVALUE:         .functype multivalue_lshr (i32, i64, i64, i64, i64) -> ()
; NO_MULTIVALUE-NEXT:    .local i32
; NO_MULTIVALUE-NEXT:    # %bb.0:
; NO_MULTIVALUE-NEXT:    global.get __stack_pointer
; NO_MULTIVALUE-NEXT:    i32.const 16
; NO_MULTIVALUE-NEXT:    i32.sub
; NO_MULTIVALUE-NEXT:    local.tee 5
; NO_MULTIVALUE-NEXT:    global.set __stack_pointer
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    local.get 3
; NO_MULTIVALUE-NEXT:    local.get 4
; NO_MULTIVALUE-NEXT:    local.get 1
; NO_MULTIVALUE-NEXT:    i32.wrap_i64
; NO_MULTIVALUE-NEXT:    call __ashlti3
; NO_MULTIVALUE-NEXT:    local.get 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i64.load 8
; NO_MULTIVALUE-NEXT:    i64.store 8
; NO_MULTIVALUE-NEXT:    local.get 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i64.load 0
; NO_MULTIVALUE-NEXT:    i64.store 0
; NO_MULTIVALUE-NEXT:    local.get 5
; NO_MULTIVALUE-NEXT:    i32.const 16
; NO_MULTIVALUE-NEXT:    i32.add
; NO_MULTIVALUE-NEXT:    global.set __stack_pointer
; NO_MULTIVALUE-NEXT:    # fallthrough-return
  %tmp = shl i128 %b, %a
  ret i128 %tmp
}