
addr matching was the only gatekeeper for starting selecting G_LOAD and G_STORE using SDAG patterns. * Introduce a complex renderer gi_addr for addr. In this patch only the existing functionality has been implemented. The renderer's name is the same as in SDAG: selectAddr. Apparently the type of GIComplexOperandMatcher doesn't matter as RISCV also uses s32 for both 64 and 32 bit pointers. * X86SelectAddress now is used for both: pattern matching and manual selection. As a result it accumulates all the code that previously was distributed among different selection functions. * Replace getLoadStoreOp with getPtrLoadStoreOp in Load/Store selector as GlobalISel matcher or emitter can't map the pointer type into i32/i64 types used in SDAG patterns for pointers. So the load and store selection of pointers is still manual. getLoadStoreOp is still present because it is used in G_FCONSTANT lowering that requires extra efforts to select it using SDAG patterns. * Since truncating stores are not supported, we custom legalize them by matching types of store and MMO. * Introduce a constant pool flag in X86AddressMode because otherwise we need to introduce a GlobalISel copy for X86ISelAddressMode. * Also please notice in the tests that GlobalISel prefers to fold memory operands immediately comparing to SDAG. The reason is that GlobalISel doesn't have target hooks in GIM_CheckIsSafeToFold. Or maybe another check on profitability is required along with safety check that is currently not present.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -global-isel=0 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -global-isel=0 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86
; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86
; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,GISEL-X86
; Unsigned 8-bit division: i8 operands are zero-extended into %eax/%al and
; divided with divb. GISEL-X86 emits an extra movzbl compared to the DAG
; selectors (no profitability check when folding, see header note).
define i8 @test_udiv_i8(i8 %arg1, i8 %arg2) nounwind {
; X64-LABEL: test_udiv_i8:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: divb %sil
; X64-NEXT: retq
;
; DAG-X86-LABEL: test_udiv_i8:
; DAG-X86: # %bb.0:
; DAG-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; DAG-X86-NEXT: divb {{[0-9]+}}(%esp)
; DAG-X86-NEXT: retl
;
; GISEL-X86-LABEL: test_udiv_i8:
; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movzbl %al, %eax
; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; GISEL-X86-NEXT: divb %cl
; GISEL-X86-NEXT: retl
  %ret = udiv i8 %arg1, %arg2
  ret i8 %ret
}
; Unsigned 16-bit division: %dx:%ax / src, so %edx must be zeroed before divw.
define i16 @test_udiv_i16(i16 %arg1, i16 %arg2) nounwind {
; X64-LABEL: test_udiv_i16:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divw %si
; X64-NEXT: retq
;
; DAG-X86-LABEL: test_udiv_i16:
; DAG-X86: # %bb.0:
; DAG-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; DAG-X86-NEXT: xorl %edx, %edx
; DAG-X86-NEXT: divw {{[0-9]+}}(%esp)
; DAG-X86-NEXT: retl
;
; GISEL-X86-LABEL: test_udiv_i16:
; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
; GISEL-X86-NEXT: xorl %edx, %edx
; GISEL-X86-NEXT: divw %cx
; GISEL-X86-NEXT: retl
  %ret = udiv i16 %arg1, %arg2
  ret i16 %ret
}
; Unsigned 32-bit division: %edx:%eax / src; all selectors agree on 32-bit
; i686, so a single X86 prefix covers both DAG and GlobalISel.
define i32 @test_udiv_i32(i32 %arg1, i32 %arg2) nounwind {
; X64-LABEL: test_udiv_i32:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divl %esi
; X64-NEXT: retq
;
; X86-LABEL: test_udiv_i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl {{[0-9]+}}(%esp)
; X86-NEXT: retl
  %ret = udiv i32 %arg1, %arg2
  ret i32 %ret
}
; Unsigned 64-bit division: native divq on x86-64; on i686 it is lowered to a
; libcall to __udivdi3 (DAG pushes args, GlobalISel stores them into the
; outgoing-argument area of a pre-allocated frame).
define i64 @test_udiv_i64(i64 %arg1, i64 %arg2) nounwind {
; X64-LABEL: test_udiv_i64:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divq %rsi
; X64-NEXT: retq
;
; DAG-X86-LABEL: test_udiv_i64:
; DAG-X86: # %bb.0:
; DAG-X86-NEXT: subl $12, %esp
; DAG-X86-NEXT: pushl {{[0-9]+}}(%esp)
; DAG-X86-NEXT: pushl {{[0-9]+}}(%esp)
; DAG-X86-NEXT: pushl {{[0-9]+}}(%esp)
; DAG-X86-NEXT: pushl {{[0-9]+}}(%esp)
; DAG-X86-NEXT: calll __udivdi3
; DAG-X86-NEXT: addl $28, %esp
; DAG-X86-NEXT: retl
;
; GISEL-X86-LABEL: test_udiv_i64:
; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: pushl %esi
; GISEL-X86-NEXT: subl $24, %esp
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; GISEL-X86-NEXT: movl %eax, (%esp)
; GISEL-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; GISEL-X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
; GISEL-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; GISEL-X86-NEXT: calll __udivdi3
; GISEL-X86-NEXT: addl $24, %esp
; GISEL-X86-NEXT: popl %esi
; GISEL-X86-NEXT: retl
  %ret = udiv i64 %arg1, %arg2
  ret i64 %ret
}