
This patch utilizes the -maix-small-local-exec-tls option to produce a faster, non-TOC-based access sequence for the local-exec TLS model, specifically for when the offsets from the TLS variable are non-zero. In particular, this patch produces either a single: - addi/la with a displacement off of R13 plus a non-zero offset for when an address is calculated, or - load or store off of R13 plus a non-zero offset for when an address is calculated and used for further access, where R13 is the thread pointer, respectively. In order to produce a single addi or load/store off of the thread pointer with a non-zero offset, this patch also adds the necessary support in the assembly printer when printing these instructions. Specifically: - The non-zero offset is added to the TLS variable address when the address of the TLS variable plus its offset is less than 32KB. - Otherwise, when the address of the TLS variable plus its offset is greater than or equal to 32KB, the non-zero offset (and a multiple of 64KB) is subtracted from the TLS address. This handling in the assembly printer is necessary to ensure that the TLS address plus the non-zero offset is within [-32768, 32768), so that the total displacement can fit within the addi/load/store instructions. This patch is meant to be a follow-up to 3f46e5453d9310b15d974e876f6132e3cf50c4b1 (where the optimization occurs for when the offset is zero).
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; Test that the local-exec TLS model with +aix-small-local-exec-tls produces
; direct (non-TOC-based) accesses off the thread pointer (r13), for both the
; small and large code models on 64-bit AIX.
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
; RUN:   -mtriple powerpc64-ibm-aix-xcoff -mattr=+aix-small-local-exec-tls < %s \
; RUN:   | FileCheck %s --check-prefix=SMALL-LOCAL-EXEC-SMALLCM64
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
; RUN:   -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
; RUN:   -mattr=+aix-small-local-exec-tls < %s | FileCheck %s \
; RUN:   --check-prefix=SMALL-LOCAL-EXEC-LARGECM64

@ThreadLocalVarInit = thread_local(localexec) global i32 1, align 4
@VarInit = local_unnamed_addr global i32 87, align 4
@IThreadLocalVarInit = internal thread_local(localexec) global i32 1, align 4
declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull) #1

%struct.anon = type { i32 }
@ThreadLocalStruct = thread_local(localexec) global %struct.anon zeroinitializer, align 1
@a = thread_local(localexec) global [87 x i32] zeroinitializer, align 4
; Address of a TLS array element at a non-zero offset (12 = 3 * sizeof(i32))
; folds into a single la off the thread pointer.
define nonnull ptr @AddrTest1() local_unnamed_addr #0 {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: AddrTest1:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    la r3, a[TL]@le+12(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: AddrTest1:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    la r3, a[TL]@le+12(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @a)
  %arrayidx = getelementptr inbounds [87 x i32], ptr %0, i64 0, i64 3
  ret ptr %arrayidx
}
; Underaligned (align 1) TLS struct: address still comes from a single la off
; r13; the load is not folded because of the alignment.
define signext i32 @testUnaligned() {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: testUnaligned:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    la r3, ThreadLocalStruct[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    lwa r3, 0(r3)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: testUnaligned:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    la r3, ThreadLocalStruct[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    lwa r3, 0(r3)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @ThreadLocalStruct)
  %x = getelementptr inbounds %struct.anon, ptr %0, i32 0, i32 0
  %1 = load i32, ptr %x, align 1
  ret i32 %1
}
; Store to an internal local-exec TLS variable: a single stw off r13.
define void @storeITLInit(i32 noundef signext %x) {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: storeITLInit:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    stw r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: storeITLInit:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    stw r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @IThreadLocalVarInit)
  store i32 %x, ptr %0, align 4
  ret void
}
; Store to an external-linkage local-exec TLS variable: a single stw off r13.
define void @storeTLInit(i32 noundef signext %x) {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: storeTLInit:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    stw r3, ThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: storeTLInit:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    stw r3, ThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ThreadLocalVarInit)
  store i32 %x, ptr %0, align 4
  ret void
}
; Sign-extending load of an internal local-exec TLS variable: a single lwa off r13.
define signext i32 @loadITLInit() {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: loadITLInit:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    lwa r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: loadITLInit:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    lwa r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @IThreadLocalVarInit)
  %1 = load i32, ptr %0, align 4
  ret i32 %1
}
; Mixed access: the TLS load is a single lwz off r13 even when combined with a
; TOC-based load of a normal global (which still goes through r2).
define signext i32 @loadITLInit2() {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: loadITLInit2:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    ld r4, L..C0(r2) # @VarInit
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    lwz r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    lwz r4, 0(r4)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    add r3, r4, r3
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    extsw r3, r3
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: loadITLInit2:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    addis r4, L..C0@u(r2)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    lwz r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    ld r4, L..C0@l(r4)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    lwz r4, 0(r4)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    add r3, r4, r3
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    extsw r3, r3
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @IThreadLocalVarInit)
  %1 = load i32, ptr %0, align 4
  %2 = load i32, ptr @VarInit, align 4
  %add = add nsw i32 %2, %1
  ret i32 %add
}
; Sign-extending load of an external-linkage local-exec TLS variable: a single
; lwa off r13.
define signext i32 @loadTLInit() {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: loadTLInit:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    lwa r3, ThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: loadTLInit:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    lwa r3, ThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ThreadLocalVarInit)
  %1 = load i32, ptr %0, align 4
  ret i32 %1
}
; Mixed access, external-linkage TLS variable: same pattern as loadITLInit2.
define signext i32 @loadTLInit2() {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: loadTLInit2:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    ld r4, L..C0(r2) # @VarInit
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    lwz r3, ThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    lwz r4, 0(r4)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    add r3, r4, r3
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    extsw r3, r3
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: loadTLInit2:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    addis r4, L..C0@u(r2)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    lwz r3, ThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    ld r4, L..C0@l(r4)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    lwz r4, 0(r4)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    add r3, r4, r3
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    extsw r3, r3
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ThreadLocalVarInit)
  %1 = load i32, ptr %0, align 4
  %2 = load i32, ptr @VarInit, align 4
  %add = add nsw i32 %2, %1
  ret i32 %add
}
; Read-modify-write of a local-exec TLS variable: both the load and the store
; are single instructions off r13; no address materialization is needed.
define void @loadStore1(i32 noundef signext %x) {
; SMALL-LOCAL-EXEC-SMALLCM64-LABEL: loadStore1:
; SMALL-LOCAL-EXEC-SMALLCM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    lwz r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    addi r3, r3, 9
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    stw r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-SMALLCM64-NEXT:    blr
;
; SMALL-LOCAL-EXEC-LARGECM64-LABEL: loadStore1:
; SMALL-LOCAL-EXEC-LARGECM64:       # %bb.0: # %entry
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    lwz r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    addi r3, r3, 9
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    stw r3, IThreadLocalVarInit[TL]@le(r13)
; SMALL-LOCAL-EXEC-LARGECM64-NEXT:    blr
entry:
  %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @IThreadLocalVarInit)
  %1 = load i32, ptr %0, align 4
  %add = add nsw i32 %1, 9
  store i32 %add, ptr %0, align 4
  ret void
}