llvm-project/clang/test/CodeGen/AArch64/struct-coerce-using-ptr.cpp
Antonio Frighetto 9e0c06d708 [clang][CodeGen] Set dead_on_return when passing arguments indirectly
Let Clang emit the `dead_on_return` attribute on pointer arguments
that are passed indirectly, namely, large aggregates that the
ABI mandates be passed by value; the parameter object is therefore
destroyed within the callee, and writes to such arguments are not
observable by the caller after the callee returns.

This should enable further MemCpyOpt/DSE optimizations.

Previous discussion: https://discourse.llvm.org/t/rfc-add-dead-on-return-attribute/86871.
2025-07-18 11:50:18 +02:00
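
For illustration (hypothetical names, mirroring the Sppp/Tppp case checked in the test below): a struct of three pointers occupies 24 bytes on AArch64, which exceeds the 16-byte AAPCS64 limit for passing composites in registers, so it is passed indirectly and the callee's pointer parameter is marked `dead_on_return`. A minimal sketch:

struct Big { int *x, *y, *z; };   // 24 bytes on AArch64, so the ABI passes it indirectly
void use(Big b) { b.y = nullptr; }  // this store targets the callee-owned copy and is dead once use returns
// Expected parameter in the generated IR (same pattern as Tppp below):
//   define void @_Z3use3Big(ptr dead_on_return noundef %b)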

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple aarch64-none-elf -fcxx-exceptions -fexceptions -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-A64
// RUN: %clang_cc1 -triple arm64_32-apple-ios7.0 -fcxx-exceptions -fexceptions -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-A64_32
struct Sll {
  long long x, y;
};
// CHECK-A64-LABEL: define dso_local void @_Z3Tll3Sll(
// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SLL:%.*]], align 8
// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SLL]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store i64 1, ptr [[X]], align 8
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z3Tll3Sll(
// CHECK-A64_32-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SLL:%.*]], align 8
// CHECK-A64_32-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SLL]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i64 1, ptr [[X]], align 8
// CHECK-A64_32-NEXT: ret void
//
void Tll(Sll s) { s.x = 1; }
struct Sp {
  int *x;
};
// CHECK-A64-LABEL: define dso_local void @_Z2Tp2Sp(
// CHECK-A64-SAME: ptr [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SP:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store ptr [[S_COERCE]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z2Tp2Sp(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SP:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[COERCE_VAL_II:%.*]] = trunc i64 [[S_COERCE]] to i32
// CHECK-A64_32-NEXT: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tp(Sp s) { *s.x = 1; }
struct Spp {
  int *x, *y;
};
// CHECK-A64-LABEL: define dso_local void @_Z3Tpp3Spp(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP:%.*]], align 8
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z3Tpp3Spp(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP:%.*]], align 4
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpp(Spp s) { *s.x = 1; }
struct Sppp {
  int *x, *y, *z;
};
// CHECK-A64-LABEL: define dso_local void @_Z4Tppp4Sppp(
// CHECK-A64-SAME: ptr dead_on_return noundef [[S:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-A64-NEXT: store ptr [[S]], ptr [[S_INDIRECT_ADDR]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPPP:%.*]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z4Tppp4Sppp(
// CHECK-A64_32-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPPP:%.*]], align 4
// CHECK-A64_32-NEXT: [[TMP_COERCE:%.*]] = alloca [2 x i64], align 8
// CHECK-A64_32-NEXT: store [2 x i64] [[S_COERCE]], ptr [[TMP_COERCE]], align 8
// CHECK-A64_32-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[S]], ptr align 8 [[TMP_COERCE]], i32 12, i1 false)
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tppp(Sppp s) { *s.x = 1; }
struct Spi {
  int *x, y;
};
// CHECK-A64-LABEL: define dso_local void @_Z3Tpi3Spi(
// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPI:%.*]], align 8
// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPI]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z3Tpi3Spi(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPI:%.*]], align 4
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPI]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpi(Spi s) { *s.x = 1; }
struct Srp {
  int &x, *y;
};
// CHECK-A64-LABEL: define dso_local void @_Z3Trp3Srp(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SRP:%.*]], align 8
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SRP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8, !nonnull [[META2:![0-9]+]], !align [[META3:![0-9]+]]
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z3Trp3Srp(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SRP:%.*]], align 4
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SRP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4, !nonnull [[META2:![0-9]+]], !align [[META3:![0-9]+]]
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Trp(Srp s) { s.x = 1; }
struct __attribute__((__packed__)) Spp_packed {
  int *x, *y;
};
// CHECK-A64-LABEL: define dso_local void @_Z10Tpp_packed10Spp_packed(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_PACKED:%.*]], align 1
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 1
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_PACKED]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 1
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z10Tpp_packed10Spp_packed(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_PACKED:%.*]], align 1
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 1
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_PACKED]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 1
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpp_packed(Spp_packed s) { *s.x = 1; }
struct __attribute__((__packed__)) Spp_superpacked {
  Spp_packed x;
};
// CHECK-A64-LABEL: define dso_local void @_Z15Tpp_superpacked15Spp_superpacked(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_SUPERPACKED:%.*]], align 1
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_SUPERPACKED]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[COERCE_DIVE]], align 1
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_SUPERPACKED]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[X1:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_PACKED:%.*]], ptr [[X]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X1]], align 1
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z15Tpp_superpacked15Spp_superpacked(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_SUPERPACKED:%.*]], align 1
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_SUPERPACKED]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[COERCE_DIVE]], align 1
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_SUPERPACKED]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[X1:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_PACKED:%.*]], ptr [[X]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X1]], align 1
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpp_superpacked(Spp_superpacked s) { *s.x.x = 1; }
union Upp {
  int *x;
  long long *y;
};
// CHECK-A64-LABEL: define dso_local void @_Z11Tupp_packed3Upp(
// CHECK-A64-SAME: ptr [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[UNION_UPP:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[UNION_UPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store ptr [[S_COERCE]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z11Tupp_packed3Upp(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[UNION_UPP:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[UNION_UPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[COERCE_VAL_II:%.*]] = trunc i64 [[S_COERCE]] to i32
// CHECK-A64_32-NEXT: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tupp_packed(Upp s) { *s.x = 1; }
union USpp {
  Spp s;
  long long y;
};
// CHECK-A64-LABEL: define dso_local void @_Z12TUSpp_packed4USpp(
// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[UNION_USPP:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[UNION_USPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP:%.*]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z12TUSpp_packed4USpp(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[UNION_USPP:%.*]], align 8
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[UNION_USPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP:%.*]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void TUSpp_packed(USpp s) { *s.s.x = 1; }
struct Spf {
  int *x;
  int z[];
};
// CHECK-A64-LABEL: define dso_local void @_Z3Tpf3Spf(
// CHECK-A64-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPF:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPF]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[COERCE_VAL_IP:%.*]] = inttoptr i64 [[S_COERCE]] to ptr
// CHECK-A64-NEXT: store ptr [[COERCE_VAL_IP]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPF]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z3Tpf3Spf(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPF:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPF]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[COERCE_VAL_II:%.*]] = trunc i64 [[S_COERCE]] to i32
// CHECK-A64_32-NEXT: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPF]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpf(Spf s) { *s.x = 1; }
struct Sppf {
  int *x, *y;
  int z[];
};
// CHECK-A64-LABEL: define dso_local void @_Z4Tppf4Sppf(
// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPPF:%.*]], align 8
// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPPF]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z4Tppf4Sppf(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPPF:%.*]], align 4
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPPF]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tppf(Sppf s) { *s.x = 1; }
struct SSpSp {
  struct Sp a, b;
};
// CHECK-A64-LABEL: define dso_local void @_Z5TSpSp5SSpSp(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPSP:%.*]], align 8
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPSP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP:%.*]], ptr [[A]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z5TSpSp5SSpSp(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPSP:%.*]], align 4
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 4
// CHECK-A64_32-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPSP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP:%.*]], ptr [[A]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void TSpSp(SSpSp s) { *s.a.x = 1; }
struct SSpp {
  Spp a;
};
// CHECK-A64-LABEL: define dso_local void @_Z4TSpp4SSpp(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPP:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP:%.*]], ptr [[A]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z4TSpp4SSpp(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPP:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP:%.*]], ptr [[A]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void TSpp(SSpp s) { *s.a.x = 1; }
struct SSp : public Sp {
  int* b;
};
// CHECK-A64-LABEL: define dso_local void @_Z3TSp3SSp(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SSP:%.*]], align 8
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP:%.*]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z3TSp3SSp(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SSP:%.*]], align 4
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP:%.*]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void TSp(SSp s) { *s.x = 1; }
struct Si {
  int x;
};
struct SSpi : public Si {
  int* y;
};
// CHECK-A64-LABEL: define dso_local void @_Z4TSpi4SSpi(
// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPI:%.*]], align 8
// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SI:%.*]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store i32 1, ptr [[X]], align 8
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z4TSpi4SSpi(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPI:%.*]], align 4
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SI:%.*]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i32 1, ptr [[X]], align 4
// CHECK-A64_32-NEXT: ret void
//
void TSpi(SSpi s) { s.x = 1; }
struct Spa {
  int* xs[1];
};
// CHECK-A64-LABEL: define dso_local void @_Z3Tpa3Spa(
// CHECK-A64-SAME: ptr [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPA:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store ptr [[S_COERCE]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[XS:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1 x ptr], ptr [[XS]], i64 0, i64 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z3Tpa3Spa(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPA:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[COERCE_VAL_II:%.*]] = trunc i64 [[S_COERCE]] to i32
// CHECK-A64_32-NEXT: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[XS:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1 x ptr], ptr [[XS]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpa(Spa s) { *s.xs[0] = 1; }
struct Spa2 {
  int* xs[2];
};
// CHECK-A64-LABEL: define dso_local void @_Z4Tpa24Spa2(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPA2:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA2]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[XS:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA2]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x ptr], ptr [[XS]], i64 0, i64 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z4Tpa24Spa2(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPA2:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA2]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[XS:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA2]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x ptr], ptr [[XS]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpa2(Spa2 s) { *s.xs[0] = 1; }
struct Spa3 {
  int* xs[3];
};
// CHECK-A64-LABEL: define dso_local void @_Z4Tpa34Spa3(
// CHECK-A64-SAME: ptr dead_on_return noundef [[S:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-A64-NEXT: store ptr [[S]], ptr [[S_INDIRECT_ADDR]], align 8
// CHECK-A64-NEXT: [[XS:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA3:%.*]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x ptr], ptr [[XS]], i64 0, i64 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z4Tpa34Spa3(
// CHECK-A64_32-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPA3:%.*]], align 4
// CHECK-A64_32-NEXT: [[TMP_COERCE:%.*]] = alloca [2 x i64], align 8
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA3]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store [2 x i64] [[S_COERCE]], ptr [[TMP_COERCE]], align 8
// CHECK-A64_32-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[COERCE_DIVE]], ptr align 8 [[TMP_COERCE]], i32 12, i1 false)
// CHECK-A64_32-NEXT: [[XS:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA3]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x ptr], ptr [[XS]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpa3(Spa3 s) { *s.xs[0] = 1; }
struct __attribute__((aligned(16))) Spp_align16 {
  int *x, *y;
};
// CHECK-A64-LABEL: define dso_local void @_Z11Tpp_align1611Spp_align16(
// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_ALIGN16:%.*]], align 16
// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 16
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_ALIGN16]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 16
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z11Tpp_align1611Spp_align16(
// CHECK-A64_32-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_ALIGN16:%.*]], align 16
// CHECK-A64_32-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 16
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_ALIGN16]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 16
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpp_align16(Spp_align16 s) { *s.x = 1; }
struct SSpp_align16 {
  Spp_align16 a;
};
// CHECK-A64-LABEL: define dso_local void @_Z12TSpp_align1612SSpp_align16(
// CHECK-A64-SAME: i128 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPP_ALIGN16:%.*]], align 16
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP_ALIGN16]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store i128 [[S_COERCE]], ptr [[COERCE_DIVE]], align 16
// CHECK-A64-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP_ALIGN16]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_ALIGN16:%.*]], ptr [[A]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 16
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z12TSpp_align1612SSpp_align16(
// CHECK-A64_32-SAME: i128 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPP_ALIGN16:%.*]], align 16
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP_ALIGN16]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i128 [[S_COERCE]], ptr [[COERCE_DIVE]], align 16
// CHECK-A64_32-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP_ALIGN16]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_ALIGN16:%.*]], ptr [[A]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 16
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void TSpp_align16(SSpp_align16 s) { *s.a.x = 1; }
struct Sempty {
};
// CHECK-A64-LABEL: define dso_local void @_Z6Tempty6Sempty(
// CHECK-A64-SAME: i8 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SEMPTY:%.*]], align 1
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SEMPTY]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store i8 [[S_COERCE]], ptr [[COERCE_DIVE]], align 1
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z6Tempty6Sempty(
// CHECK-A64_32-SAME: i8 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SEMPTY:%.*]], align 1
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SEMPTY]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i8 [[S_COERCE]], ptr [[COERCE_DIVE]], align 1
// CHECK-A64_32-NEXT: ret void
//
void Tempty(Sempty s) { }
struct SpSempty {
  Sempty y;
  int *x;
};
// CHECK-A64-LABEL: define dso_local void @_Z8TpSempty8SpSempty(
// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPSEMPTY:%.*]], align 8
// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPSEMPTY]], ptr [[S]], i32 0, i32 1
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z8TpSempty8SpSempty(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPSEMPTY:%.*]], align 4
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[S]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPSEMPTY]], ptr [[S]], i32 0, i32 1
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void TpSempty(SpSempty s) { *s.x = 1; }
struct Spaddrspace {
  __attribute__((address_space(100))) int *x;
};
// CHECK-A64-LABEL: define dso_local void @_Z11Tpaddrspace11Spaddrspace(
// CHECK-A64-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPADDRSPACE:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[COERCE_VAL_IP:%.*]] = inttoptr i64 [[S_COERCE]] to ptr addrspace(100)
// CHECK-A64-NEXT: store ptr addrspace(100) [[COERCE_VAL_IP]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[X]], align 8
// CHECK-A64-NEXT: store i32 1, ptr addrspace(100) [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z11Tpaddrspace11Spaddrspace(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPADDRSPACE:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[COERCE_VAL_II:%.*]] = trunc i64 [[S_COERCE]] to i32
// CHECK-A64_32-NEXT: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[X]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr addrspace(100) [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tpaddrspace(Spaddrspace s) { *s.x = 1; }
// CHECK-A64-LABEL: define dso_local void @_Z11Cpaddrspacev(
// CHECK-A64-SAME: ) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPADDRSPACE:%.*]], align 8
// CHECK-A64-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_SPADDRSPACE]], align 8
// CHECK-A64-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP]], ptr align 8 [[S]], i64 8, i1 false)
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPADDRSPACE]], ptr [[AGG_TMP]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[COERCE_VAL_PI:%.*]] = ptrtoint ptr addrspace(100) [[TMP0]] to i64
// CHECK-A64-NEXT: call void @_Z11Tpaddrspace11Spaddrspace(i64 [[COERCE_VAL_PI]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z11Cpaddrspacev(
// CHECK-A64_32-SAME: ) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SPADDRSPACE:%.*]], align 4
// CHECK-A64_32-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_SPADDRSPACE]], align 4
// CHECK-A64_32-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[AGG_TMP]], ptr align 4 [[S]], i32 4, i1 false)
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPADDRSPACE]], ptr [[AGG_TMP]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[COERCE_VAL_PI:%.*]] = ptrtoint ptr addrspace(100) [[TMP0]] to i32
// CHECK-A64_32-NEXT: [[COERCE_VAL_II:%.*]] = zext i32 [[COERCE_VAL_PI]] to i64
// CHECK-A64_32-NEXT: call void @_Z11Tpaddrspace11Spaddrspace(i64 [[COERCE_VAL_II]])
// CHECK-A64_32-NEXT: ret void
//
void Cpaddrspace() { Spaddrspace s; Tpaddrspace(s); }
struct Sp2addrspace {
  __attribute__((address_space(100))) int *x[2];
};
// CHECK-A64-LABEL: define dso_local void @_Z12Tp2addrspace12Sp2addrspace(
// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SP2ADDRSPACE:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SP2ADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP2ADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x ptr addrspace(100)], ptr [[X]], i64 0, i64 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[ARRAYIDX]], align 8
// CHECK-A64-NEXT: store i32 1, ptr addrspace(100) [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z12Tp2addrspace12Sp2addrspace(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SP2ADDRSPACE:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SP2ADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: store i64 [[S_COERCE]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP2ADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x ptr addrspace(100)], ptr [[X]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[ARRAYIDX]], align 4
// CHECK-A64_32-NEXT: store i32 1, ptr addrspace(100) [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Tp2addrspace(Sp2addrspace s) { *s.x[0] = 1; }
// CHECK-A64-LABEL: define dso_local void @_Z12Cp2addrspacev(
// CHECK-A64-SAME: ) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SP2ADDRSPACE:%.*]], align 8
// CHECK-A64-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_SP2ADDRSPACE]], align 8
// CHECK-A64-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP]], ptr align 8 [[S]], i64 16, i1 false)
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SP2ADDRSPACE]], ptr [[AGG_TMP]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load [2 x i64], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: call void @_Z12Tp2addrspace12Sp2addrspace([2 x i64] [[TMP0]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z12Cp2addrspacev(
// CHECK-A64_32-SAME: ) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SP2ADDRSPACE:%.*]], align 4
// CHECK-A64_32-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_SP2ADDRSPACE]], align 4
// CHECK-A64_32-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[AGG_TMP]], ptr align 4 [[S]], i32 8, i1 false)
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SP2ADDRSPACE]], ptr [[AGG_TMP]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load i64, ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: call void @_Z12Tp2addrspace12Sp2addrspace(i64 [[TMP0]])
// CHECK-A64_32-NEXT: ret void
//
void Cp2addrspace() { Sp2addrspace s; Tp2addrspace(s); }
struct Sraddrspace {
  __attribute__((address_space(100))) int &x;
};
// CHECK-A64-LABEL: define dso_local void @_Z11Traddrspace11Sraddrspace(
// CHECK-A64-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SRADDRSPACE:%.*]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SRADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[COERCE_VAL_IP:%.*]] = inttoptr i64 [[S_COERCE]] to ptr addrspace(100)
// CHECK-A64-NEXT: store ptr addrspace(100) [[COERCE_VAL_IP]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SRADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[X]], align 8, !align [[META3]]
// CHECK-A64-NEXT: store i32 1, ptr addrspace(100) [[TMP0]], align 4
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z11Traddrspace11Sraddrspace(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SRADDRSPACE:%.*]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SRADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[COERCE_VAL_II:%.*]] = trunc i64 [[S_COERCE]] to i32
// CHECK-A64_32-NEXT: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SRADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[X]], align 4, !align [[META3]]
// CHECK-A64_32-NEXT: store i32 1, ptr addrspace(100) [[TMP0]], align 4
// CHECK-A64_32-NEXT: ret void
//
void Traddrspace(Sraddrspace s) { s.x = 1; }
// CHECK-A64-LABEL: define dso_local void @_Z11Craddrspace11Sraddrspace(
// CHECK-A64-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SRADDRSPACE:%.*]], align 8
// CHECK-A64-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_SRADDRSPACE]], align 8
// CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SRADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64-NEXT: [[COERCE_VAL_IP:%.*]] = inttoptr i64 [[S_COERCE]] to ptr addrspace(100)
// CHECK-A64-NEXT: store ptr addrspace(100) [[COERCE_VAL_IP]], ptr [[COERCE_DIVE]], align 8
// CHECK-A64-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP]], ptr align 8 [[S]], i64 8, i1 false)
// CHECK-A64-NEXT: [[COERCE_DIVE1:%.*]] = getelementptr inbounds nuw [[STRUCT_SRADDRSPACE]], ptr [[AGG_TMP]], i32 0, i32 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[COERCE_DIVE1]], align 8
// CHECK-A64-NEXT: [[COERCE_VAL_PI:%.*]] = ptrtoint ptr addrspace(100) [[TMP0]] to i64
// CHECK-A64-NEXT: call void @_Z11Traddrspace11Sraddrspace(i64 [[COERCE_VAL_PI]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A64_32-LABEL: define void @_Z11Craddrspace11Sraddrspace(
// CHECK-A64_32-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64_32-NEXT: [[ENTRY:.*:]]
// CHECK-A64_32-NEXT: [[S:%.*]] = alloca [[STRUCT_SRADDRSPACE:%.*]], align 4
// CHECK-A64_32-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_SRADDRSPACE]], align 4
// CHECK-A64_32-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SRADDRSPACE]], ptr [[S]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[COERCE_VAL_II:%.*]] = trunc i64 [[S_COERCE]] to i32
// CHECK-A64_32-NEXT: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
// CHECK-A64_32-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[AGG_TMP]], ptr align 4 [[S]], i32 4, i1 false)
// CHECK-A64_32-NEXT: [[COERCE_DIVE1:%.*]] = getelementptr inbounds nuw [[STRUCT_SRADDRSPACE]], ptr [[AGG_TMP]], i32 0, i32 0
// CHECK-A64_32-NEXT: [[TMP0:%.*]] = load ptr addrspace(100), ptr [[COERCE_DIVE1]], align 4
// CHECK-A64_32-NEXT: [[COERCE_VAL_PI:%.*]] = ptrtoint ptr addrspace(100) [[TMP0]] to i32
// CHECK-A64_32-NEXT: [[COERCE_VAL_II2:%.*]] = zext i32 [[COERCE_VAL_PI]] to i64
// CHECK-A64_32-NEXT: call void @_Z11Traddrspace11Sraddrspace(i64 [[COERCE_VAL_II2]])
// CHECK-A64_32-NEXT: ret void
//
void Craddrspace(Sraddrspace s) { Traddrspace(s); }
//.
// CHECK-A64: [[META2]] = !{}
// CHECK-A64: [[META3]] = !{i64 4}
//.
// CHECK-A64_32: [[META2]] = !{}
// CHECK-A64_32: [[META3]] = !{i64 4}
//.