llvm-project/clang/test/CodeGen/builtin-counted-by-ref.c
Bill Wendling 7475156d49
[Clang] Add __builtin_counted_by_ref builtin (#114495)
The __builtin_counted_by_ref builtin is used on a flexible array
pointer and returns a pointer to the "counted_by" attribute's COUNT
argument, which is a field in the same non-anonymous struct as the
flexible array member. This is useful for automatically setting the
count field without needing the programmer's intervention. Otherwise
it's possible to get this anti-pattern:
    
      ptr = alloc(<ty>, ..., COUNT);
      ptr->FAM[9] = 42; /* <<< Sanitizer will complain */
      ptr->count = COUNT;
    
To prevent this anti-pattern, the user can create an allocator that
automatically performs the assignment:
    
      #define alloc(TY, FAM, COUNT) ({ \
          TY __p = alloc(get_size(TY, COUNT));             \
          if (__builtin_counted_by_ref(__p->FAM))          \
              *__builtin_counted_by_ref(__p->FAM) = COUNT; \
          __p;                                             \
      })

The builtin's behavior depends heavily on the "counted_by" attribute
being present. Its main utility is during allocation, to avoid the
above anti-pattern. If the flexible array member doesn't have that
attribute, the builtin becomes a no-op. Therefore, if the flexible
array member has a "count" field not referenced by "counted_by", it
must be set explicitly after the allocation, as this builtin will
return a null "void *" and the assignment will most likely be elided.
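
When the attribute may be absent, the stored-to lvalue can be selected
with "_Generic" so the code still compiles against the null "void *"
result (a minimal sketch mirroring the test3 pattern below; "__ignored"
is just a scratch variable, not part of the builtin's interface):

      unsigned long __ignored;
      *_Generic(__builtin_counted_by_ref(__p->FAM),
          void *: &__ignored,
          default: __builtin_counted_by_ref(__p->FAM)) = COUNT;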

---------

Co-authored-by: Bill Wendling <isanbard@gmail.com>
Co-authored-by: Aaron Ballman <aaron@aaronballman.com>
2024-11-07 22:03:55 +00:00


// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s --check-prefix=X86_64
// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck %s --check-prefix=I386
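
// The flexible array member's "counted_by" attribute references "count",
// so the builtin in test1 below resolves to a pointer to that field.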
struct a {
  char x;
  short count;
  int array[] __attribute__((counted_by(count)));
};
// X86_64-LABEL: define dso_local ptr @test1(
// X86_64-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0:[0-9]+]] {
// X86_64-NEXT: [[ENTRY:.*:]]
// X86_64-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// X86_64-NEXT: [[P:%.*]] = alloca ptr, align 8
// X86_64-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// X86_64-NEXT: [[MUL:%.*]] = mul i64 4, [[CONV]]
// X86_64-NEXT: [[ADD:%.*]] = add i64 4, [[MUL]]
// X86_64-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 noundef [[ADD]]) #[[ATTR2:[0-9]+]]
// X86_64-NEXT: store ptr [[CALL]], ptr [[P]], align 8
// X86_64-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV1:%.*]] = trunc i32 [[TMP1]] to i16
// X86_64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[TMP2]], i32 0, i32 1
// X86_64-NEXT: store i16 [[CONV1]], ptr [[DOT_COUNTED_BY_GEP]], align 2
// X86_64-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: ret ptr [[TMP3]]
//
// I386-LABEL: define dso_local ptr @test1(
// I386-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0:[0-9]+]] {
// I386-NEXT: [[ENTRY:.*:]]
// I386-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// I386-NEXT: [[P:%.*]] = alloca ptr, align 4
// I386-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[MUL:%.*]] = mul i32 4, [[TMP0]]
// I386-NEXT: [[ADD:%.*]] = add i32 4, [[MUL]]
// I386-NEXT: [[CALL:%.*]] = call ptr @malloc(i32 noundef [[ADD]]) #[[ATTR2:[0-9]+]]
// I386-NEXT: store ptr [[CALL]], ptr [[P]], align 4
// I386-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i16
// I386-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[TMP2]], i32 0, i32 1
// I386-NEXT: store i16 [[CONV]], ptr [[DOT_COUNTED_BY_GEP]], align 2
// I386-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: ret ptr [[TMP3]]
//
struct a *test1(int size) {
  struct a *p = __builtin_malloc(sizeof(struct a) + sizeof(int) * size);
  *__builtin_counted_by_ref(p->array) = size;
  return p;
}
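
// The "count" field and the flexible array member live in deeply nested
// anonymous structs; test2 below checks that the builtin still resolves
// to the correct GEP into the enclosing non-anonymous struct.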
struct b {
  int _filler;
  struct {
    int __filler;
    struct {
      int ___filler;
      struct {
        char count;
      };
    };
  };
  struct {
    int filler_;
    struct {
      int filler__;
      struct {
        long array[] __attribute__((counted_by(count)));
      };
    };
  };
};
// X86_64-LABEL: define dso_local ptr @test2(
// X86_64-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0]] {
// X86_64-NEXT: [[ENTRY:.*:]]
// X86_64-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// X86_64-NEXT: [[P:%.*]] = alloca ptr, align 8
// X86_64-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// X86_64-NEXT: [[MUL:%.*]] = mul i64 4, [[CONV]]
// X86_64-NEXT: [[ADD:%.*]] = add i64 4, [[MUL]]
// X86_64-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 noundef [[ADD]]) #[[ATTR2]]
// X86_64-NEXT: store ptr [[CALL]], ptr [[P]], align 8
// X86_64-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV1:%.*]] = trunc i32 [[TMP1]] to i8
// X86_64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[TMP2]], i32 0, i32 1, i32 1, i32 1, i32 0
// X86_64-NEXT: store i8 [[CONV1]], ptr [[DOT_COUNTED_BY_GEP]], align 1
// X86_64-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: ret ptr [[TMP3]]
//
// I386-LABEL: define dso_local ptr @test2(
// I386-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0]] {
// I386-NEXT: [[ENTRY:.*:]]
// I386-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// I386-NEXT: [[P:%.*]] = alloca ptr, align 4
// I386-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[MUL:%.*]] = mul i32 4, [[TMP0]]
// I386-NEXT: [[ADD:%.*]] = add i32 4, [[MUL]]
// I386-NEXT: [[CALL:%.*]] = call ptr @malloc(i32 noundef [[ADD]]) #[[ATTR2]]
// I386-NEXT: store ptr [[CALL]], ptr [[P]], align 4
// I386-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8
// I386-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[TMP2]], i32 0, i32 1, i32 1, i32 1, i32 0
// I386-NEXT: store i8 [[CONV]], ptr [[DOT_COUNTED_BY_GEP]], align 1
// I386-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: ret ptr [[TMP3]]
//
struct b *test2(int size) {
  struct b *p = __builtin_malloc(sizeof(struct a) + sizeof(int) * size);
  *__builtin_counted_by_ref(p->array) = size;
  return p;
}
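
// No "counted_by" attribute here, so the builtin degenerates to a null
// "void *"; test3 below uses _Generic to redirect the store into a local
// dummy variable instead of dereferencing the result directly.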
struct c {
  char x;
  short count;
  int array[];
};
// X86_64-LABEL: define dso_local ptr @test3(
// X86_64-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0]] {
// X86_64-NEXT: [[ENTRY:.*:]]
// X86_64-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// X86_64-NEXT: [[P:%.*]] = alloca ptr, align 8
// X86_64-NEXT: [[__IGNORED:%.*]] = alloca i64, align 8
// X86_64-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// X86_64-NEXT: [[MUL:%.*]] = mul i64 4, [[CONV]]
// X86_64-NEXT: [[ADD:%.*]] = add i64 4, [[MUL]]
// X86_64-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 noundef [[ADD]]) #[[ATTR2]]
// X86_64-NEXT: store ptr [[CALL]], ptr [[P]], align 8
// X86_64-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
// X86_64-NEXT: store i64 [[CONV1]], ptr [[__IGNORED]], align 8
// X86_64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: ret ptr [[TMP2]]
//
// I386-LABEL: define dso_local ptr @test3(
// I386-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0]] {
// I386-NEXT: [[ENTRY:.*:]]
// I386-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// I386-NEXT: [[P:%.*]] = alloca ptr, align 4
// I386-NEXT: [[__IGNORED:%.*]] = alloca i32, align 4
// I386-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[MUL:%.*]] = mul i32 4, [[TMP0]]
// I386-NEXT: [[ADD:%.*]] = add i32 4, [[MUL]]
// I386-NEXT: [[CALL:%.*]] = call ptr @malloc(i32 noundef [[ADD]]) #[[ATTR2]]
// I386-NEXT: store ptr [[CALL]], ptr [[P]], align 4
// I386-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: store i32 [[TMP1]], ptr [[__IGNORED]], align 4
// I386-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: ret ptr [[TMP2]]
//
struct c *test3(int size) {
  struct c *p = __builtin_malloc(sizeof(struct c) + sizeof(int) * size);
  unsigned long int __ignored;
  *_Generic(
      __builtin_counted_by_ref(p->array),
      void *: &__ignored,
      default: __builtin_counted_by_ref(p->array)) = size;
  return p;
}