llvm-project/clang/test/CodeGen/builtin-masked.c
Joseph Huber 9888f0c3c4
[Clang] Add builtins for masked vector loads / stores (#154464)
Summary:
Clang has support for boolean vectors; these builtins expose the LLVM
intrinsics of the same name. They differ from a manual load and select
in that traps from deactivated (masked-off) lanes may be suppressed.

Fixes: https://github.com/llvm/llvm-project/issues/107753
2025-08-20 13:33:32 -05:00
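As a rough usage sketch (not part of this commit; it assumes only the two-argument __builtin_masked_load and three-argument __builtin_masked_store forms exercised by the test below, with the same vector typedefs), the builtins touch only the lanes whose mask bit is set:

typedef int v8i __attribute__((ext_vector_type(8)));    // same typedefs as the test
typedef _Bool v8b __attribute__((ext_vector_type(8)));

// Copy only the active lanes from src to dst. Memory for inactive lanes is
// neither read nor written, so a partially unmapped vector cannot fault the
// way an unconditional vector load followed by a select could.
static void copy_active_lanes(v8b m, v8i *src, v8i *dst) {
  v8i v = __builtin_masked_load(m, src); // inactive result lanes are unspecified
  __builtin_masked_store(m, v, dst);     // inactive lanes of *dst are untouched
}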

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
typedef int v8i __attribute__((ext_vector_type(8)));
typedef _Bool v8b __attribute__((ext_vector_type(8)));
// CHECK-LABEL: define dso_local <8 x i32> @test_load(
// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef [[P:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i1> [[M1]] to i8
// CHECK-NEXT: store i8 [[TMP0]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP2]], i32 32, <8 x i1> [[TMP1]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
v8i test_load(v8b m, v8i *p) {
  return __builtin_masked_load(m, p);
}
// CHECK-LABEL: define dso_local void @test_store(
// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]], ptr noundef [[P:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[V_ADDR:%.*]] = alloca <8 x i32>, align 32
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
// CHECK-NEXT: [[V:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store <8 x i32> [[V]], ptr [[V_ADDR]], align 32
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr [[TMP4]], i32 32, <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_store(v8b m, v8i v, v8i *p) {
  __builtin_masked_store(m, v, p);
}