// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +xcvalu -emit-llvm %s -o - \
// RUN:     | FileCheck %s

// Codegen test for the CORE-V ALU extension (xcvalu) builtins: each test
// wraps one builtin and FileCheck pins the exact IR clang must emit.
#include <stdint.h>

// CHECK-LABEL: @test_abs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[TMP0]], i1 true)
// CHECK-NEXT:    ret i32 [[TMP1]]
//
int test_abs(int a) {
  return __builtin_abs(a);
}

// CHECK-LABEL: @test_alu_slet(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = icmp sle i32 [[TMP0]], [[TMP1]]
// CHECK-NEXT:    [[SLE:%.*]] = zext i1 [[TMP2]] to i32
// CHECK-NEXT:    ret i32 [[SLE]]
//
int test_alu_slet(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_slet(a, b);
}

// CHECK-LABEL: @test_alu_sletu(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i32 [[TMP0]], [[TMP1]]
// CHECK-NEXT:    [[SLEU:%.*]] = zext i1 [[TMP2]] to i32
// CHECK-NEXT:    ret i32 [[SLEU]]
//
int test_alu_sletu(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_sletu(a, b);
}

// CHECK-LABEL: @test_alu_exths(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    store i16 [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[CONV:%.*]] = sext i16 [[TMP0]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[CONV]] to i16
// CHECK-NEXT:    [[EXTHS:%.*]] = sext i16 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTHS]]
//
int test_alu_exths(int16_t a) {
  return __builtin_riscv_cv_alu_exths(a);
}

// CHECK-LABEL: @test_alu_exthz(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    store i16 [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[CONV:%.*]] = zext i16 [[TMP0]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[CONV]] to i16
// CHECK-NEXT:    [[EXTHZ:%.*]] = zext i16 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTHZ]]
//
int test_alu_exthz(uint16_t a) {
  return __builtin_riscv_cv_alu_exthz(a);
}

// CHECK-LABEL: @test_alu_extbs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT:    store i8 [[A:%.*]], ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[CONV]] to i8
// CHECK-NEXT:    [[EXTBS:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTBS]]
//
int test_alu_extbs(int8_t a) {
  return __builtin_riscv_cv_alu_extbs(a);
}

// CHECK-LABEL: @test_alu_extbz(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT:    store i8 [[A:%.*]], ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP0]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[CONV]] to i8
// CHECK-NEXT:    [[EXTBZ:%.*]] = zext i8 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTBZ]]
//
int test_alu_extbz(uint8_t a) {
  return __builtin_riscv_cv_alu_extbz(a);
}

// CHECK-LABEL: @test_alu_clip(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.cv.alu.clip(i32 [[TMP0]], i32 15)
// CHECK-NEXT:    ret i32 [[TMP1]]
//
int test_alu_clip(int32_t a) {
  return __builtin_riscv_cv_alu_clip(a, 15);
}

// CHECK-LABEL: @test_alu_clipu(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.cv.alu.clipu(i32 [[TMP0]], i32 15)
// CHECK-NEXT:    ret i32 [[TMP1]]
//
int test_alu_clipu(uint32_t a) {
  return __builtin_riscv_cv_alu_clipu(a, 15);
}

// CHECK-LABEL: @test_alu_addN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.addN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_addN(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_addN(a, b, 0);
}

// CHECK-LABEL: @test_alu_adduN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.adduN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_adduN(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_adduN(a, b, 0);
}

// CHECK-LABEL: @test_alu_addRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.addRN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_addRN(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_addRN(a, b, 0);
}

// CHECK-LABEL: @test_alu_adduRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.adduRN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_adduRN(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_adduRN(a, b, 0);
}

// CHECK-LABEL: @test_alu_subN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_subN(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_subN(a, b, 0);
}

// CHECK-LABEL: @test_alu_subuN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subuN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_subuN(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_subuN(a, b, 0);
}

// CHECK-LABEL: @test_alu_subRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subRN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_subRN(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_subRN(a, b, 0);
}

// CHECK-LABEL: @test_alu_subuRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subuRN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_subuRN(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_subuRN(a, b, 0);
}