
A proposed fix for issue #95611, "[OpenMP][SIMD] ordered has no effect in a loop SIMD region as of LLVM 18.1.0".

Changes:
- Implement new lowering behavior: conservatively serialize `omp simd` loops that contain an `omp ordered simd` directive, to prevent incorrect vectorization (which otherwise miscompiles the program and changes its execution behavior).

Implementation outline (a sketch follows this list):
- We start from the optimistic default initial value `LoopStack.setParallel(/*Enable=*/true);` in `CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D)`.
- We disable the loop-parallel memory-access assumption with `if (HasOrderedDirective) LoopStack.setParallel(/*Enable=*/false);`, where `HasOrderedDirective` tests for the presence of an `OMPOrderedDirective`.
- As a result, the loop is no longer incorrectly vectorized when the `omp ordered simd` directive is present.
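The outline above, as a minimal code sketch (not the verbatim patch). `hasOrderedDirective` is a hypothetical helper standing in for however `HasOrderedDirective` is actually computed from the loop directive's associated statement; the surrounding body of `EmitOMPSimdInit` is elided.

```cpp
// Sketch of the intended change in clang/lib/CodeGen/CGStmtOpenMP.cpp.
// `hasOrderedDirective` is a placeholder name for the detection logic.
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
  // Optimistic default: assume the simd loop has no loop-carried
  // dependences, which allows it to be marked parallel (and its memory
  // accesses to be tagged with !llvm.access.group).
  LoopStack.setParallel(/*Enable=*/true);

  // Conservative override: an `ordered simd` region inside the loop
  // imposes an ordering that makes vectorization under the
  // parallel-accesses assumption unsafe, so drop that assumption.
  const bool HasOrderedDirective = hasOrderedDirective(D.getAssociatedStmt());
  if (HasOrderedDirective)
    LoopStack.setParallel(/*Enable=*/false);

  // ... the rest of the simd initialization (safelen/simdlen handling)
  // remains as before ...
}
```

In `clang/test/OpenMP/ordered_codegen.cpp` this shows up as the simd loops in `foo_simd` no longer carrying `!llvm.access.group` metadata, while the other ordered loops are unaffected.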
Motivation: we would like to prevent incorrect vectorization of loops marked with the `#pragma omp ordered simd` directive, which has previously resulted in miscompiled code. At the same time, usage outside of the `#pragma omp ordered simd` context should remain unaffected: note that in the test "clang/test/OpenMP/ordered_codegen.cpp" we only "lose" the `!llvm.access.group` metadata in `foo_simd` alone. This is conservative: some of these loops might still be legal to vectorize, but we prefer that over miscompiling the loops that are currently illegal to vectorize.

A concrete example follows:

```cpp
// "test.c"
#include <float.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int compare_float(float x1, float x2, float scalar) {
  const float diff = fabsf(x1 - x2);
  x1 = fabsf(x1);
  x2 = fabsf(x2);
  const float l = (x2 > x1) ? x2 : x1;
  if (diff <= l * scalar * FLT_EPSILON)
    return 1;
  else
    return 0;
}

#define ARRAY_SIZE 256

__attribute__((noinline)) void initialization_loop(
    float X[ARRAY_SIZE][ARRAY_SIZE], float Y[ARRAY_SIZE][ARRAY_SIZE]) {
  const float max = 1000.0;
  srand(time(NULL));
  for (int r = 0; r < ARRAY_SIZE; r++) {
    for (int c = 0; c < ARRAY_SIZE; c++) {
      X[r][c] = ((float)rand() / (float)(RAND_MAX)) * max;
      Y[r][c] = X[r][c];
    }
  }
}

__attribute__((noinline)) void omp_simd_loop(float X[ARRAY_SIZE][ARRAY_SIZE]) {
  for (int r = 1; r < ARRAY_SIZE; ++r) {
    for (int c = 1; c < ARRAY_SIZE; ++c) {
#pragma omp simd
      for (int k = 2; k < ARRAY_SIZE; ++k) {
#pragma omp ordered simd
        X[r][k] = X[r][k - 2] + sinf((float)(r / c));
      }
    }
  }
}

__attribute__((noinline)) int comparison_loop(float X[ARRAY_SIZE][ARRAY_SIZE],
                                              float Y[ARRAY_SIZE][ARRAY_SIZE]) {
  int totalErrors_simd = 0;
  const float scalar = 1.0;
  for (int r = 1; r < ARRAY_SIZE; ++r) {
    for (int c = 1; c < ARRAY_SIZE; ++c) {
      for (int k = 2; k < ARRAY_SIZE; ++k) {
        Y[r][k] = Y[r][k - 2] + sinf((float)(r / c));
      }
    }
    // check row for simd update
    for (int k = 0; k < ARRAY_SIZE; ++k) {
      if (!compare_float(X[r][k], Y[r][k], scalar)) {
        ++totalErrors_simd;
      }
    }
  }
  return totalErrors_simd;
}

int main(void) {
  float X[ARRAY_SIZE][ARRAY_SIZE];
  float Y[ARRAY_SIZE][ARRAY_SIZE];

  initialization_loop(X, Y);
  omp_simd_loop(X);

  const int totalErrors_simd = comparison_loop(X, Y);

  if (totalErrors_simd) {
    fprintf(stdout, "totalErrors_simd: %d \n", totalErrors_simd);
    fprintf(stdout, "%s : %d - FAIL: error in ordered simd computation.\n", __FILE__, __LINE__);
  } else {
    fprintf(stdout, "Success!\n");
  }

  return totalErrors_simd;
}
```

Before:

```
$ clang -fopenmp-simd -O3 -ffast-math -lm test.c -o test && ./test
totalErrors_simd: 15408
test.c : 76 - FAIL: error in ordered simd computation.
```

clang 19.1.0: https://godbolt.org/z/6EvhxqEhe

After:

```
$ clang -fopenmp-simd -O3 -ffast-math test.c -o test && ./test
Success!
```

Co-authored-by: Matt P. Dziubinski <matt-p.dziubinski@hpe.com>
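For intuition on why the `k` loop in `omp_simd_loop` above must not be vectorized: iteration `k` reads the value written two iterations earlier, a loop-carried dependence of distance 2, so executing a block of lanes wider than that distance "simultaneously" consumes stale inputs. A small standalone illustration (plain C++, independent of the patch itself; all names here are ours):

```cpp
#include <cstdio>

// Serial semantics of the dependent update: iteration k reads a[k - 2]
// only after iteration k - 2 has written it (dependence distance 2).
void serial(float *a, int n) {
  for (int k = 2; k < n; ++k)
    a[k] = a[k - 2] + 1.0f;
}

// What a vectorizer that wrongly assumes independent iterations would do:
// gather all a[k - 2] inputs for a block of lanes first, then write the
// outputs. Lanes past the dependence distance observe stale values.
void pseudo_vectorized(float *a, int n, int width) {
  for (int k = 2; k < n; k += width) {
    float tmp[8];
    const int lanes = (n - k < width) ? (n - k) : width;
    for (int l = 0; l < lanes; ++l) // "vector load" of the inputs
      tmp[l] = a[k + l - 2];
    for (int l = 0; l < lanes; ++l) // "vector store" of the outputs
      a[k + l] = tmp[l] + 1.0f;
  }
}

int main() {
  float x[10] = {0}, y[10] = {0};
  serial(x, 10);
  pseudo_vectorized(y, 10, /*width=*/4); // width 4 > dependence distance 2
  for (int k = 0; k < 10; ++k)
    std::printf("k=%d serial=%g \"vectorized\"=%g\n", k, x[k], y[k]);
  return 0;
}
```

With the fix, the presence of `#pragma omp ordered simd` keeps the compiler from assuming the iterations are independent, so this gather-then-store style transformation is no longer applied to such loops.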
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefixes=CHECK1
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK1

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefixes=CHECK1-IRBUILDER
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK1-IRBUILDER

// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -fopenmp-version=45 -o - | FileCheck %s --check-prefixes=CHECK3
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -fopenmp-version=45 -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK3

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -fopenmp-version=45 -o - | FileCheck %s --check-prefixes=CHECK3-IRBUILDER
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -fopenmp-version=45 -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK3-IRBUILDER

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

void static_not_chunked(float *a, float *b, float *c, float *d) {
#pragma omp for schedule(static) ordered

// Loop header

  for (int i = 32000000; i > 33; i += -7) {
// Start of body: calculate i from IV:

// ... start of ordered region ...
// ... loop body ...
// End of body: store into a[i]:
// ... end of ordered region ...
#pragma omp ordered
    a[i] = b[i] * c[i] * d[i];
  }
}

void dynamic1(float *a, float *b, float *c, float *d) {
#pragma omp for schedule(dynamic) ordered

// Loop header

  for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// Start of body: calculate i from IV:

// ... start of ordered region ...
// ... loop body ...
// End of body: store into a[i]:
// ... end of ordered region ...
#pragma omp ordered threads
    a[i] = b[i] * c[i] * d[i];

// ... end iteration for ordered loop ...
  }
}

void test_auto(float *a, float *b, float *c, float *d) {
  unsigned int x = 0;
  unsigned int y = 0;
#pragma omp for schedule(auto) collapse(2) ordered

// Loop header

// FIXME: When the iteration count of some nested loop is not a known constant,
// we should pre-calculate it, like we do for the total number of iterations!
  for (char i = static_cast<char>(y); i <= '9'; ++i)
    for (x = 11; x > 0; --x) {
// Start of body: indices are calculated from IV:

// ... start of ordered region ...
// ... loop body ...
// End of body: store into a[i]:
// ... end of ordered region ...
#pragma omp ordered
      a[i] = b[i] * c[i] * d[i];

// ... end iteration for ordered loop ...
    }
}

void runtime(float *a, float *b, float *c, float *d) {
  int x = 0;
#pragma omp for collapse(2) schedule(runtime) ordered

// Loop header

  for (unsigned char i = '0' ; i <= '9'; ++i)
    for (x = -10; x < 10; ++x) {
// Start of body: indices are calculated from IV:

// ... start of ordered region ...
// ... loop body ...
// End of body: store into a[i]:
// ... end of ordered region ...
#pragma omp ordered threads
      a[i] = b[i] * c[i] * d[i];

// ... end iteration for ordered loop ...
    }
}

float f[10];
void foo_simd(int low, int up) {
#pragma omp simd
  for (int i = low; i < up; ++i) {
    f[i] = 0.0;
#pragma omp ordered simd
    f[i] = 1.0;
  }
#pragma omp for simd ordered
  for (int i = low; i < up; ++i) {
    f[i] = 0.0;
#pragma omp ordered simd
    f[i] = 1.0;
  }
}


#endif // HEADER

// CHECK1-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 7
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP10]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM1]]
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
|
|
// CHECK1-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM4]]
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
|
|
// CHECK1-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP14]]
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM7]]
|
|
// CHECK1-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
|
|
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 16908287, ptr [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_8u(ptr @[[GLOB1]], i32 [[TMP0]], i32 1073741891, i64 0, i64 16908287, i64 1, i64 1)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_8u(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP2]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add i64 [[TMP4]], 1
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP3]], [[ADD]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP5]], 127
|
|
// CHECK1-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK1-NEXT: store i64 [[ADD1]], ptr [[I]], align 8
|
|
// CHECK1-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[I]], align 8
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i64 [[TMP7]]
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[I]], align 8
|
|
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP9]], i64 [[TMP10]]
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
|
|
// CHECK1-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[I]], align 8
|
|
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP13]]
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
|
|
// CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP14]]
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[I]], align 8
|
|
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[TMP15]], i64 [[TMP16]]
|
|
// CHECK1-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[ADD7:%.*]] = add i64 [[TMP17]], 1
|
|
// CHECK1-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_8u(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
|
|
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[X6:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I8:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[X9:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[X]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[Y]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[Y]], align 4
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8
|
|
// CHECK1-NEXT: store i8 [[CONV]], ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK1-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
|
|
// CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK1-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: store i8 [[TMP3]], ptr [[I]], align 1
|
|
// CHECK1-NEXT: store i32 11, ptr [[X6]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[CONV7:%.*]] = sext i8 [[TMP4]] to i32
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_8(ptr @[[GLOB1]], i32 [[TMP0]], i32 1073741894, i64 0, i64 [[TMP6]], i64 1, i64 1)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_8(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[CONV11:%.*]] = sext i8 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP12]], 11
|
|
// CHECK1-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
|
|
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[CONV11]], [[MUL13]]
|
|
// CHECK1-NEXT: [[CONV15:%.*]] = trunc i64 [[ADD14]] to i8
|
|
// CHECK1-NEXT: store i8 [[CONV15]], ptr [[I8]], align 1
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP14]], 11
|
|
// CHECK1-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 11
|
|
// CHECK1-NEXT: [[SUB18:%.*]] = sub nsw i64 [[TMP13]], [[MUL17]]
|
|
// CHECK1-NEXT: [[MUL19:%.*]] = mul nsw i64 [[SUB18]], 1
|
|
// CHECK1-NEXT: [[SUB20:%.*]] = sub nsw i64 11, [[MUL19]]
|
|
// CHECK1-NEXT: [[CONV21:%.*]] = trunc i64 [[SUB20]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV21]], ptr [[X9]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK1-NEXT: [[IDXPROM22:%.*]] = sext i8 [[TMP19]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM22]]
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX23]], align 4
|
|
// CHECK1-NEXT: [[MUL24:%.*]] = fmul float [[TMP17]], [[TMP20]]
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK1-NEXT: [[IDXPROM25:%.*]] = sext i8 [[TMP22]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM25]]
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX26]], align 4
|
|
// CHECK1-NEXT: [[MUL27:%.*]] = fmul float [[MUL24]], [[TMP23]]
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK1-NEXT: [[IDXPROM28:%.*]] = sext i8 [[TMP25]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[IDXPROM28]]
|
|
// CHECK1-NEXT: store float [[MUL27]], ptr [[ARRAYIDX29]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[ADD30:%.*]] = add nsw i64 [[TMP26]], 1
|
|
// CHECK1-NEXT: store i64 [[ADD30]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_8(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
|
|
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[X2:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[X]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 199, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 1073741893, i32 0, i32 199, i32 1, i32 1)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 20
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
|
|
// CHECK1-NEXT: store i8 [[CONV]], ptr [[I]], align 1
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP7]], 20
|
|
// CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 20
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], [[MUL4]]
|
|
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 -10, [[MUL5]]
|
|
// CHECK1-NEXT: store i32 [[ADD6]], ptr [[X2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP9]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK1-NEXT: [[IDXPROM7:%.*]] = zext i8 [[TMP12]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[IDXPROM7]]
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX8]], align 4
|
|
// CHECK1-NEXT: [[MUL9:%.*]] = fmul float [[TMP10]], [[TMP13]]
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK1-NEXT: [[IDXPROM10:%.*]] = zext i8 [[TMP15]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds nuw float, ptr [[TMP14]], i64 [[IDXPROM10]]
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX11]], align 4
|
|
// CHECK1-NEXT: [[MUL12:%.*]] = fmul float [[MUL9]], [[TMP16]]
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK1-NEXT: [[IDXPROM13:%.*]] = zext i8 [[TMP18]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds nuw float, ptr [[TMP17]], i64 [[IDXPROM13]]
|
|
// CHECK1-NEXT: store float [[MUL12]], ptr [[ARRAYIDX14]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z8foo_simdii
|
|
// CHECK1-SAME: (i32 noundef [[LOW:%.*]], i32 noundef [[UP:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I5:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV16:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I26:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I28:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[UP_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
|
|
// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
|
|
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
|
|
// CHECK1: simd.if.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[ADD6:%.*]] = add i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP8]], [[ADD6]]
|
|
// CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP11]], 1
|
|
// CHECK1-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: call void @__captured_stmt(ptr [[I5]])
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD9:%.*]] = add i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[TMP15]], [[TMP16]]
|
|
// CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
|
|
// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1
|
|
// CHECK1-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], 1
|
|
// CHECK1-NEXT: [[MUL14:%.*]] = mul i32 [[DIV13]], 1
|
|
// CHECK1-NEXT: [[ADD15:%.*]] = add i32 [[TMP14]], [[MUL14]]
|
|
// CHECK1-NEXT: store i32 [[ADD15]], ptr [[I5]], align 4
|
|
// CHECK1-NEXT: br label [[SIMD_IF_END]]
|
|
// CHECK1: simd.if.end:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR_19]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK1-NEXT: [[SUB21:%.*]] = sub i32 [[TMP19]], [[TMP20]]
|
|
// CHECK1-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1
|
|
// CHECK1-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1
|
|
// CHECK1-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1
|
|
// CHECK1-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB25]], ptr [[DOTCAPTURE_EXPR_20]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP21]], ptr [[I26]], align 4
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
|
|
// CHECK1-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP22]], [[TMP23]]
|
|
// CHECK1-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4u(ptr @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 [[TMP25]], i32 1, i32 1)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_dispatch_next_4u(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
|
|
// CHECK1: omp.inner.for.cond29:
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[ADD30:%.*]] = add i32 [[TMP29]], 1
|
|
// CHECK1-NEXT: [[CMP31:%.*]] = icmp ult i32 [[TMP28]], [[ADD30]]
|
|
// CHECK1-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END40:%.*]]
|
|
// CHECK1: omp.inner.for.body32:
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK1-NEXT: [[MUL33:%.*]] = mul i32 [[TMP31]], 1
|
|
// CHECK1-NEXT: [[ADD34:%.*]] = add i32 [[TMP30]], [[MUL33]]
|
|
// CHECK1-NEXT: store i32 [[ADD34]], ptr [[I28]], align 4
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[I28]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP32]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM35]]
|
|
// CHECK1-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX36]], align 4
|
|
// CHECK1-NEXT: call void @__captured_stmt.1(ptr [[I28]])
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE37:%.*]]
|
|
// CHECK1: omp.body.continue37:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC38:%.*]]
|
|
// CHECK1: omp.inner.for.inc38:
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK1-NEXT: [[ADD39:%.*]] = add i32 [[TMP33]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP5:![0-9]+]]
|
|
// CHECK1: omp.inner.for.end40:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK1: .omp.final.then:
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK1-NEXT: [[SUB41:%.*]] = sub i32 [[TMP37]], [[TMP38]]
|
|
// CHECK1-NEXT: [[SUB42:%.*]] = sub i32 [[SUB41]], 1
|
|
// CHECK1-NEXT: [[ADD43:%.*]] = add i32 [[SUB42]], 1
|
|
// CHECK1-NEXT: [[DIV44:%.*]] = udiv i32 [[ADD43]], 1
|
|
// CHECK1-NEXT: [[MUL45:%.*]] = mul i32 [[DIV44]], 1
|
|
// CHECK1-NEXT: [[ADD46:%.*]] = add i32 [[TMP36]], [[MUL45]]
|
|
// CHECK1-NEXT: store i32 [[ADD46]], ptr [[I28]], align 4
|
|
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK1: .omp.final.done:
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@__captured_stmt
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR4:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@__captured_stmt.1
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR4]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-IRBUILDER-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK1-IRBUILDER-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK1-IRBUILDER-NEXT: entry:
|
|
// CHECK1-IRBUILDER-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-IRBUILDER-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
|
|
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
|
|
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1-IRBUILDER: omp.dispatch.cond:
|
|
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]])
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0
|
|
// CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1-IRBUILDER: omp.dispatch.body:
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1-IRBUILDER: omp.inner.for.cond:
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
|
|
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1-IRBUILDER: omp.inner.for.body:
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7
|
|
// CHECK1-IRBUILDER-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
|
|
// CHECK1-IRBUILDER-NEXT: store i32 [[SUB]], ptr [[I]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]])
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64
|
|
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM]]
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP9]] to i64
|
|
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM3]]
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[MUL5:%.*]] = fmul float [[TMP7]], [[TMP10]]
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP12]] to i64
|
|
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM6]]
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX7]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP13]]
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP15]] to i64
|
|
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM9]]
|
|
// CHECK1-IRBUILDER-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
|
|
// CHECK1-IRBUILDER: omp.inner.for.body.ordered.after:
|
|
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]])
|
|
// CHECK1-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1-IRBUILDER: omp.body.continue:
|
|
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1-IRBUILDER: omp.inner.for.inc:
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
|
|
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM11:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]])
|
|
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM11]])
|
|
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1-IRBUILDER: omp.inner.for.end:
|
|
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1-IRBUILDER: omp.dispatch.inc:
|
|
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1-IRBUILDER: omp.dispatch.end:
|
|
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM12:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]])
|
|
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM12]])
|
|
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM13:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM13]])
|
|
// CHECK1-IRBUILDER-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-IRBUILDER-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
|
|
// CHECK1-IRBUILDER-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-IRBUILDER-NEXT: entry:
|
|
// CHECK1-IRBUILDER-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-IRBUILDER-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK1-IRBUILDER-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store i64 16908287, ptr [[DOTOMP_UB]], align 8
|
|
// CHECK1-IRBUILDER-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6:[0-9]+]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_8u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1073741891, i64 0, i64 16908287, i64 1, i64 1)
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6]])
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_dispatch_next_8u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0
// CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.body:
// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK1-IRBUILDER-NEXT: store i64 [[TMP1]], ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add i64 [[TMP3]], 1
// CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP2]], [[ADD]]
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body:
// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul i64 [[TMP4]], 127
// CHECK1-IRBUILDER-NEXT: [[ADD2:%.*]] = add i64 131071, [[MUL]]
// CHECK1-IRBUILDER-NEXT: store i64 [[ADD2]], ptr [[I]], align 8
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]])
// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i64, ptr [[I]], align 8
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 [[TMP6]]
// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[I]], align 8
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[TMP9]]
// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
// CHECK1-IRBUILDER-NEXT: [[MUL5:%.*]] = fmul float [[TMP7]], [[TMP10]]
// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP12]]
// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
// CHECK1-IRBUILDER-NEXT: [[MUL7:%.*]] = fmul float [[MUL5]], [[TMP13]]
// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i64, ptr [[I]], align 8
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP14]], i64 [[TMP15]]
// CHECK1-IRBUILDER-NEXT: store float [[MUL7]], ptr [[ARRAYIDX8]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body.ordered.after:
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1-IRBUILDER: omp.body.continue:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.inc:
// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[ADD9:%.*]] = add i64 [[TMP16]], 1
// CHECK1-IRBUILDER-NEXT: store i64 [[ADD9]], ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_8u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM10]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1-IRBUILDER: omp.inner.for.end:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.inc:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1-IRBUILDER: omp.dispatch.end:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM11:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM11]])
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM12:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM12]])
// CHECK1-IRBUILDER-NEXT: ret void
//
//
// CHECK1-IRBUILDER-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK1-IRBUILDER-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK1-IRBUILDER-NEXT: entry:
// CHECK1-IRBUILDER-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK1-IRBUILDER-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK1-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK1-IRBUILDER-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK1-IRBUILDER-NEXT: [[X6:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[I8:%.*]] = alloca i8, align 1
// CHECK1-IRBUILDER-NEXT: [[X9:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[X]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[Y]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4
// CHECK1-IRBUILDER-NEXT: [[CONV:%.*]] = trunc i32 [[TMP0]] to i8
// CHECK1-IRBUILDER-NEXT: store i8 [[CONV]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-IRBUILDER-NEXT: [[CONV3:%.*]] = sext i8 [[TMP1]] to i32
// CHECK1-IRBUILDER-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
// CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
// CHECK1-IRBUILDER-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-IRBUILDER-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
// CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
// CHECK1-IRBUILDER-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK1-IRBUILDER-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-IRBUILDER-NEXT: store i8 [[TMP2]], ptr [[I]], align 1
// CHECK1-IRBUILDER-NEXT: store i32 11, ptr [[X6]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-IRBUILDER-NEXT: [[CONV7:%.*]] = sext i8 [[TMP3]] to i32
// CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1-IRBUILDER: omp.precond.then:
// CHECK1-IRBUILDER-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK1-IRBUILDER-NEXT: store i64 [[TMP4]], ptr [[DOTOMP_UB]], align 8
// CHECK1-IRBUILDER-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8:[0-9]+]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_8(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1073741894, i64 0, i64 [[TMP5]], i64 1, i64 1)
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8]])
// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM10]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
// CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.body:
// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK1-IRBUILDER-NEXT: store i64 [[TMP7]], ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK1-IRBUILDER-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP8]], [[TMP9]]
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body:
// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-IRBUILDER-NEXT: [[CONV12:%.*]] = sext i8 [[TMP10]] to i64
// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[DIV13:%.*]] = sdiv i64 [[TMP11]], 11
// CHECK1-IRBUILDER-NEXT: [[MUL14:%.*]] = mul nsw i64 [[DIV13]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD15:%.*]] = add nsw i64 [[CONV12]], [[MUL14]]
// CHECK1-IRBUILDER-NEXT: [[CONV16:%.*]] = trunc i64 [[ADD15]] to i8
// CHECK1-IRBUILDER-NEXT: store i8 [[CONV16]], ptr [[I8]], align 1
// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[DIV17:%.*]] = sdiv i64 [[TMP13]], 11
// CHECK1-IRBUILDER-NEXT: [[MUL18:%.*]] = mul nsw i64 [[DIV17]], 11
// CHECK1-IRBUILDER-NEXT: [[SUB19:%.*]] = sub nsw i64 [[TMP12]], [[MUL18]]
// CHECK1-IRBUILDER-NEXT: [[MUL20:%.*]] = mul nsw i64 [[SUB19]], 1
// CHECK1-IRBUILDER-NEXT: [[SUB21:%.*]] = sub nsw i64 11, [[MUL20]]
// CHECK1-IRBUILDER-NEXT: [[CONV22:%.*]] = trunc i64 [[SUB21]] to i32
// CHECK1-IRBUILDER-NEXT: store i32 [[CONV22]], ptr [[X9]], align 4
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM23:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM23]])
// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i8, ptr [[I8]], align 1
// CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP15]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]]
// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP17:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i8, ptr [[I8]], align 1
// CHECK1-IRBUILDER-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP18]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM24]]
// CHECK1-IRBUILDER-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX25]], align 4
// CHECK1-IRBUILDER-NEXT: [[MUL26:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK1-IRBUILDER-NEXT: [[TMP20:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP21:%.*]] = load i8, ptr [[I8]], align 1
// CHECK1-IRBUILDER-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP21]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM27]]
// CHECK1-IRBUILDER-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX28]], align 4
// CHECK1-IRBUILDER-NEXT: [[MUL29:%.*]] = fmul float [[MUL26]], [[TMP22]]
// CHECK1-IRBUILDER-NEXT: [[TMP23:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP24:%.*]] = load i8, ptr [[I8]], align 1
// CHECK1-IRBUILDER-NEXT: [[IDXPROM30:%.*]] = sext i8 [[TMP24]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM30]]
// CHECK1-IRBUILDER-NEXT: store float [[MUL29]], ptr [[ARRAYIDX31]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body.ordered.after:
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM23]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1-IRBUILDER: omp.body.continue:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.inc:
// CHECK1-IRBUILDER-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[ADD32:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK1-IRBUILDER-NEXT: store i64 [[ADD32]], ptr [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM33:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_8(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM33]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1-IRBUILDER: omp.inner.for.end:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.inc:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1-IRBUILDER: omp.dispatch.end:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM34:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM34]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1-IRBUILDER: omp.precond.end:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM35:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM35]])
// CHECK1-IRBUILDER-NEXT: ret void
//
//
// CHECK1-IRBUILDER-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK1-IRBUILDER-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK1-IRBUILDER-NEXT: entry:
// CHECK1-IRBUILDER-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK1-IRBUILDER-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK1-IRBUILDER-NEXT: [[X2:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[X]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 199, ptr [[DOTOMP_UB]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10:[0-9]+]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1073741893, i32 0, i32 199, i32 1, i32 1)
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]])
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0
// CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.body:
// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body:
// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP4]], 20
// CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
// CHECK1-IRBUILDER-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
// CHECK1-IRBUILDER-NEXT: store i8 [[CONV]], ptr [[I]], align 1
// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP6]], 20
// CHECK1-IRBUILDER-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 20
// CHECK1-IRBUILDER-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], [[MUL5]]
// CHECK1-IRBUILDER-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD7:%.*]] = add nsw i32 -10, [[MUL6]]
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD7]], ptr [[X2]], align 4
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM8:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8]])
// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load i8, ptr [[I]], align 1
// CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP8]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP7]], i64 [[IDXPROM]]
// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load i8, ptr [[I]], align 1
// CHECK1-IRBUILDER-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP11]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 [[IDXPROM9]]
// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX10]], align 4
// CHECK1-IRBUILDER-NEXT: [[MUL11:%.*]] = fmul float [[TMP9]], [[TMP12]]
// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load i8, ptr [[I]], align 1
// CHECK1-IRBUILDER-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP14]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[IDXPROM12]]
// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX13]], align 4
// CHECK1-IRBUILDER-NEXT: [[MUL14:%.*]] = fmul float [[MUL11]], [[TMP15]]
// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP17:%.*]] = load i8, ptr [[I]], align 1
// CHECK1-IRBUILDER-NEXT: [[IDXPROM15:%.*]] = zext i8 [[TMP17]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds nuw float, ptr [[TMP16]], i64 [[IDXPROM15]]
// CHECK1-IRBUILDER-NEXT: store float [[MUL14]], ptr [[ARRAYIDX16]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body.ordered.after:
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1-IRBUILDER: omp.body.continue:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.inc:
// CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM18:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM18]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1-IRBUILDER: omp.inner.for.end:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.inc:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1-IRBUILDER: omp.dispatch.end:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM19:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM19]])
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM20:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM20]])
// CHECK1-IRBUILDER-NEXT: ret void
//
//
// CHECK1-IRBUILDER-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK1-IRBUILDER-SAME: (i32 noundef [[LOW:%.*]], i32 noundef [[UP:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-IRBUILDER-NEXT: entry:
// CHECK1-IRBUILDER-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IV16:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[I26:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: [[I28:%.*]] = alloca i32, align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-IRBUILDER-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
// CHECK1-IRBUILDER-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK1-IRBUILDER-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-IRBUILDER-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK1-IRBUILDER-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP4]], ptr [[I]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[TMP6]]
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK1-IRBUILDER: simd.if.then:
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-IRBUILDER-NEXT: [[ADD6:%.*]] = add i32 [[TMP8]], 1
// CHECK1-IRBUILDER-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP7]], [[ADD6]]
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body:
// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul i32 [[TMP10]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD8:%.*]] = add i32 [[TMP9]], [[MUL]]
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4
// CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
// CHECK1-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4
// CHECK1-IRBUILDER-NEXT: call void @__captured_stmt(ptr [[I5]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body.ordered.after:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1-IRBUILDER: omp.body.continue:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.inc:
// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[ADD9:%.*]] = add i32 [[TMP12]], 1
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK1-IRBUILDER: omp.inner.for.end:
// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-IRBUILDER-NEXT: [[SUB10:%.*]] = sub i32 [[TMP14]], [[TMP15]]
// CHECK1-IRBUILDER-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1
// CHECK1-IRBUILDER-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], 1
// CHECK1-IRBUILDER-NEXT: [[MUL14:%.*]] = mul i32 [[DIV13]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD15:%.*]] = add i32 [[TMP13]], [[MUL14]]
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD15]], ptr [[I5]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[SIMD_IF_END]]
// CHECK1-IRBUILDER: simd.if.end:
// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP16]], ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP17:%.*]] = load i32, ptr [[UP_ADDR]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-IRBUILDER-NEXT: [[SUB21:%.*]] = sub i32 [[TMP18]], [[TMP19]]
// CHECK1-IRBUILDER-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1
// CHECK1-IRBUILDER-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1
// CHECK1-IRBUILDER-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1
// CHECK1-IRBUILDER-NEXT: store i32 [[SUB25]], ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP20]], ptr [[I26]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK1-IRBUILDER-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP21]], [[TMP22]]
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1-IRBUILDER: omp.precond.then:
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP23]], ptr [[DOTOMP_UB]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12:[0-9]+]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 66, i32 0, i32 [[TMP24]], i32 1, i32 1)
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM29:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12]])
// CHECK1-IRBUILDER-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_dispatch_next_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM29]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.body:
// CHECK1-IRBUILDER-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV16]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.cond30:
// CHECK1-IRBUILDER-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-IRBUILDER-NEXT: [[ADD31:%.*]] = add i32 [[TMP28]], 1
// CHECK1-IRBUILDER-NEXT: [[CMP32:%.*]] = icmp ult i32 [[TMP27]], [[ADD31]]
// CHECK1-IRBUILDER-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END42:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body33:
// CHECK1-IRBUILDER-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
// CHECK1-IRBUILDER-NEXT: [[MUL34:%.*]] = mul i32 [[TMP30]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD35:%.*]] = add i32 [[TMP29]], [[MUL34]]
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD35]], ptr [[I28]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP31:%.*]] = load i32, ptr [[I28]], align 4
// CHECK1-IRBUILDER-NEXT: [[IDXPROM36:%.*]] = sext i32 [[TMP31]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM36]]
// CHECK1-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX37]], align 4
// CHECK1-IRBUILDER-NEXT: call void @__captured_stmt.1(ptr [[I28]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY33_ORDERED_AFTER:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body33.ordered.after:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE38:%.*]]
// CHECK1-IRBUILDER: omp.body.continue38:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC39:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.inc39:
// CHECK1-IRBUILDER-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
// CHECK1-IRBUILDER-NEXT: [[ADD40:%.*]] = add i32 [[TMP32]], 1
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD40]], ptr [[DOTOMP_IV16]], align 4
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM41:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM41]])
// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK1-IRBUILDER: omp.inner.for.end42:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1-IRBUILDER: omp.dispatch.inc:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1-IRBUILDER: omp.dispatch.end:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM43:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM43]])
// CHECK1-IRBUILDER-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK1-IRBUILDER-NEXT: br i1 [[TMP34]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-IRBUILDER: .omp.final.then:
// CHECK1-IRBUILDER-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-IRBUILDER-NEXT: [[SUB44:%.*]] = sub i32 [[TMP36]], [[TMP37]]
// CHECK1-IRBUILDER-NEXT: [[SUB45:%.*]] = sub i32 [[SUB44]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD46:%.*]] = add i32 [[SUB45]], 1
// CHECK1-IRBUILDER-NEXT: [[DIV47:%.*]] = udiv i32 [[ADD46]], 1
// CHECK1-IRBUILDER-NEXT: [[MUL48:%.*]] = mul i32 [[DIV47]], 1
// CHECK1-IRBUILDER-NEXT: [[ADD49:%.*]] = add i32 [[TMP35]], [[MUL48]]
// CHECK1-IRBUILDER-NEXT: store i32 [[ADD49]], ptr [[I28]], align 4
// CHECK1-IRBUILDER-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1-IRBUILDER: .omp.final.done:
// CHECK1-IRBUILDER-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1-IRBUILDER: omp.precond.end:
// CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM50:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM50]])
// CHECK1-IRBUILDER-NEXT: ret void
//
//
// CHECK1-IRBUILDER-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK1-IRBUILDER-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-IRBUILDER-NEXT: entry:
// CHECK1-IRBUILDER-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
// CHECK1-IRBUILDER-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4
// CHECK1-IRBUILDER-NEXT: ret void
//
//
// CHECK1-IRBUILDER-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK1-IRBUILDER-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR4]] {
// CHECK1-IRBUILDER-NEXT: entry:
// CHECK1-IRBUILDER-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-IRBUILDER-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
// CHECK1-IRBUILDER-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4
// CHECK1-IRBUILDER-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 7
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK3-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[IDXPROM]]
// CHECK3-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP10]] to i64
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM1]]
// CHECK3-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64
// CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM4]]
// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
// CHECK3-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64
// CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM7]]
// CHECK3-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4
// CHECK3-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK3-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 16908287, ptr [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: call void @__kmpc_dispatch_init_8u(ptr @[[GLOB1]], i32 [[TMP0]], i32 67, i64 0, i64 16908287, i64 1, i64 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_8u(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP2]], ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[ADD:%.*]] = add i64 [[TMP4]], 1
// CHECK3-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP3]], [[ADD]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP5]], 127
// CHECK3-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
// CHECK3-NEXT: store i64 [[ADD1]], ptr [[I]], align 8
// CHECK3-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[I]], align 8
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i64 [[TMP7]]
// CHECK3-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[I]], align 8
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP9]], i64 [[TMP10]]
// CHECK3-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[I]], align 8
// CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP13]]
// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
// CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load i64, ptr [[I]], align 8
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[TMP15]], i64 [[TMP16]]
// CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4
// CHECK3-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP17]], 1
// CHECK3-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_8u(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[X6:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I8:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[X9:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK3-NEXT: store i32 0, ptr [[X]], align 4
// CHECK3-NEXT: store i32 0, ptr [[Y]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[Y]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK3-NEXT: store i8 [[CONV]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK3-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
// CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK3-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
// CHECK3-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK3-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: store i8 [[TMP3]], ptr [[I]], align 1
// CHECK3-NEXT: store i32 11, ptr [[X6]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV7:%.*]] = sext i8 [[TMP4]] to i32
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK3-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK3-NEXT: call void @__kmpc_dispatch_init_8(ptr @[[GLOB1]], i32 [[TMP0]], i32 70, i64 0, i64 [[TMP6]], i64 1, i64 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_8(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV11:%.*]] = sext i8 [[TMP11]] to i64
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP12]], 11
// CHECK3-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
// CHECK3-NEXT: [[ADD14:%.*]] = add nsw i64 [[CONV11]], [[MUL13]]
// CHECK3-NEXT: [[CONV15:%.*]] = trunc i64 [[ADD14]] to i8
// CHECK3-NEXT: store i8 [[CONV15]], ptr [[I8]], align 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP14]], 11
// CHECK3-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 11
// CHECK3-NEXT: [[SUB18:%.*]] = sub nsw i64 [[TMP13]], [[MUL17]]
// CHECK3-NEXT: [[MUL19:%.*]] = mul nsw i64 [[SUB18]], 1
// CHECK3-NEXT: [[SUB20:%.*]] = sub nsw i64 11, [[MUL19]]
// CHECK3-NEXT: [[CONV21:%.*]] = trunc i64 [[SUB20]] to i32
// CHECK3-NEXT: store i32 [[CONV21]], ptr [[X9]], align 4
// CHECK3-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[I8]], align 1
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP16]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK3-NEXT: [[IDXPROM22:%.*]] = sext i8 [[TMP19]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM22]]
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX23]], align 4
|
|
// CHECK3-NEXT: [[MUL24:%.*]] = fmul float [[TMP17]], [[TMP20]]
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK3-NEXT: [[IDXPROM25:%.*]] = sext i8 [[TMP22]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM25]]
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX26]], align 4
|
|
// CHECK3-NEXT: [[MUL27:%.*]] = fmul float [[MUL24]], [[TMP23]]
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK3-NEXT: [[IDXPROM28:%.*]] = sext i8 [[TMP25]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[IDXPROM28]]
|
|
// CHECK3-NEXT: store float [[MUL27]], ptr [[ARRAYIDX29]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: [[ADD30:%.*]] = add nsw i64 [[TMP26]], 1
|
|
// CHECK3-NEXT: store i64 [[ADD30]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_8(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK3: omp.precond.end:
|
|
// CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
|
|
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[X2:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i32 0, ptr [[X]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 199, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 69, i32 0, i32 199, i32 1, i32 1)
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 20
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
|
|
// CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
|
|
// CHECK3-NEXT: store i8 [[CONV]], ptr [[I]], align 1
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP7]], 20
|
|
// CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 20
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], [[MUL4]]
|
|
// CHECK3-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 -10, [[MUL5]]
|
|
// CHECK3-NEXT: store i32 [[ADD6]], ptr [[X2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP9]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK3-NEXT: [[IDXPROM7:%.*]] = zext i8 [[TMP12]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[IDXPROM7]]
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX8]], align 4
|
|
// CHECK3-NEXT: [[MUL9:%.*]] = fmul float [[TMP10]], [[TMP13]]
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK3-NEXT: [[IDXPROM10:%.*]] = zext i8 [[TMP15]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds nuw float, ptr [[TMP14]], i64 [[IDXPROM10]]
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX11]], align 4
|
|
// CHECK3-NEXT: [[MUL12:%.*]] = fmul float [[MUL9]], [[TMP16]]
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK3-NEXT: [[IDXPROM13:%.*]] = zext i8 [[TMP18]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds nuw float, ptr [[TMP17]], i64 [[IDXPROM13]]
|
|
// CHECK3-NEXT: store float [[MUL12]], ptr [[ARRAYIDX14]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z8foo_simdii
|
|
// CHECK3-SAME: (i32 noundef [[LOW:%.*]], i32 noundef [[UP:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I5:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV16:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I26:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I28:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[UP_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
|
|
// CHECK3-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
|
|
// CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK3-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
|
|
// CHECK3-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
|
|
// CHECK3: simd.if.then:
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK3-NEXT: [[ADD6:%.*]] = add i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP8]], [[ADD6]]
|
|
// CHECK3-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP11]], 1
|
|
// CHECK3-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: call void @__captured_stmt(ptr [[I5]])
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD9:%.*]] = add i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[SUB10:%.*]] = sub i32 [[TMP15]], [[TMP16]]
|
|
// CHECK3-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
|
|
// CHECK3-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1
|
|
// CHECK3-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], 1
|
|
// CHECK3-NEXT: [[MUL14:%.*]] = mul i32 [[DIV13]], 1
|
|
// CHECK3-NEXT: [[ADD15:%.*]] = add i32 [[TMP14]], [[MUL14]]
|
|
// CHECK3-NEXT: store i32 [[ADD15]], ptr [[I5]], align 4
|
|
// CHECK3-NEXT: br label [[SIMD_IF_END]]
|
|
// CHECK3: simd.if.end:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR_19]], align 4
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK3-NEXT: [[SUB21:%.*]] = sub i32 [[TMP19]], [[TMP20]]
|
|
// CHECK3-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1
|
|
// CHECK3-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1
|
|
// CHECK3-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1
|
|
// CHECK3-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1
|
|
// CHECK3-NEXT: store i32 [[SUB25]], ptr [[DOTCAPTURE_EXPR_20]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP21]], ptr [[I26]], align 4
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
|
|
// CHECK3-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP22]], [[TMP23]]
|
|
// CHECK3-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK3: omp.precond.then:
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4u(ptr @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 [[TMP25]], i32 1, i32 1)
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_dispatch_next_4u(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
|
|
// CHECK3: omp.inner.for.cond29:
|
|
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[ADD30:%.*]] = add i32 [[TMP29]], 1
|
|
// CHECK3-NEXT: [[CMP31:%.*]] = icmp ult i32 [[TMP28]], [[ADD30]]
|
|
// CHECK3-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END40:%.*]]
|
|
// CHECK3: omp.inner.for.body32:
|
|
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK3-NEXT: [[MUL33:%.*]] = mul i32 [[TMP31]], 1
|
|
// CHECK3-NEXT: [[ADD34:%.*]] = add i32 [[TMP30]], [[MUL33]]
|
|
// CHECK3-NEXT: store i32 [[ADD34]], ptr [[I28]], align 4
|
|
// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[I28]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP32]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM35]]
|
|
// CHECK3-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX36]], align 4
|
|
// CHECK3-NEXT: call void @__captured_stmt.1(ptr [[I28]])
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE37:%.*]]
|
|
// CHECK3: omp.body.continue37:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC38:%.*]]
|
|
// CHECK3: omp.inner.for.inc38:
|
|
// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK3-NEXT: [[ADD39:%.*]] = add i32 [[TMP33]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_IV16]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP5:![0-9]+]]
|
|
// CHECK3: omp.inner.for.end40:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK3: .omp.final.then:
|
|
// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK3-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
|
|
// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
|
|
// CHECK3-NEXT: [[SUB41:%.*]] = sub i32 [[TMP37]], [[TMP38]]
|
|
// CHECK3-NEXT: [[SUB42:%.*]] = sub i32 [[SUB41]], 1
|
|
// CHECK3-NEXT: [[ADD43:%.*]] = add i32 [[SUB42]], 1
|
|
// CHECK3-NEXT: [[DIV44:%.*]] = udiv i32 [[ADD43]], 1
|
|
// CHECK3-NEXT: [[MUL45:%.*]] = mul i32 [[DIV44]], 1
|
|
// CHECK3-NEXT: [[ADD46:%.*]] = add i32 [[TMP36]], [[MUL45]]
|
|
// CHECK3-NEXT: store i32 [[ADD46]], ptr [[I28]], align 4
|
|
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK3: .omp.final.done:
|
|
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK3: omp.precond.end:
|
|
// CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@__captured_stmt
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR4:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@__captured_stmt.1
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR4]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-IRBUILDER-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK3-IRBUILDER-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK3-IRBUILDER-NEXT: entry:
|
|
// CHECK3-IRBUILDER-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.cond:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.body:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.cond:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.body:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7
|
|
// CHECK3-IRBUILDER-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
|
|
// CHECK3-IRBUILDER-NEXT: store i32 [[SUB]], ptr [[I]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP9]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM3]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL5:%.*]] = fmul float [[TMP7]], [[TMP10]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP12]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM6]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX7]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP13]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP15]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM9]]
|
|
// CHECK3-IRBUILDER-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.body.ordered.after:
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3-IRBUILDER: omp.body.continue:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.inc:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
|
|
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM11:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM11]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.end:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.inc:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.end:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM12:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM12]])
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM13:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM13]])
|
|
// CHECK3-IRBUILDER-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-IRBUILDER-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
|
|
// CHECK3-IRBUILDER-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-IRBUILDER-NEXT: entry:
|
|
// CHECK3-IRBUILDER-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i64 16908287, ptr [[DOTOMP_UB]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6:[0-9]+]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_8u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 67, i64 0, i64 16908287, i64 1, i64 1)
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.cond:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_dispatch_next_8u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.body:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i64 [[TMP1]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.cond:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add i64 [[TMP3]], 1
|
|
// CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP2]], [[ADD]]
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.body:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul i64 [[TMP4]], 127
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD2:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK3-IRBUILDER-NEXT: store i64 [[ADD2]], ptr [[I]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i64, ptr [[I]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 [[TMP6]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[I]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[TMP9]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL5:%.*]] = fmul float [[TMP7]], [[TMP10]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP12]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL7:%.*]] = fmul float [[MUL5]], [[TMP13]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i64, ptr [[I]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP14]], i64 [[TMP15]]
|
|
// CHECK3-IRBUILDER-NEXT: store float [[MUL7]], ptr [[ARRAYIDX8]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.body.ordered.after:
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3-IRBUILDER: omp.body.continue:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.inc:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD9:%.*]] = add i64 [[TMP16]], 1
|
|
// CHECK3-IRBUILDER-NEXT: store i64 [[ADD9]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_8u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM10]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.end:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.inc:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.end:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM11:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM11]])
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM12:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM12]])
|
|
// CHECK3-IRBUILDER-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-IRBUILDER-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
|
|
// CHECK3-IRBUILDER-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-IRBUILDER-NEXT: entry:
|
|
// CHECK3-IRBUILDER-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[Y:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[X6:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[I8:%.*]] = alloca i8, align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[X9:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[X]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[Y]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[CONV:%.*]] = trunc i32 [[TMP0]] to i8
|
|
// CHECK3-IRBUILDER-NEXT: store i8 [[CONV]], ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[CONV3:%.*]] = sext i8 [[TMP1]] to i32
|
|
// CHECK3-IRBUILDER-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
|
|
// CHECK3-IRBUILDER-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK3-IRBUILDER-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
|
|
// CHECK3-IRBUILDER-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK3-IRBUILDER-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: store i8 [[TMP2]], ptr [[I]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: store i32 11, ptr [[X6]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[CONV7:%.*]] = sext i8 [[TMP3]] to i32
|
|
// CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.precond.then:
|
|
// CHECK3-IRBUILDER-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i64 [[TMP4]], ptr [[DOTOMP_UB]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8:[0-9]+]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_8(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 70, i64 0, i64 [[TMP5]], i64 1, i64 1)
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.cond:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM10]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.body:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i64 [[TMP7]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.cond:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP8]], [[TMP9]]
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.body:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[CONV12:%.*]] = sext i8 [[TMP10]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DIV13:%.*]] = sdiv i64 [[TMP11]], 11
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL14:%.*]] = mul nsw i64 [[DIV13]], 1
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD15:%.*]] = add nsw i64 [[CONV12]], [[MUL14]]
|
|
// CHECK3-IRBUILDER-NEXT: [[CONV16:%.*]] = trunc i64 [[ADD15]] to i8
|
|
// CHECK3-IRBUILDER-NEXT: store i8 [[CONV16]], ptr [[I8]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[DIV17:%.*]] = sdiv i64 [[TMP13]], 11
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL18:%.*]] = mul nsw i64 [[DIV17]], 11
|
|
// CHECK3-IRBUILDER-NEXT: [[SUB19:%.*]] = sub nsw i64 [[TMP12]], [[MUL18]]
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL20:%.*]] = mul nsw i64 [[SUB19]], 1
|
|
// CHECK3-IRBUILDER-NEXT: [[SUB21:%.*]] = sub nsw i64 11, [[MUL20]]
|
|
// CHECK3-IRBUILDER-NEXT: [[CONV22:%.*]] = trunc i64 [[SUB21]] to i32
|
|
// CHECK3-IRBUILDER-NEXT: store i32 [[CONV22]], ptr [[X9]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM23:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM23]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP15]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP17:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP18]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM24]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX25]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL26:%.*]] = fmul float [[TMP16]], [[TMP19]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP20:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP21:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP21]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM27]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX28]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL29:%.*]] = fmul float [[MUL26]], [[TMP22]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP23:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP24:%.*]] = load i8, ptr [[I8]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM30:%.*]] = sext i8 [[TMP24]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM30]]
|
|
// CHECK3-IRBUILDER-NEXT: store float [[MUL29]], ptr [[ARRAYIDX31]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.body.ordered.after:
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM23]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3-IRBUILDER: omp.body.continue:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.inc:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD32:%.*]] = add nsw i64 [[TMP25]], 1
|
|
// CHECK3-IRBUILDER-NEXT: store i64 [[ADD32]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM33:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_8(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM33]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.end:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.inc:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.end:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM34:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM34]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK3-IRBUILDER: omp.precond.end:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM35:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM35]])
|
|
// CHECK3-IRBUILDER-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-IRBUILDER-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
|
|
// CHECK3-IRBUILDER-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-IRBUILDER-NEXT: entry:
|
|
// CHECK3-IRBUILDER-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[X2:%.*]] = alloca i32, align 4
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[X]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 199, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10:[0-9]+]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 69, i32 0, i32 199, i32 1, i32 1)
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.cond:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.body:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.cond:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
|
|
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.body:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP4]], 20
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
|
|
// CHECK3-IRBUILDER-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
|
|
// CHECK3-IRBUILDER-NEXT: store i8 [[CONV]], ptr [[I]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP6]], 20
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 20
|
|
// CHECK3-IRBUILDER-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], [[MUL5]]
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD7:%.*]] = add nsw i32 -10, [[MUL6]]
|
|
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD7]], ptr [[X2]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM8:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8]])
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP8]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP7]], i64 [[IDXPROM]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP11]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 [[IDXPROM9]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX10]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL11:%.*]] = fmul float [[TMP9]], [[TMP12]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load ptr, ptr [[D_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP14]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[IDXPROM12]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX13]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[MUL14:%.*]] = fmul float [[MUL11]], [[TMP15]]
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP17:%.*]] = load i8, ptr [[I]], align 1
|
|
// CHECK3-IRBUILDER-NEXT: [[IDXPROM15:%.*]] = zext i8 [[TMP17]] to i64
|
|
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds nuw float, ptr [[TMP16]], i64 [[IDXPROM15]]
|
|
// CHECK3-IRBUILDER-NEXT: store float [[MUL14]], ptr [[ARRAYIDX16]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.body.ordered.after:
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3-IRBUILDER: omp.body.continue:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.inc:
|
|
// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP18]], 1
|
|
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM18:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM18]])
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3-IRBUILDER: omp.inner.for.end:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.inc:
|
|
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3-IRBUILDER: omp.dispatch.end:
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM19:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM19]])
|
|
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM20:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM20]])
|
|
// CHECK3-IRBUILDER-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-IRBUILDER-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK3-IRBUILDER-SAME: (i32 noundef [[LOW:%.*]], i32 noundef [[UP:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-IRBUILDER-NEXT: entry:
// CHECK3-IRBUILDER-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IV16:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[I26:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: [[I28:%.*]] = alloca i32, align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-IRBUILDER-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
// CHECK3-IRBUILDER-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK3-IRBUILDER-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK3-IRBUILDER-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK3-IRBUILDER-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP4]], ptr [[I]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[TMP6]]
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK3-IRBUILDER: simd.if.then:
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.cond:
// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-IRBUILDER-NEXT: [[ADD6:%.*]] = add i32 [[TMP8]], 1
// CHECK3-IRBUILDER-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP7]], [[ADD6]]
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.body:
// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul i32 [[TMP10]], 1
// CHECK3-IRBUILDER-NEXT: [[ADD8:%.*]] = add i32 [[TMP9]], [[MUL]]
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4
// CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
// CHECK3-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4
// CHECK3-IRBUILDER-NEXT: call void @__captured_stmt(ptr [[I5]])
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.body.ordered.after:
// CHECK3-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3-IRBUILDER: omp.body.continue:
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.inc:
// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-IRBUILDER-NEXT: [[ADD9:%.*]] = add i32 [[TMP12]], 1
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK3-IRBUILDER: omp.inner.for.end:
// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-IRBUILDER-NEXT: [[SUB10:%.*]] = sub i32 [[TMP14]], [[TMP15]]
// CHECK3-IRBUILDER-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
// CHECK3-IRBUILDER-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1
// CHECK3-IRBUILDER-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], 1
// CHECK3-IRBUILDER-NEXT: [[MUL14:%.*]] = mul i32 [[DIV13]], 1
// CHECK3-IRBUILDER-NEXT: [[ADD15:%.*]] = add i32 [[TMP13]], [[MUL14]]
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD15]], ptr [[I5]], align 4
// CHECK3-IRBUILDER-NEXT: br label [[SIMD_IF_END]]
// CHECK3-IRBUILDER: simd.if.end:
// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP16]], ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP17:%.*]] = load i32, ptr [[UP_ADDR]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-IRBUILDER-NEXT: [[SUB21:%.*]] = sub i32 [[TMP18]], [[TMP19]]
// CHECK3-IRBUILDER-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1
// CHECK3-IRBUILDER-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1
// CHECK3-IRBUILDER-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1
// CHECK3-IRBUILDER-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1
// CHECK3-IRBUILDER-NEXT: store i32 [[SUB25]], ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP20]], ptr [[I26]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK3-IRBUILDER-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP21]], [[TMP22]]
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3-IRBUILDER: omp.precond.then:
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP23]], ptr [[DOTOMP_UB]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12:[0-9]+]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 66, i32 0, i32 [[TMP24]], i32 1, i32 1)
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3-IRBUILDER: omp.dispatch.cond:
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM29:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12]])
// CHECK3-IRBUILDER-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_dispatch_next_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM29]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3-IRBUILDER: omp.dispatch.body:
// CHECK3-IRBUILDER-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-IRBUILDER-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV16]], align 4
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.cond30:
// CHECK3-IRBUILDER-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-IRBUILDER-NEXT: [[ADD31:%.*]] = add i32 [[TMP28]], 1
// CHECK3-IRBUILDER-NEXT: [[CMP32:%.*]] = icmp ult i32 [[TMP27]], [[ADD31]]
// CHECK3-IRBUILDER-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END42:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.body33:
// CHECK3-IRBUILDER-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
// CHECK3-IRBUILDER-NEXT: [[MUL34:%.*]] = mul i32 [[TMP30]], 1
// CHECK3-IRBUILDER-NEXT: [[ADD35:%.*]] = add i32 [[TMP29]], [[MUL34]]
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD35]], ptr [[I28]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP31:%.*]] = load i32, ptr [[I28]], align 4
// CHECK3-IRBUILDER-NEXT: [[IDXPROM36:%.*]] = sext i32 [[TMP31]] to i64
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM36]]
// CHECK3-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX37]], align 4
// CHECK3-IRBUILDER-NEXT: call void @__captured_stmt.1(ptr [[I28]])
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY33_ORDERED_AFTER:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.body33.ordered.after:
// CHECK3-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE38:%.*]]
// CHECK3-IRBUILDER: omp.body.continue38:
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC39:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.inc39:
// CHECK3-IRBUILDER-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4
// CHECK3-IRBUILDER-NEXT: [[ADD40:%.*]] = add i32 [[TMP32]], 1
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD40]], ptr [[DOTOMP_IV16]], align 4
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM41:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM41]])
// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK3-IRBUILDER: omp.inner.for.end42:
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3-IRBUILDER: omp.dispatch.inc:
// CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3-IRBUILDER: omp.dispatch.end:
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM43:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM43]])
// CHECK3-IRBUILDER-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK3-IRBUILDER-NEXT: br i1 [[TMP34]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3-IRBUILDER: .omp.final.then:
// CHECK3-IRBUILDER-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-IRBUILDER-NEXT: [[SUB44:%.*]] = sub i32 [[TMP36]], [[TMP37]]
// CHECK3-IRBUILDER-NEXT: [[SUB45:%.*]] = sub i32 [[SUB44]], 1
// CHECK3-IRBUILDER-NEXT: [[ADD46:%.*]] = add i32 [[SUB45]], 1
// CHECK3-IRBUILDER-NEXT: [[DIV47:%.*]] = udiv i32 [[ADD46]], 1
// CHECK3-IRBUILDER-NEXT: [[MUL48:%.*]] = mul i32 [[DIV47]], 1
// CHECK3-IRBUILDER-NEXT: [[ADD49:%.*]] = add i32 [[TMP35]], [[MUL48]]
// CHECK3-IRBUILDER-NEXT: store i32 [[ADD49]], ptr [[I28]], align 4
// CHECK3-IRBUILDER-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3-IRBUILDER: .omp.final.done:
// CHECK3-IRBUILDER-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3-IRBUILDER: omp.precond.end:
// CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM50:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM50]])
// CHECK3-IRBUILDER-NEXT: ret void
//
//
// CHECK3-IRBUILDER-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK3-IRBUILDER-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK3-IRBUILDER-NEXT: entry:
// CHECK3-IRBUILDER-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-IRBUILDER-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
// CHECK3-IRBUILDER-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4
// CHECK3-IRBUILDER-NEXT: ret void
//
//
// CHECK3-IRBUILDER-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK3-IRBUILDER-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR4]] {
// CHECK3-IRBUILDER-NEXT: entry:
// CHECK3-IRBUILDER-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-IRBUILDER-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
// CHECK3-IRBUILDER-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4
// CHECK3-IRBUILDER-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 32000000, ptr [[I]], align 4
// CHECK5-NEXT: br label [[FOR_COND:%.*]]
// CHECK5: for.cond:
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 33
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK5: for.body:
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP5]] to i64
// CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM1]]
// CHECK5-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], [[TMP6]]
// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP8]] to i64
// CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM3]]
// CHECK5-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
// CHECK5-NEXT: [[MUL5:%.*]] = fmul float [[MUL]], [[TMP9]]
// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP11]] to i64
// CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM6]]
// CHECK5-NEXT: store float [[MUL5]], ptr [[ARRAYIDX7]], align 4
// CHECK5-NEXT: br label [[FOR_INC:%.*]]
// CHECK5: for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], -7
// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
// CHECK5: for.end:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: store i64 131071, ptr [[I]], align 8
// CHECK5-NEXT: br label [[FOR_COND:%.*]]
// CHECK5: for.cond:
// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[I]], align 8
// CHECK5-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP0]], 2147483647
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK5: for.body:
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[I]], align 8
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP1]], i64 [[TMP2]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[I]], align 8
// CHECK5-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP4]], i64 [[TMP5]]
// CHECK5-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], [[TMP6]]
// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[I]], align 8
// CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP7]], i64 [[TMP8]]
// CHECK5-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK5-NEXT: [[MUL3:%.*]] = fmul float [[MUL]], [[TMP9]]
// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[I]], align 8
// CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 [[TMP11]]
// CHECK5-NEXT: store float [[MUL3]], ptr [[ARRAYIDX4]], align 4
// CHECK5-NEXT: br label [[FOR_INC:%.*]]
// CHECK5: for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8
// CHECK5-NEXT: [[ADD:%.*]] = add i64 [[TMP12]], 127
// CHECK5-NEXT: store i64 [[ADD]], ptr [[I]], align 8
// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK5: for.end:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 0, ptr [[X]], align 4
// CHECK5-NEXT: store i32 0, ptr [[Y]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = trunc i32 [[TMP0]] to i8
// CHECK5-NEXT: store i8 [[CONV]], ptr [[I]], align 1
// CHECK5-NEXT: br label [[FOR_COND:%.*]]
// CHECK5: for.cond:
// CHECK5-NEXT: [[TMP1:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV1]], 57
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END13:%.*]]
// CHECK5: for.body:
// CHECK5-NEXT: store i32 11, ptr [[X]], align 4
// CHECK5-NEXT: br label [[FOR_COND2:%.*]]
// CHECK5: for.cond2:
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[X]], align 4
// CHECK5-NEXT: [[CMP3:%.*]] = icmp ugt i32 [[TMP2]], 0
// CHECK5-NEXT: br i1 [[CMP3]], label [[FOR_BODY4:%.*]], label [[FOR_END:%.*]]
// CHECK5: for.body4:
// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP4]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i8 [[TMP7]] to i64
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[IDXPROM5]]
// CHECK5-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP5]], [[TMP8]]
// CHECK5-NEXT: [[TMP9:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP10:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[IDXPROM7:%.*]] = sext i8 [[TMP10]] to i64
// CHECK5-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM7]]
// CHECK5-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX8]], align 4
// CHECK5-NEXT: [[MUL9:%.*]] = fmul float [[MUL]], [[TMP11]]
// CHECK5-NEXT: [[TMP12:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP13:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[IDXPROM10:%.*]] = sext i8 [[TMP13]] to i64
// CHECK5-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM10]]
// CHECK5-NEXT: store float [[MUL9]], ptr [[ARRAYIDX11]], align 4
// CHECK5-NEXT: br label [[FOR_INC:%.*]]
// CHECK5: for.inc:
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[X]], align 4
// CHECK5-NEXT: [[DEC:%.*]] = add i32 [[TMP14]], -1
// CHECK5-NEXT: store i32 [[DEC]], ptr [[X]], align 4
// CHECK5-NEXT: br label [[FOR_COND2]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK5: for.end:
// CHECK5-NEXT: br label [[FOR_INC12:%.*]]
// CHECK5: for.inc12:
// CHECK5-NEXT: [[TMP15:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[INC:%.*]] = add i8 [[TMP15]], 1
// CHECK5-NEXT: store i8 [[INC]], ptr [[I]], align 1
// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK5: for.end13:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 0, ptr [[X]], align 4
// CHECK5-NEXT: store i8 48, ptr [[I]], align 1
// CHECK5-NEXT: br label [[FOR_COND:%.*]]
// CHECK5: for.cond:
// CHECK5-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV]], 57
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END13:%.*]]
// CHECK5: for.body:
// CHECK5-NEXT: store i32 -10, ptr [[X]], align 4
// CHECK5-NEXT: br label [[FOR_COND1:%.*]]
// CHECK5: for.cond1:
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[X]], align 4
// CHECK5-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP1]], 10
// CHECK5-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END:%.*]]
// CHECK5: for.body3:
// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP3]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[IDXPROM4:%.*]] = zext i8 [[TMP6]] to i64
// CHECK5-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 [[IDXPROM4]]
// CHECK5-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP4]], [[TMP7]]
// CHECK5-NEXT: [[TMP8:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP9]] to i64
// CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[IDXPROM6]]
// CHECK5-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4
// CHECK5-NEXT: [[MUL8:%.*]] = fmul float [[MUL]], [[TMP10]]
// CHECK5-NEXT: [[TMP11:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP12:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP12]] to i64
// CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[IDXPROM9]]
// CHECK5-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4
// CHECK5-NEXT: br label [[FOR_INC:%.*]]
// CHECK5: for.inc:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[X]], align 4
// CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK5-NEXT: store i32 [[INC]], ptr [[X]], align 4
// CHECK5-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK5: for.end:
// CHECK5-NEXT: br label [[FOR_INC11:%.*]]
// CHECK5: for.inc11:
// CHECK5-NEXT: [[TMP14:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: [[INC12:%.*]] = add i8 [[TMP14]], 1
// CHECK5-NEXT: store i8 [[INC12]], ptr [[I]], align 1
// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK5: for.end13:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK5-SAME: (i32 noundef [[LOW:%.*]], i32 noundef [[UP:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I27:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IV30:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I31:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4
// CHECK5-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
// CHECK5-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK5-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK5-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: store i32 [[TMP4]], ptr [[I]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[TMP6]]
// CHECK5-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK5: simd.if.then:
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK5-NEXT: [[ADD6:%.*]] = add i32 [[TMP8]], 1
// CHECK5-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP7]], [[ADD6]]
// CHECK5-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = mul i32 [[TMP10]], 1
// CHECK5-NEXT: [[ADD8:%.*]] = add i32 [[TMP9]], [[MUL]]
// CHECK5-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]]
// CHECK5-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4
// CHECK5-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP12]] to i64
// CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM9]]
// CHECK5-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX10]], align 4
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[ADD11:%.*]] = add i32 [[TMP13]], 1
// CHECK5-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[SUB12:%.*]] = sub i32 [[TMP15]], [[TMP16]]
// CHECK5-NEXT: [[SUB13:%.*]] = sub i32 [[SUB12]], 1
// CHECK5-NEXT: [[ADD14:%.*]] = add i32 [[SUB13]], 1
// CHECK5-NEXT: [[DIV15:%.*]] = udiv i32 [[ADD14]], 1
// CHECK5-NEXT: [[MUL16:%.*]] = mul i32 [[DIV15]], 1
// CHECK5-NEXT: [[ADD17:%.*]] = add i32 [[TMP14]], [[MUL16]]
// CHECK5-NEXT: store i32 [[ADD17]], ptr [[I5]], align 4
// CHECK5-NEXT: br label [[SIMD_IF_END]]
// CHECK5: simd.if.end:
// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[SUB22:%.*]] = sub i32 [[TMP19]], [[TMP20]]
// CHECK5-NEXT: [[SUB23:%.*]] = sub i32 [[SUB22]], 1
// CHECK5-NEXT: [[ADD24:%.*]] = add i32 [[SUB23]], 1
// CHECK5-NEXT: [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
// CHECK5-NEXT: [[SUB26:%.*]] = sub i32 [[DIV25]], 1
// CHECK5-NEXT: store i32 [[SUB26]], ptr [[DOTCAPTURE_EXPR_21]], align 4
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4
// CHECK5-NEXT: store i32 [[TMP21]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: store i32 [[TMP22]], ptr [[I27]], align 4
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK5-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP23]], [[TMP24]]
// CHECK5-NEXT: br i1 [[CMP28]], label [[SIMD_IF_THEN29:%.*]], label [[SIMD_IF_END52:%.*]]
// CHECK5: simd.if.then29:
// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP25]], ptr [[DOTOMP_IV30]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND32:%.*]]
// CHECK5: omp.inner.for.cond32:
// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4
// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[ADD33:%.*]] = add i32 [[TMP27]], 1
// CHECK5-NEXT: [[CMP34:%.*]] = icmp ult i32 [[TMP26]], [[ADD33]]
// CHECK5-NEXT: br i1 [[CMP34]], label [[OMP_INNER_FOR_BODY35:%.*]], label [[OMP_INNER_FOR_END45:%.*]]
// CHECK5: omp.inner.for.body35:
// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4
// CHECK5-NEXT: [[MUL36:%.*]] = mul i32 [[TMP29]], 1
// CHECK5-NEXT: [[ADD37:%.*]] = add i32 [[TMP28]], [[MUL36]]
// CHECK5-NEXT: store i32 [[ADD37]], ptr [[I31]], align 4
// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[I31]], align 4
// CHECK5-NEXT: [[IDXPROM38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK5-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM38]]
// CHECK5-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX39]], align 4
// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[I31]], align 4
// CHECK5-NEXT: [[IDXPROM40:%.*]] = sext i32 [[TMP31]] to i64
// CHECK5-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM40]]
// CHECK5-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX41]], align 4
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE42:%.*]]
// CHECK5: omp.body.continue42:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC43:%.*]]
// CHECK5: omp.inner.for.inc43:
// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4
// CHECK5-NEXT: [[ADD44:%.*]] = add i32 [[TMP32]], 1
// CHECK5-NEXT: store i32 [[ADD44]], ptr [[DOTOMP_IV30]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND32]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK5: omp.inner.for.end45:
// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4
// CHECK5-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[SUB46:%.*]] = sub i32 [[TMP34]], [[TMP35]]
// CHECK5-NEXT: [[SUB47:%.*]] = sub i32 [[SUB46]], 1
// CHECK5-NEXT: [[ADD48:%.*]] = add i32 [[SUB47]], 1
// CHECK5-NEXT: [[DIV49:%.*]] = udiv i32 [[ADD48]], 1
// CHECK5-NEXT: [[MUL50:%.*]] = mul i32 [[DIV49]], 1
// CHECK5-NEXT: [[ADD51:%.*]] = add i32 [[TMP33]], [[MUL50]]
// CHECK5-NEXT: store i32 [[ADD51]], ptr [[I31]], align 4
// CHECK5-NEXT: br label [[SIMD_IF_END52]]
// CHECK5: simd.if.end52:
// CHECK5-NEXT: ret void
//