llvm-project/clang/test/OpenMP/cancel_codegen.cpp
Tom Eccles f7daa9d302
[mlir][OpenMP] fix crash outlining infinite loop (#129872)
Previously an extra block was created by splitting the previous exit
block. This produced incorrect results when the outlined region
statically never terminated: in that case there was no valid exit
block for the outlined region, so the newly added block ended up with
an incoming edge from outside of the outlining region, which caused
outlining to fail.
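
For illustration only, a minimal C/C++-style sketch (not part of this test;
the original report in #112884 came through the Fortran/MLIR path) of the
shape of region that statically never terminates:

    void spin() {
    #pragma omp parallel
      {
        while (true) {
          // No statically reachable exit: the outlined region has no valid
          // exit block, which is the situation described above.
        }
      }
    }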

So far as I can tell, this extra block no longer serves any purpose. The
comment says it is supposed to collate multiple control-flow edges into
one place, but the code as it stands does not achieve this. In fact, as
the changes to the lit tests show, this block was never actually
outlined in the end. This is because there are two code extractors: one
in the callback for creating the parallel op, used to determine the
input/output variables (which does have this block added to it), and
another which actually performs the outlining (which this block was not
added to).

Tested with the gfortran and fujitsu test suites.

Fixes #112884
2025-03-07 11:02:52 +00:00


// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple x86_64-apple-darwin13.4.0 -emit-pch -o %t.0 %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -std=c++11 -include-pch %t.0 -verify %s -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-enable-irbuilder -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-apple-darwin13.4.0 -emit-pch -o %t.1 %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-enable-irbuilder -std=c++11 -include-pch %t.1 -verify %s -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - %s | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple x86_64-apple-darwin13.4.0 -emit-pch -o %t.2 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -std=c++11 -include-pch %t.2 -verify %s -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin13.4.0 -emit-pch -o %t.3 %s
// RUN: %clang_cc1 -fopenmp -std=c++11 -include-pch %t.3 -verify %s -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-apple-darwin13.4.0 -emit-pch -o %t.4 %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -std=c++11 -include-pch %t.4 -verify %s -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp-simd -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - %s | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin13.4.0 -emit-pch -o %t.5 %s
// RUN: %clang_cc1 -fopenmp-simd -std=c++11 -include-pch %t.5 -verify %s -triple x86_64-apple-darwin13.4.0 -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
float flag;
int main (int argc, char **argv) {
#pragma omp parallel
{
#pragma omp cancel parallel if(flag)
argv[0][0] = argc;
#pragma omp barrier
argv[0][0] += argc;
}
#pragma omp sections
{
#pragma omp cancel sections
}
#pragma omp sections
{
#pragma omp cancel sections
#pragma omp section
{
#pragma omp cancel sections
}
}
#pragma omp for
for (int i = 0; i < argc; ++i) {
#pragma omp cancel for if(cancel: flag)
}
#pragma omp task
{
#pragma omp cancel taskgroup
}
#pragma omp parallel sections
{
#pragma omp cancel sections
}
#pragma omp parallel sections
{
#pragma omp cancel sections
#pragma omp section
{
#pragma omp cancel sections
}
}
int r = 0;
#pragma omp parallel for reduction(+: r)
for (int i = 0; i < argc; ++i) {
#pragma omp cancel for
r += i;
}
return argc;
}
#endif
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: (i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I24:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[R:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @main.omp_outlined, ptr [[ARGV_ADDR]], ptr [[ARGC_ADDR]])
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
// CHECK1-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 0
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP7]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK1-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK1: cancel.cont:
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB5:[0-9]+]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_LB_1]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_UB_2]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_3]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_4]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_SECTIONS_IL_4]], ptr [[DOTOMP_SECTIONS_LB_1]], ptr [[DOTOMP_SECTIONS_UB_2]], ptr [[DOTOMP_SECTIONS_ST_3]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_2]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = icmp slt i32 [[TMP11]], 1
// CHECK1-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP11]], i32 1
// CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_SECTIONS_UB_2]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_1]], align 4
// CHECK1-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_SECTIONS_IV_5]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND6:%.*]]
// CHECK1: omp.inner.for.cond6:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_2]], align 4
// CHECK1-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY8:%.*]], label [[OMP_INNER_FOR_END18:%.*]]
// CHECK1: omp.inner.for.body8:
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4
// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_SECTIONS_EXIT15:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE9:%.*]]
// CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE12:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case9:
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK1-NEXT: br i1 [[TMP19]], label [[DOTCANCEL_EXIT10:%.*]], label [[DOTCANCEL_CONTINUE11:%.*]]
// CHECK1: .cancel.exit10:
// CHECK1-NEXT: br label [[CANCEL_EXIT19:%.*]]
// CHECK1: cancel.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[CANCEL_CONT]]
// CHECK1: .cancel.continue11:
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT15]]
// CHECK1: .omp.sections.case12:
// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK1-NEXT: br i1 [[TMP21]], label [[DOTCANCEL_EXIT13:%.*]], label [[DOTCANCEL_CONTINUE14:%.*]]
// CHECK1: .cancel.exit13:
// CHECK1-NEXT: br label [[CANCEL_EXIT19]]
// CHECK1: .cancel.continue14:
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT15]]
// CHECK1: .omp.sections.exit15:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]]
// CHECK1: omp.inner.for.inc16:
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4
// CHECK1-NEXT: [[INC17:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK1-NEXT: store i32 [[INC17]], ptr [[DOTOMP_SECTIONS_IV_5]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND6]]
// CHECK1: omp.inner.for.end18:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[CANCEL_CONT20:%.*]]
// CHECK1: cancel.cont20:
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB5]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP23]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK1-NEXT: [[SUB22:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB22]], ptr [[DOTCAPTURE_EXPR_21]], align 4
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CMP23:%.*]] = icmp slt i32 0, [[TMP25]]
// CHECK1-NEXT: br i1 [[CMP23]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4
// CHECK1-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB6:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4
// CHECK1-NEXT: [[CMP25:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
// CHECK1-NEXT: br i1 [[CMP25]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE]] ], [ [[TMP30]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP31]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND26:%.*]]
// CHECK1: omp.inner.for.cond26:
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP27:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
// CHECK1-NEXT: br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
// CHECK1: omp.inner.for.body28:
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP34]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I24]], align 4
// CHECK1-NEXT: [[TMP35:%.*]] = load float, ptr @flag, align 4
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP35]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP0]], i32 2)
// CHECK1-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
// CHECK1-NEXT: br i1 [[TMP37]], label [[DOTCANCEL_EXIT29:%.*]], label [[DOTCANCEL_CONTINUE30:%.*]]
// CHECK1: .cancel.exit29:
// CHECK1-NEXT: br label [[CANCEL_EXIT34:%.*]]
// CHECK1: cancel.exit19:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[CANCEL_CONT20]]
// CHECK1: .cancel.continue30:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]]
// CHECK1: omp.inner.for.inc31:
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP38]], 1
// CHECK1-NEXT: store i32 [[ADD32]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND26]]
// CHECK1: omp.inner.for.end33:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB6]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: cancel.exit34:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB6]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[CANCEL_CONT35:%.*]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: br label [[CANCEL_CONT35]]
// CHECK1: cancel.cont35:
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP39:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry.)
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP39]], i32 0, i32 0
// CHECK1-NEXT: [[TMP41:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP39]])
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @main.omp_outlined.1)
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @main.omp_outlined.2)
// CHECK1-NEXT: store i32 0, ptr [[R]], align 4
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @main.omp_outlined.3, ptr [[ARGC_ADDR]], ptr [[R]])
// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
// CHECK1-NEXT: ret i32 [[TMP42]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[ARGV:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store ptr [[ARGC]], ptr [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load float, ptr @flag, align 4
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP2]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP4]], i32 1)
// CHECK1-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK1-NEXT: br i1 [[TMP6]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel_barrier(ptr @[[GLOB2]], i32 [[TMP8]])
// CHECK1-NEXT: br label [[RETURN:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP10]] to i8
// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP11]], i64 0
// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 0
// CHECK1-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX1]], align 1
// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_cancel_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP14]])
// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK1: .cancel.exit2:
// CHECK1-NEXT: br label [[RETURN]]
// CHECK1: .cancel.continue3:
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds ptr, ptr [[TMP18]], i64 0
// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[ARRAYIDX4]], align 8
// CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i64 0
// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX5]], align 1
// CHECK1-NEXT: [[CONV6:%.*]] = sext i8 [[TMP20]] to i32
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV6]], [[TMP17]]
// CHECK1-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD]] to i8
// CHECK1-NEXT: store i8 [[CONV7]], ptr [[ARRAYIDX5]], align 1
// CHECK1-NEXT: br label [[RETURN]]
// CHECK1: return:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], ptr noalias noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[CLEANUP_DEST_SLOT_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META5:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias [[META14:![0-9]+]]
// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias [[META14]]
// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias [[META14]]
// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias [[META14]]
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias [[META14]]
// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias [[META14]]
// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias [[META14]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias [[META14]]
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP9]], i32 4)
// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]]
// CHECK1: .cancel.exit.i:
// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias [[META14]]
// CHECK1-NEXT: br label [[DOTOMP_OUTLINED__EXIT:%.*]]
// CHECK1: .cancel.continue.i:
// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias [[META14]]
// CHECK1-NEXT: br label [[DOTOMP_OUTLINED__EXIT]]
// CHECK1: .omp_outlined..exit:
// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias [[META14]]
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined.1
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 0
// CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 0
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT: br i1 [[TMP10]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK1: cancel.cont:
// CHECK1-NEXT: ret void
// CHECK1: cancel.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined.2
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
// CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT: br i1 [[TMP10]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.case1:
// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK1: .cancel.exit2:
// CHECK1-NEXT: br label [[CANCEL_EXIT]]
// CHECK1: .cancel.continue3:
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK1: cancel.cont:
// CHECK1-NEXT: ret void
// CHECK1: cancel.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined.3
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[R:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[R_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[R3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[ARGC]], ptr [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store ptr [[R]], ptr [[R_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[R_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: store i32 0, ptr [[R3]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB6]], i32 [[TMP7]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP17]], i32 2)
// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK1-NEXT: br i1 [[TMP19]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I4]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[R3]], align 4
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP21]], [[TMP20]]
// CHECK1-NEXT: store i32 [[ADD7]], ptr [[R3]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB6]], i32 [[TMP24]])
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[R3]], ptr [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB7:[0-9]+]], i32 [[TMP27]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @main.omp_outlined.3.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP28]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[R3]], align 4
// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
// CHECK1-NEXT: store i32 [[ADD9]], ptr [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB7]], i32 [[TMP27]], ptr @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: cancel.exit:
// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB6]], i32 [[TMP32]])
// CHECK1-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[R3]], align 4
// CHECK1-NEXT: [[TMP34:%.*]] = atomicrmw add ptr [[TMP1]], i32 [[TMP33]] monotonic, align 4
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: br label [[CANCEL_CONT]]
// CHECK1: cancel.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined.3.omp.reduction.reduction_func
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: (i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[STRUCTARG:%.*]] = alloca { ptr, ptr }, align 8
// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[P_LASTITER:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[P_LOWERBOUND:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[P_UPPERBOUND:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[P_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[P_LASTITER28:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[P_LOWERBOUND29:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[P_UPPERBOUND30:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[P_STRIDE31:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I36:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK3-NEXT: [[R:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK3-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
// CHECK3-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK3-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK3: omp_parallel:
// CHECK3-NEXT: [[GEP_ARGC_ADDR:%.*]] = getelementptr { ptr, ptr }, ptr [[STRUCTARG]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[ARGC_ADDR]], ptr [[GEP_ARGC_ADDR]], align 8
// CHECK3-NEXT: [[GEP_ARGV_ADDR:%.*]] = getelementptr { ptr, ptr }, ptr [[STRUCTARG]], i32 0, i32 1
// CHECK3-NEXT: store ptr [[ARGV_ADDR]], ptr [[GEP_ARGV_ADDR]], align 8
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @main..omp_par, ptr [[STRUCTARG]])
// CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK3: omp.par.exit:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_PREHEADER:%.*]]
// CHECK3: omp_section_loop.preheader:
// CHECK3-NEXT: store i32 0, ptr [[P_LOWERBOUND]], align 4
// CHECK3-NEXT: store i32 0, ptr [[P_UPPERBOUND]], align 4
// CHECK3-NEXT: store i32 1, ptr [[P_STRIDE]], align 4
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM11:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM11]], i32 34, ptr [[P_LASTITER]], ptr [[P_LOWERBOUND]], ptr [[P_UPPERBOUND]], ptr [[P_STRIDE]], i32 1, i32 0)
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[P_LOWERBOUND]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[P_UPPERBOUND]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], [[TMP0]]
// CHECK3-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], 1
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_HEADER:%.*]]
// CHECK3: omp_section_loop.header:
// CHECK3-NEXT: [[OMP_SECTION_LOOP_IV:%.*]] = phi i32 [ 0, [[OMP_SECTION_LOOP_PREHEADER]] ], [ [[OMP_SECTION_LOOP_NEXT:%.*]], [[OMP_SECTION_LOOP_INC:%.*]] ]
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_COND:%.*]]
// CHECK3: omp_section_loop.cond:
// CHECK3-NEXT: [[OMP_SECTION_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_SECTION_LOOP_IV]], [[TMP3]]
// CHECK3-NEXT: br i1 [[OMP_SECTION_LOOP_CMP]], label [[OMP_SECTION_LOOP_BODY:%.*]], label [[OMP_SECTION_LOOP_EXIT:%.*]]
// CHECK3: omp_section_loop.body:
// CHECK3-NEXT: [[TMP4:%.*]] = add i32 [[OMP_SECTION_LOOP_IV]], [[TMP0]]
// CHECK3-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], 1
// CHECK3-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], 0
// CHECK3-NEXT: switch i32 [[TMP6]], label [[OMP_SECTION_LOOP_BODY_SECTIONS_AFTER:%.*]] [
// CHECK3-NEXT: i32 0, label [[OMP_SECTION_LOOP_BODY_CASE:%.*]]
// CHECK3-NEXT: ]
// CHECK3: omp_section_loop.body.case:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM10]], i32 3)
// CHECK3-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0
// CHECK3-NEXT: br i1 [[TMP8]], label [[OMP_SECTION_LOOP_BODY_CASE_SPLIT:%.*]], label [[OMP_SECTION_LOOP_BODY_CASE_CNCL:%.*]]
// CHECK3: omp_section_loop.body.case.split:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_BODY_CASE_SECTION_AFTER:%.*]]
// CHECK3: omp_section_loop.body.case.section.after:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_BODY_SECTIONS_AFTER]]
// CHECK3: omp_section_loop.body.sections.after:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_INC]]
// CHECK3: omp_section_loop.inc:
// CHECK3-NEXT: [[OMP_SECTION_LOOP_NEXT]] = add nuw i32 [[OMP_SECTION_LOOP_IV]], 1
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_HEADER]]
// CHECK3: omp_section_loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM11]])
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM12:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM12]])
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_AFTER:%.*]]
// CHECK3: omp_section_loop.after:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_AFTERSECTIONS_FINI:%.*]]
// CHECK3: omp_section_loop.aftersections.fini:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_PREHEADER13:%.*]]
// CHECK3: omp_section_loop.preheader13:
// CHECK3-NEXT: store i32 0, ptr [[P_LOWERBOUND29]], align 4
// CHECK3-NEXT: store i32 1, ptr [[P_UPPERBOUND30]], align 4
// CHECK3-NEXT: store i32 1, ptr [[P_STRIDE31]], align 4
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM32:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM32]], i32 34, ptr [[P_LASTITER28]], ptr [[P_LOWERBOUND29]], ptr [[P_UPPERBOUND30]], ptr [[P_STRIDE31]], i32 1, i32 0)
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[P_LOWERBOUND29]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[P_UPPERBOUND30]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = sub i32 [[TMP10]], [[TMP9]]
// CHECK3-NEXT: [[TMP12:%.*]] = add i32 [[TMP11]], 1
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_HEADER14:%.*]]
// CHECK3: omp_section_loop.header14:
// CHECK3-NEXT: [[OMP_SECTION_LOOP_IV20:%.*]] = phi i32 [ 0, [[OMP_SECTION_LOOP_PREHEADER13]] ], [ [[OMP_SECTION_LOOP_NEXT22:%.*]], [[OMP_SECTION_LOOP_INC17:%.*]] ]
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_COND15:%.*]]
// CHECK3: omp_section_loop.cond15:
// CHECK3-NEXT: [[OMP_SECTION_LOOP_CMP21:%.*]] = icmp ult i32 [[OMP_SECTION_LOOP_IV20]], [[TMP12]]
// CHECK3-NEXT: br i1 [[OMP_SECTION_LOOP_CMP21]], label [[OMP_SECTION_LOOP_BODY16:%.*]], label [[OMP_SECTION_LOOP_EXIT18:%.*]]
// CHECK3: omp_section_loop.body16:
// CHECK3-NEXT: [[TMP13:%.*]] = add i32 [[OMP_SECTION_LOOP_IV20]], [[TMP9]]
// CHECK3-NEXT: [[TMP14:%.*]] = mul i32 [[TMP13]], 1
// CHECK3-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], 0
// CHECK3-NEXT: switch i32 [[TMP15]], label [[OMP_SECTION_LOOP_BODY16_SECTIONS_AFTER:%.*]] [
// CHECK3-NEXT: i32 0, label [[OMP_SECTION_LOOP_BODY_CASE23:%.*]]
// CHECK3-NEXT: i32 1, label [[OMP_SECTION_LOOP_BODY_CASE25:%.*]]
// CHECK3-NEXT: ]
// CHECK3: omp_section_loop.body.case23:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM24:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM24]], i32 3)
// CHECK3-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP16]], 0
// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_SECTION_LOOP_BODY_CASE23_SPLIT:%.*]], label [[OMP_SECTION_LOOP_BODY_CASE23_CNCL:%.*]]
// CHECK3: omp_section_loop.body.case23.split:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_BODY_CASE23_SECTION_AFTER:%.*]]
// CHECK3: omp_section_loop.body.case23.section.after:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_BODY16_SECTIONS_AFTER]]
// CHECK3: omp_section_loop.body.case25:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM27:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM27]], i32 3)
// CHECK3-NEXT: [[TMP19:%.*]] = icmp eq i32 [[TMP18]], 0
// CHECK3-NEXT: br i1 [[TMP19]], label [[OMP_SECTION_LOOP_BODY_CASE25_SPLIT:%.*]], label [[OMP_SECTION_LOOP_BODY_CASE25_CNCL:%.*]]
// CHECK3: omp_section_loop.body.case25.split:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_BODY_CASE25_SECTION_AFTER26:%.*]]
// CHECK3: omp_section_loop.body.case25.section.after26:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_BODY_CASE25_SECTION_AFTER:%.*]]
// CHECK3: omp_section_loop.body.case25.section.after:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_BODY16_SECTIONS_AFTER]]
// CHECK3: omp_section_loop.body16.sections.after:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_INC17]]
// CHECK3: omp_section_loop.inc17:
// CHECK3-NEXT: [[OMP_SECTION_LOOP_NEXT22]] = add nuw i32 [[OMP_SECTION_LOOP_IV20]], 1
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_HEADER14]]
// CHECK3: omp_section_loop.exit18:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM32]])
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM33:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[OMP_GLOBAL_THREAD_NUM33]])
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_AFTER19:%.*]]
// CHECK3: omp_section_loop.after19:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_AFTER19SECTIONS_FINI:%.*]]
// CHECK3: omp_section_loop.after19sections.fini:
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP20]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP21]], 0
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK3-NEXT: [[SUB35:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK3-NEXT: store i32 [[SUB35]], ptr [[DOTCAPTURE_EXPR_34]], align 4
// CHECK3-NEXT: store i32 0, ptr [[I]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP22]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4
// CHECK3-NEXT: store i32 [[TMP23]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM37:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM37]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4
// CHECK3-NEXT: [[CMP38:%.*]] = icmp sgt i32 [[TMP24]], [[TMP25]]
// CHECK3-NEXT: br i1 [[CMP38]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP26]], [[COND_TRUE]] ], [ [[TMP27]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP28]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP39:%.*]] = icmp sle i32 [[TMP29]], [[TMP30]]
// CHECK3-NEXT: br i1 [[CMP39]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP31]], 1
// CHECK3-NEXT: [[ADD40:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD40]], ptr [[I36]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = load float, ptr @flag, align 4
// CHECK3-NEXT: [[TOBOOL41:%.*]] = fcmp une float [[TMP32]], 0.000000e+00
// CHECK3-NEXT: br i1 [[TOBOOL41]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM42:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8:[0-9]+]])
// CHECK3-NEXT: [[TMP33:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM42]], i32 2)
// CHECK3-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK3-NEXT: br i1 [[TMP34]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK3: .cancel.exit:
// CHECK3-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK3: omp_section_loop.body.case.cncl:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_EXIT]]
// CHECK3: omp_section_loop.body.case23.cncl:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_EXIT18]]
// CHECK3: omp_section_loop.body.case25.cncl:
// CHECK3-NEXT: br label [[OMP_SECTION_LOOP_EXIT18]]
// CHECK3: .cancel.continue:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP35]], 1
// CHECK3-NEXT: store i32 [[ADD43]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM45:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM45]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: cancel.exit:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM44:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM44]])
// CHECK3-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: br label [[CANCEL_CONT]]
// CHECK3: cancel.cont:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM46:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[OMP_GLOBAL_THREAD_NUM46]])
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM47:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB14:[0-9]+]])
// CHECK3-NEXT: [[TMP36:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM47]], i32 1, i64 40, i64 1, ptr @.omp_task_entry.)
// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP36]], i32 0, i32 0
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM48:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB14]])
// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM48]], ptr [[TMP36]])
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @main.omp_outlined)
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @main.omp_outlined.1)
// CHECK3-NEXT: store i32 0, ptr [[R]], align 4
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @main.omp_outlined.2, ptr [[ARGC_ADDR]], ptr [[R]])
// CHECK3-NEXT: [[TMP39:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
// CHECK3-NEXT: ret i32 [[TMP39]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@main..omp_par
// CHECK3-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[TMP0:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: omp.par.entry:
// CHECK3-NEXT: [[GEP_ARGC_ADDR:%.*]] = getelementptr { ptr, ptr }, ptr [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[LOADGEP_ARGC_ADDR:%.*]] = load ptr, ptr [[GEP_ARGC_ADDR]], align 8
// CHECK3-NEXT: [[GEP_ARGV_ADDR:%.*]] = getelementptr { ptr, ptr }, ptr [[TMP0]], i32 0, i32 1
// CHECK3-NEXT: [[LOADGEP_ARGV_ADDR:%.*]] = load ptr, ptr [[GEP_ARGV_ADDR]], align 8
// CHECK3-NEXT: [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TID_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TID_ADDR_LOCAL]], align 4
// CHECK3-NEXT: [[TID:%.*]] = load i32, ptr [[TID_ADDR_LOCAL]], align 4
// CHECK3-NEXT: br label [[OMP_PAR_REGION:%.*]]
// CHECK3: omp.par.region:
// CHECK3-NEXT: [[TMP2:%.*]] = load float, ptr @flag, align 4
// CHECK3-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP2]], 0.000000e+00
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[TMP14:%.*]], label [[TMP3:%.*]]
// CHECK3: 3:
// CHECK3-NEXT: br label [[TMP4:%.*]]
// CHECK3: 4:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[LOADGEP_ARGC_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[TMP5]] to i8
// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[LOADGEP_ARGV_ADDR]], align 8
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP6]], i64 0
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
// CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 0
// CHECK3-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX3]], align 1
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM4]])
// CHECK3-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
// CHECK3-NEXT: br i1 [[TMP9]], label [[DOTCONT:%.*]], label [[DOTCNCL5:%.*]]
// CHECK3: .cncl5:
// CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]]
// CHECK3: .cont:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[LOADGEP_ARGC_ADDR]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[LOADGEP_ARGV_ADDR]], align 8
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds ptr, ptr [[TMP11]], i64 0
// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX6]], align 8
// CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 0
// CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX7]], align 1
// CHECK3-NEXT: [[CONV8:%.*]] = sext i8 [[TMP13]] to i32
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK3-NEXT: [[CONV9:%.*]] = trunc i32 [[ADD]] to i8
// CHECK3-NEXT: store i8 [[CONV9]], ptr [[ARRAYIDX7]], align 1
// CHECK3-NEXT: br label [[OMP_PAR_REGION_PARALLEL_AFTER:%.*]]
// CHECK3: omp.par.region.parallel.after:
// CHECK3-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]]
// CHECK3: omp.par.pre_finalize:
// CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB]]
// CHECK3: 14:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], i32 1)
// CHECK3-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 0
// CHECK3-NEXT: br i1 [[TMP16]], label [[DOTSPLIT:%.*]], label [[DOTCNCL:%.*]]
// CHECK3: .cncl:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_cancel_barrier(ptr @[[GLOB2]], i32 [[OMP_GLOBAL_THREAD_NUM2]])
// CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB]]
// CHECK3: .split:
// CHECK3-NEXT: br label [[TMP4]]
// CHECK3: omp.par.exit.exitStub:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK3-SAME: (i32 noundef [[TMP0:%.*]], ptr noalias noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[CLEANUP_DEST_SLOT_I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias [[META12:![0-9]+]]
// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias [[META12]]
// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias [[META12]]
// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias [[META12]]
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias [[META12]]
// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias [[META12]]
// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias [[META12]]
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12:[0-9]+]])
// CHECK3-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], i32 4)
// CHECK3-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK3-NEXT: br i1 [[TMP10]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]]
// CHECK3: .cancel.exit.i:
// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias [[META12]]
// CHECK3-NEXT: br label [[DOTOMP_OUTLINED__EXIT:%.*]]
// CHECK3: .cancel.continue.i:
// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias [[META12]]
// CHECK3-NEXT: br label [[DOTOMP_OUTLINED__EXIT]]
// CHECK3: .omp_outlined..exit:
// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias [[META12]]
// CHECK3-NEXT: ret i32 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@main.omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB17:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB15:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = icmp slt i32 [[TMP0]], 0
// CHECK3-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[TMP0]], i32 0
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK3-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: switch i32 [[TMP6]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK3-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.sections.case:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], i32 3)
// CHECK3-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0
// CHECK3-NEXT: br i1 [[TMP8]], label [[DOTOMP_SECTIONS_CASE_SPLIT:%.*]], label [[DOTOMP_SECTIONS_CASE_CNCL:%.*]]
// CHECK3: .omp.sections.case.split:
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK3: .omp.sections.case.cncl:
// CHECK3-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK3: .omp.sections.exit:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK3-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB19:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB15]], i32 [[OMP_GLOBAL_THREAD_NUM3]])
// CHECK3-NEXT: br label [[CANCEL_CONT]]
// CHECK3: cancel.cont:
// CHECK3-NEXT: ret void
// CHECK3: cancel.exit:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB19]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB15]], i32 [[OMP_GLOBAL_THREAD_NUM2]])
// CHECK3-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@main.omp_outlined.1
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB15]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = icmp slt i32 [[TMP0]], 1
// CHECK3-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[TMP0]], i32 1
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK3-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: switch i32 [[TMP6]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK3-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK3-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.sections.case:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], i32 3)
// CHECK3-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0
// CHECK3-NEXT: br i1 [[TMP8]], label [[DOTOMP_SECTIONS_CASE_SPLIT:%.*]], label [[DOTOMP_SECTIONS_CASE_CNCL:%.*]]
// CHECK3: .omp.sections.case.split:
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK3: .omp.sections.case.cncl:
// CHECK3-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK3: .omp.sections.case2:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]], i32 3)
// CHECK3-NEXT: [[TMP10:%.*]] = icmp eq i32 [[TMP9]], 0
// CHECK3-NEXT: br i1 [[TMP10]], label [[DOTOMP_SECTIONS_CASE2_SPLIT:%.*]], label [[DOTOMP_SECTIONS_CASE2_CNCL:%.*]]
// CHECK3: .omp.sections.case2.split:
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_CASE2_SECTION_AFTER:%.*]]
// CHECK3: .omp.sections.case2.section.after:
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK3: .omp.sections.case2.cncl:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_END]]
// CHECK3: .omp.sections.exit:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM5:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB15]], i32 [[OMP_GLOBAL_THREAD_NUM5]])
// CHECK3-NEXT: br label [[CANCEL_CONT]]
// CHECK3: cancel.cont:
// CHECK3-NEXT: ret void
// CHECK3: cancel.exit:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB15]], i32 [[OMP_GLOBAL_THREAD_NUM4]])
// CHECK3-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@main.omp_outlined.2
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[R:%.*]]) #[[ATTR5]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[R_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[R3:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store ptr [[ARGC]], ptr [[ARGC_ADDR]], align 8
// CHECK3-NEXT: store ptr [[R]], ptr [[R_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[R_ADDR]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK3-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: store i32 0, ptr [[I]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: store i32 0, ptr [[R3]], align 4
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB25:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM7:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]])
// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM7]], i32 2)
// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK3-NEXT: br i1 [[TMP15]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK3: .cancel.exit:
// CHECK3-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK3: .cancel.continue:
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I4]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[R3]], align 4
// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP17]], [[TMP16]]
// CHECK3-NEXT: store i32 [[ADD8]], ptr [[R3]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM11:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB29:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM11]])
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK3-NEXT: store ptr [[R3]], ptr [[TMP19]], align 8
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM12:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB29]])
// CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB30:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM12]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @main.omp_outlined.2.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[R3]], align 4
// CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK3-NEXT: store i32 [[ADD13]], ptr [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB30]], i32 [[OMP_GLOBAL_THREAD_NUM12]], ptr @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: cancel.exit:
// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB29]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM10]])
// CHECK3-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[R3]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = atomicrmw add ptr [[TMP1]], i32 [[TMP23]] monotonic, align 4
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: br label [[CANCEL_CONT]]
// CHECK3: cancel.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@main.omp_outlined.2.omp.reduction.reduction_func
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
// CHECK3-NEXT: ret void
//