llvm-project/clang/test/OpenMP/target_parallel_codegen.cpp

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
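// To regenerate the assertions below, rerun that script with the UTC_ARGS
// recorded above; an assumed invocation from an llvm-project checkout is:
//   python llvm/utils/update_cc_test_checks.py \
//     clang/test/OpenMP/target_parallel_codegen.cpp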
// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK15
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16
// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK17
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK18
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK19
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK20
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK21
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK22
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK23
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK24
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK25
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK26
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK27
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK28
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK29
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK30
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK31
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK32
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// We have 8 target regions, but only 6 of them actually generate offloading
// code and have mapped arguments, and only 4 of those have all-constant map
// sizes.
// Check that the target registration is emitted as a Ctor.
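// An informal tally (not itself checked by FileCheck): foo() below contains
// five target regions, and ftemplate<int>(), fstatic() and S1::r1() contain
// one each. The 'if(target: 0)' region always falls back to the host and the
// 'nowait' region maps no arguments, leaving the 6 offloading regions with
// mapped arguments; of those, the two that capture VLAs (in foo() and
// S1::r1()) need runtime map sizes, leaving the 4 with all-constant sizes.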
template<typename tx, typename ty>
struct TT{
  tx X;
  ty Y;
};
int foo(int n) {
  int a = 0;
  short aa = 0;
  float b[10];
  float bn[n];
  double c[5][10];
  double cn[5][n];
  TT<long long, char> d;
#pragma omp target parallel nowait
  {
  }
#pragma omp target parallel if(target: 0)
  {
    a += 1;
  }
#pragma omp target parallel if(target: 1)
  {
    aa += 1;
#pragma omp cancel parallel
  }
#pragma omp target parallel if(target: n>10)
  {
    a += 1;
    aa += 1;
  }
// We capture 3 VLA sizes in this target region.
// The names below are not necessarily consistent with the names used for the
// addresses above, as some are repeated.
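// As a rough sketch (value names assumed; not what FileCheck matches), the
// three captured sizes for 'float bn[n]' and 'double cn[5][n]' are:
//   %bn.size = zext i32 %n to i64  ; the only dimension of bn
//   i64 5                          ; the constant first dimension of cn
//   %cn.size = zext i32 %n to i64  ; the variable second dimension of cn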
#pragma omp target parallel if(target: n>20)
  {
    a += 1;
    b[2] += 1.0;
    bn[3] += 1.0;
    c[1][2] += 1.0;
    cn[1][3] += 1.0;
    d.X += 1;
    d.Y += 1;
  }
  return a;
}
// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions in foo().
// Create stack storage and store the argument there.
// Create stack storage and store the argument there.
// Create stack storage and store the argument there.
// Create local storage for each capture.
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
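// That signature has roughly the following shape (a sketch with assumed
// names; the exact form is what the CHECK lines for @.omp_outlined. below
// match):
//   define internal void @.omp_outlined.(i32* noalias %.global_tid.,
//                                        i32* noalias %.bound_tid., ...)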
template<typename tx>
tx ftemplate(int n) {
  tx a = 0;
  short aa = 0;
  tx b[10];
#pragma omp target parallel if(target: n>40)
  {
    a += 1;
    aa += 1;
    b[2] += 1;
  }
  return a;
}
static
int fstatic(int n) {
  int a = 0;
  short aa = 0;
  char aaa = 0;
  int b[10];
#pragma omp target parallel if(target: n>50)
  {
    a += 1;
    aa += 1;
    aaa += 1;
    b[2] += 1;
  }
  return a;
}
struct S1 {
  double a;

  int r1(int n) {
    int b = n + 1;
    short int c[2][n];
#pragma omp target parallel if(target: n>60)
    {
      this->a = (double)b + 1.5;
      c[1][1] = ++a;
    }
    return c[1][1] + (int)b;
  }
};
int bar(int n) {
  int a = 0;
  a += foo(n);
  S1 S;
  a += S.r1(n);
  a += fstatic(n);
  a += ftemplate<int>(n);
  return a;
}
// We capture 2 VLA sizes in this target region.
// The names below are not necessarily consistent with the names used for the
// addresses above, as some are repeated.
// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions of the callees of bar().
// Create local storage for each capture.
// Store captures in the context.
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
// Create local storage for each capture.
// Store captures in the context.
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
// Create local storage for each capture.
// Store captures in the context.
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
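// For an 'int a' capture, the store-to-local-then-forward pattern being
// validated looks roughly like this (a sketch with assumed value names):
//   %a.casted = alloca i64
//   %v = load i32, i32* %a
//   %conv = bitcast i64* %a.casted to i32*
//   store i32 %v, i32* %conv
//   %packed = load i64, i64* %a.casted  ; forwarded to the outlined region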
#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK1-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[A_CASTED3:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED5:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[A_CASTED12:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [9 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [9 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [9 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP7]])
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i64 [[TMP12]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: [[TMP13:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP13]], i16* [[CONV2]], align 2
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
// CHECK1-NEXT: store i64 [[TMP14]], i64* [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK1-NEXT: store i64 [[TMP14]], i64* [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110.region_id, i32 1, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP14]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[A_CASTED3]] to i32*
// CHECK1-NEXT: store i32 [[TMP24]], i32* [[CONV4]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i64, i64* [[A_CASTED3]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV6:%.*]] = bitcast i64* [[AA_CASTED5]] to i16*
// CHECK1-NEXT: store i16 [[TMP26]], i16* [[CONV6]], align 2
// CHECK1-NEXT: [[TMP27:%.*]] = load i64, i64* [[AA_CASTED5]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 10
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK1-NEXT: store i64 [[TMP25]], i64* [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK1-NEXT: store i64 [[TMP25]], i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64*
// CHECK1-NEXT: store i64 [[TMP27]], i64* [[TMP35]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
// CHECK1-NEXT: store i64 [[TMP27]], i64* [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP38]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK1-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119.region_id, i32 2, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0
// CHECK1-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
// CHECK1: omp_offload.failed10:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT11]]
// CHECK1: omp_offload.cont11:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP43:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV13:%.*]] = bitcast i64* [[A_CASTED12]] to i32*
// CHECK1-NEXT: store i32 [[TMP43]], i32* [[CONV13]], align 4
// CHECK1-NEXT: [[TMP44:%.*]] = load i64, i64* [[A_CASTED12]], align 8
// CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP14:%.*]] = icmp sgt i32 [[TMP45]], 20
// CHECK1-NEXT: br i1 [[CMP14]], label [[OMP_IF_THEN15:%.*]], label [[OMP_IF_ELSE21:%.*]]
// CHECK1: omp_if.then15:
// CHECK1-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK1-NEXT: [[TMP47:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT: [[TMP48:%.*]] = mul nuw i64 [[TMP47]], 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
// CHECK1-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i64*
// CHECK1-NEXT: store i64 [[TMP44]], i64* [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
// CHECK1-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64*
// CHECK1-NEXT: store i64 [[TMP44]], i64* [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: store i64 4, i64* [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP54]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
// CHECK1-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 8
// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
// CHECK1-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]**
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK1-NEXT: store i64 40, i64* [[TMP59]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP60]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2
// CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP62]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2
// CHECK1-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP64]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK1-NEXT: store i64 8, i64* [[TMP65]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP66]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3
// CHECK1-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float**
// CHECK1-NEXT: store float* [[VLA]], float** [[TMP68]], align 8
// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3
// CHECK1-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float**
// CHECK1-NEXT: store float* [[VLA]], float** [[TMP70]], align 8
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK1-NEXT: store i64 [[TMP46]], i64* [[TMP71]], align 8
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP72]], align 8
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4
// CHECK1-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]**
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 8
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4
// CHECK1-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]**
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 8
// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK1-NEXT: store i64 400, i64* [[TMP77]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4
// CHECK1-NEXT: store i8* null, i8** [[TMP78]], align 8
// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5
// CHECK1-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
// CHECK1-NEXT: store i64 5, i64* [[TMP80]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5
// CHECK1-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64*
// CHECK1-NEXT: store i64 5, i64* [[TMP82]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK1-NEXT: store i64 8, i64* [[TMP83]], align 8
// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5
// CHECK1-NEXT: store i8* null, i8** [[TMP84]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6
// CHECK1-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP86]], align 8
// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6
// CHECK1-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP88]], align 8
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK1-NEXT: store i64 8, i64* [[TMP89]], align 8
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6
// CHECK1-NEXT: store i8* null, i8** [[TMP90]], align 8
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7
// CHECK1-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double**
// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP92]], align 8
// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7
// CHECK1-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP94]], align 8
// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK1-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 8
// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7
// CHECK1-NEXT: store i8* null, i8** [[TMP96]], align 8
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8
// CHECK1-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT**
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 8
// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8
// CHECK1-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT**
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 8
// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK1-NEXT: store i64 16, i64* [[TMP101]], align 8
// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8
// CHECK1-NEXT: store i8* null, i8** [[TMP102]], align 8
// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0
// CHECK1-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
// CHECK1: omp_offload.failed19:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT20]]
// CHECK1: omp_offload.cont20:
// CHECK1-NEXT: br label [[OMP_IF_END22:%.*]]
// CHECK1: omp_if.else21:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_IF_END22]]
// CHECK1: omp_if.end22:
// CHECK1-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP109]])
// CHECK1-NEXT: ret i32 [[TMP108]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK1-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !20
// CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !20
// CHECK1-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !20
// CHECK1-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !20
// CHECK1-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !20
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !20
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !20
// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) #[[ATTR3]]
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK1: omp_offload.failed.i:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100() #[[ATTR3]]
// CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK1: .omp_outlined..1.exit:
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
// CHECK1-SAME: (i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK1-SAME: (i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK1-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK1-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK1-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK1-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK1-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[CONV5]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK1-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK1-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[CONV5:%.*]] = fpext float [[TMP9]] to double
// CHECK1-NEXT: [[ADD6:%.*]] = fadd double [[CONV5]], 1.000000e+00
// CHECK1-NEXT: [[CONV7:%.*]] = fptrunc double [[ADD6]] to float
// CHECK1-NEXT: store float [[CONV7]], float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK1-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK1-NEXT: [[CONV9:%.*]] = fpext float [[TMP10]] to double
// CHECK1-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK1-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK1-NEXT: store float [[CONV11]], float* [[ARRAYIDX8]], align 4
// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX12]], i64 0, i64 2
// CHECK1-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX13]], align 8
// CHECK1-NEXT: [[ADD14:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK1-NEXT: store double [[ADD14]], double* [[ARRAYIDX13]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK1-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP12]]
// CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX15]], i64 3
// CHECK1-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX16]], align 8
// CHECK1-NEXT: [[ADD17:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK1-NEXT: store double [[ADD17]], double* [[ARRAYIDX16]], align 8
// CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 8
// CHECK1-NEXT: [[ADD18:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK1-NEXT: store i64 [[ADD18]], i64* [[X]], align 8
// CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK1-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 8
// CHECK1-NEXT: [[CONV19:%.*]] = sext i8 [[TMP15]] to i32
// CHECK1-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV19]], 1
// CHECK1-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK1-NEXT: store i8 [[CONV21]], i8* [[Y]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3bari
// CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: ret i32 [[TMP8]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK1-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
// CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK1-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK1-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
// CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
// CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK1-NEXT: ret i32 [[ADD4]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK1-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK1-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: ret i32 [[TMP31]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: ret i32 [[TMP24]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK1-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV3]], 1.500000e+00
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK1-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = load double, double* [[A4]], align 8
// CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK1-NEXT: store double [[INC]], double* [[A4]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK1-NEXT: [[TMP6:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP6]]
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK1-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK1-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP2]] to i32
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK1-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK1-NEXT: store i16 [[CONV5]], i16* [[CONV1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK1-NEXT: [[CONV6:%.*]] = sext i8 [[TMP3]] to i32
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], 1
// CHECK1-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i8
// CHECK1-NEXT: store i8 [[CONV8]], i8* [[CONV2]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK1-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK1-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK2-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK2-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[A_CASTED3:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED5:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT: [[A_CASTED12:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [9 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [9 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [9 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK2-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK2-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK2-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK2-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP7]])
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i64 [[TMP12]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: [[TMP13:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP13]], i16* [[CONV2]], align 2
// CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
// CHECK2-NEXT: store i64 [[TMP14]], i64* [[TMP16]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK2-NEXT: store i64 [[TMP14]], i64* [[TMP18]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110.region_id, i32 1, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP14]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV4:%.*]] = bitcast i64* [[A_CASTED3]] to i32*
// CHECK2-NEXT: store i32 [[TMP24]], i32* [[CONV4]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i64, i64* [[A_CASTED3]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV6:%.*]] = bitcast i64* [[AA_CASTED5]] to i16*
// CHECK2-NEXT: store i16 [[TMP26]], i16* [[CONV6]], align 2
// CHECK2-NEXT: [[TMP27:%.*]] = load i64, i64* [[AA_CASTED5]], align 8
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 10
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK2-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK2-NEXT: store i64 [[TMP25]], i64* [[TMP30]], align 8
// CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK2-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK2-NEXT: store i64 [[TMP25]], i64* [[TMP32]], align 8
// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64*
// CHECK2-NEXT: store i64 [[TMP27]], i64* [[TMP35]], align 8
// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
// CHECK2-NEXT: store i64 [[TMP27]], i64* [[TMP37]], align 8
// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP38]], align 8
// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK2-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119.region_id, i32 2, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0
// CHECK2-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
// CHECK2: omp_offload.failed10:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT11]]
// CHECK2: omp_offload.cont11:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP43:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV13:%.*]] = bitcast i64* [[A_CASTED12]] to i32*
// CHECK2-NEXT: store i32 [[TMP43]], i32* [[CONV13]], align 4
// CHECK2-NEXT: [[TMP44:%.*]] = load i64, i64* [[A_CASTED12]], align 8
// CHECK2-NEXT: [[TMP45:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP14:%.*]] = icmp sgt i32 [[TMP45]], 20
// CHECK2-NEXT: br i1 [[CMP14]], label [[OMP_IF_THEN15:%.*]], label [[OMP_IF_ELSE21:%.*]]
// CHECK2: omp_if.then15:
// CHECK2-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK2-NEXT: [[TMP47:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK2-NEXT: [[TMP48:%.*]] = mul nuw i64 [[TMP47]], 8
// CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
// CHECK2-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i64*
// CHECK2-NEXT: store i64 [[TMP44]], i64* [[TMP50]], align 8
// CHECK2-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
// CHECK2-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64*
// CHECK2-NEXT: store i64 [[TMP44]], i64* [[TMP52]], align 8
// CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK2-NEXT: store i64 4, i64* [[TMP53]], align 8
// CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP54]], align 8
// CHECK2-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
// CHECK2-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 8
// CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
// CHECK2-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]**
// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 8
// CHECK2-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK2-NEXT: store i64 40, i64* [[TMP59]], align 8
// CHECK2-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP60]], align 8
// CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2
// CHECK2-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP62]], align 8
// CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2
// CHECK2-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP64]], align 8
// CHECK2-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK2-NEXT: store i64 8, i64* [[TMP65]], align 8
// CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP66]], align 8
// CHECK2-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3
// CHECK2-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float**
// CHECK2-NEXT: store float* [[VLA]], float** [[TMP68]], align 8
// CHECK2-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3
// CHECK2-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float**
// CHECK2-NEXT: store float* [[VLA]], float** [[TMP70]], align 8
// CHECK2-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK2-NEXT: store i64 [[TMP46]], i64* [[TMP71]], align 8
// CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3
// CHECK2-NEXT: store i8* null, i8** [[TMP72]], align 8
// CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4
// CHECK2-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]**
// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 8
// CHECK2-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4
// CHECK2-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]**
// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 8
// CHECK2-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK2-NEXT: store i64 400, i64* [[TMP77]], align 8
// CHECK2-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4
// CHECK2-NEXT: store i8* null, i8** [[TMP78]], align 8
// CHECK2-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5
// CHECK2-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
// CHECK2-NEXT: store i64 5, i64* [[TMP80]], align 8
// CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5
// CHECK2-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64*
// CHECK2-NEXT: store i64 5, i64* [[TMP82]], align 8
// CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK2-NEXT: store i64 8, i64* [[TMP83]], align 8
// CHECK2-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5
// CHECK2-NEXT: store i8* null, i8** [[TMP84]], align 8
// CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6
// CHECK2-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP86]], align 8
// CHECK2-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6
// CHECK2-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP88]], align 8
// CHECK2-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK2-NEXT: store i64 8, i64* [[TMP89]], align 8
// CHECK2-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6
// CHECK2-NEXT: store i8* null, i8** [[TMP90]], align 8
// CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7
// CHECK2-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double**
// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP92]], align 8
// CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7
// CHECK2-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP94]], align 8
// CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK2-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 8
// CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7
// CHECK2-NEXT: store i8* null, i8** [[TMP96]], align 8
// CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8
// CHECK2-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT**
// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 8
// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8
// CHECK2-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT**
// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 8
// CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK2-NEXT: store i64 16, i64* [[TMP101]], align 8
// CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8
// CHECK2-NEXT: store i8* null, i8** [[TMP102]], align 8
// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
// CHECK2-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK2-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0
// CHECK2-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
// CHECK2: omp_offload.failed19:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT20]]
// CHECK2: omp_offload.cont20:
// CHECK2-NEXT: br label [[OMP_IF_END22:%.*]]
// CHECK2: omp_if.else21:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_IF_END22]]
// CHECK2: omp_if.end22:
// CHECK2-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP109]])
// CHECK2-NEXT: ret i32 [[TMP108]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK2-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK2-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK2-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK2-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]])
// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]])
// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !20
// CHECK2-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !20
// CHECK2-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !20
// CHECK2-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !20
// CHECK2-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !20
// CHECK2-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !20
// CHECK2-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !20
// CHECK2-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) #[[ATTR3]]
// CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK2-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK2: omp_offload.failed.i:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100() #[[ATTR3]]
// CHECK2-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK2: .omp_outlined..1.exit:
// CHECK2-NEXT: ret i32 0
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
// CHECK2-SAME: (i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK2-SAME: (i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK2-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK2-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK2-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK2: .cancel.exit:
// CHECK2-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK2: .cancel.continue:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK2-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK2-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK2-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK2-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP8]], i32* [[CONV5]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK2-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK2-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[CONV5:%.*]] = fpext float [[TMP9]] to double
// CHECK2-NEXT: [[ADD6:%.*]] = fadd double [[CONV5]], 1.000000e+00
// CHECK2-NEXT: [[CONV7:%.*]] = fptrunc double [[ADD6]] to float
// CHECK2-NEXT: store float [[CONV7]], float* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK2-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK2-NEXT: [[CONV9:%.*]] = fpext float [[TMP10]] to double
// CHECK2-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK2-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK2-NEXT: store float [[CONV11]], float* [[ARRAYIDX8]], align 4
// CHECK2-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK2-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX12]], i64 0, i64 2
// CHECK2-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX13]], align 8
// CHECK2-NEXT: [[ADD14:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK2-NEXT: store double [[ADD14]], double* [[ARRAYIDX13]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK2-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP12]]
// CHECK2-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX15]], i64 3
// CHECK2-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX16]], align 8
// CHECK2-NEXT: [[ADD17:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK2-NEXT: store double [[ADD17]], double* [[ARRAYIDX16]], align 8
// CHECK2-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 8
// CHECK2-NEXT: [[ADD18:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK2-NEXT: store i64 [[ADD18]], i64* [[X]], align 8
// CHECK2-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK2-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 8
// CHECK2-NEXT: [[CONV19:%.*]] = sext i8 [[TMP15]] to i32
// CHECK2-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV19]], 1
// CHECK2-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK2-NEXT: store i8 [[CONV21]], i8* [[Y]], align 8
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3bari
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK2-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK2-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK2-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: ret i32 [[TMP8]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK2-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
// CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK2-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK2-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK2-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK2-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
// CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8
// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8
// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8
// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8
// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8
// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8
// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
// CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
// CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK2-NEXT: ret i32 [[ADD4]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK2-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK2-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK2-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK2-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: ret i32 [[TMP31]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: ret i32 [[TMP24]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK2-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK2-NEXT: [[ADD:%.*]] = fadd double [[CONV3]], 1.500000e+00
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK2-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP5:%.*]] = load double, double* [[A4]], align 8
// CHECK2-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK2-NEXT: store double [[INC]], double* [[A4]], align 8
// CHECK2-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK2-NEXT: [[TMP6:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP6]]
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK2-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK2-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP2]] to i32
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK2-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK2-NEXT: store i16 [[CONV5]], i16* [[CONV1]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK2-NEXT: [[CONV6:%.*]] = sext i8 [[TMP3]] to i32
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], 1
// CHECK2-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i8
// CHECK2-NEXT: store i8 [[CONV8]], i8* [[CONV2]], align 8
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK2-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK2-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK2-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK2-NEXT: store i32 [[ADD5]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK2-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK2-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK3-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK3-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED3:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[A_CASTED10:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [9 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [9 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [9 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i32 [[TMP10]]) #[[ATTR3:[0-9]+]]
// CHECK3-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110.region_id, i32 1, i8** [[TMP18]], i8** [[TMP19]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK3-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP12]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP22]], i32* [[A_CASTED2]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV4:%.*]] = bitcast i32* [[AA_CASTED3]] to i16*
// CHECK3-NEXT: store i16 [[TMP24]], i16* [[CONV4]], align 2
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[AA_CASTED3]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP26]], 10
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK3-NEXT: store i32 [[TMP23]], i32* [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK3-NEXT: store i32 [[TMP23]], i32* [[TMP30]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32*
// CHECK3-NEXT: store i32 [[TMP25]], i32* [[TMP33]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32*
// CHECK3-NEXT: store i32 [[TMP25]], i32* [[TMP35]], align 4
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP36]], align 4
// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119.region_id, i32 2, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
// CHECK3-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
// CHECK3: omp_offload.failed8:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP23]], i32 [[TMP25]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT9]]
// CHECK3: omp_offload.cont9:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP23]], i32 [[TMP25]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP41]], i32* [[A_CASTED10]], align 4
// CHECK3-NEXT: [[TMP42:%.*]] = load i32, i32* [[A_CASTED10]], align 4
// CHECK3-NEXT: [[TMP43:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[TMP43]], 20
// CHECK3-NEXT: br i1 [[CMP11]], label [[OMP_IF_THEN12:%.*]], label [[OMP_IF_ELSE18:%.*]]
// CHECK3: omp_if.then12:
// CHECK3-NEXT: [[TMP44:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK3-NEXT: [[TMP45:%.*]] = sext i32 [[TMP44]] to i64
// CHECK3-NEXT: [[TMP46:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK3-NEXT: [[TMP47:%.*]] = mul nuw i32 [[TMP46]], 8
// CHECK3-NEXT: [[TMP48:%.*]] = sext i32 [[TMP47]] to i64
// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK3-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32*
// CHECK3-NEXT: store i32 [[TMP42]], i32* [[TMP50]], align 4
// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK3-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32*
// CHECK3-NEXT: store i32 [[TMP42]], i32* [[TMP52]], align 4
// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: store i64 4, i64* [[TMP53]], align 4
// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1
// CHECK3-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 4
// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1
// CHECK3-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]**
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 4
// CHECK3-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK3-NEXT: store i64 40, i64* [[TMP59]], align 4
// CHECK3-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP60]], align 4
// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2
// CHECK3-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP62]], align 4
// CHECK3-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2
// CHECK3-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP64]], align 4
// CHECK3-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK3-NEXT: store i64 4, i64* [[TMP65]], align 4
// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP66]], align 4
// CHECK3-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3
// CHECK3-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float**
// CHECK3-NEXT: store float* [[VLA]], float** [[TMP68]], align 4
// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3
// CHECK3-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float**
// CHECK3-NEXT: store float* [[VLA]], float** [[TMP70]], align 4
// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK3-NEXT: store i64 [[TMP45]], i64* [[TMP71]], align 4
// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3
// CHECK3-NEXT: store i8* null, i8** [[TMP72]], align 4
// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4
// CHECK3-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]**
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 4
// CHECK3-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4
// CHECK3-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]**
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 4
// CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK3-NEXT: store i64 400, i64* [[TMP77]], align 4
// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4
// CHECK3-NEXT: store i8* null, i8** [[TMP78]], align 4
// CHECK3-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5
// CHECK3-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK3-NEXT: store i32 5, i32* [[TMP80]], align 4
// CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5
// CHECK3-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32*
// CHECK3-NEXT: store i32 5, i32* [[TMP82]], align 4
// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK3-NEXT: store i64 4, i64* [[TMP83]], align 4
// CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5
// CHECK3-NEXT: store i8* null, i8** [[TMP84]], align 4
// CHECK3-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6
// CHECK3-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP86]], align 4
// CHECK3-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6
// CHECK3-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP88]], align 4
// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK3-NEXT: store i64 4, i64* [[TMP89]], align 4
// CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6
// CHECK3-NEXT: store i8* null, i8** [[TMP90]], align 4
// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7
// CHECK3-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double**
// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP92]], align 4
// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7
// CHECK3-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP94]], align 4
// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK3-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 4
// CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7
// CHECK3-NEXT: store i8* null, i8** [[TMP96]], align 4
// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8
// CHECK3-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT**
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 4
// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8
// CHECK3-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT**
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 4
// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK3-NEXT: store i64 12, i64* [[TMP101]], align 4
// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8
// CHECK3-NEXT: store i8* null, i8** [[TMP102]], align 4
// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0
// CHECK3-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
// CHECK3: omp_offload.failed16:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT17]]
// CHECK3: omp_offload.cont17:
// CHECK3-NEXT: br label [[OMP_IF_END19:%.*]]
// CHECK3: omp_if.else18:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_IF_END19]]
// CHECK3: omp_if.end19:
// CHECK3-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP109]])
// CHECK3-NEXT: ret i32 [[TMP108]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK3-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK3-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK3-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK3-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK3-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !21
// CHECK3-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !21
// CHECK3-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !21
// CHECK3-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !21
// CHECK3-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !21
// CHECK3-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !21
// CHECK3-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !21
// CHECK3-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) #[[ATTR3]]
// CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK3-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK3: omp_offload.failed.i:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100() #[[ATTR3]]
// CHECK3-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK3: .omp_outlined..1.exit:
// CHECK3-NEXT: ret i32 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
// CHECK3-SAME: (i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK3-SAME: (i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK3-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK3-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK3-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK3: .cancel.exit:
// CHECK3-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK3: .cancel.continue:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK3-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK3-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK3-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK3-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK3-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK3-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double
// CHECK3-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
// CHECK3-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
// CHECK3-NEXT: store float [[CONV6]], float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK3-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK3-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double
// CHECK3-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
// CHECK3-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
// CHECK3-NEXT: store float [[CONV10]], float* [[ARRAYIDX7]], align 4
// CHECK3-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK3-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i32 0, i32 2
// CHECK3-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX12]], align 8
// CHECK3-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK3-NEXT: store double [[ADD13]], double* [[ARRAYIDX12]], align 8
// CHECK3-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP12]]
// CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX14]], i32 3
// CHECK3-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK3-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK3-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK3-NEXT: store i64 [[ADD17]], i64* [[X]], align 4
// CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK3-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32
// CHECK3-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
// CHECK3-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
// CHECK3-NEXT: store i8 [[CONV20]], i8* [[Y]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z3bari
// CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK3-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: ret i32 [[TMP8]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK3-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
// CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK3-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK3-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK3-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4
// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4
// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32
// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK3-NEXT: ret i32 [[ADD3]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: ret i32 [[TMP31]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: ret i32 [[TMP24]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK3-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK3-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK3-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK3-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP6]]
// CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK3-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK3-NEXT: store i16 [[CONV4]], i16* [[CONV]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK3-NEXT: [[CONV5:%.*]] = sext i8 [[TMP3]] to i32
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK3-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i8
// CHECK3-NEXT: store i8 [[CONV7]], i8* [[CONV1]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK3-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = sext i16 [[TMP2]] to i32
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK3-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK3-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK4-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK4-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED3:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[A_CASTED10:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [9 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [9 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [9 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK4-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK4-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i32 [[TMP10]]) #[[ATTR3:[0-9]+]]
// CHECK4-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[TMP14]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[TMP16]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP17]], align 4
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110.region_id, i32 1, i8** [[TMP18]], i8** [[TMP19]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK4-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP12]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP22]], i32* [[A_CASTED2]], align 4
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV4:%.*]] = bitcast i32* [[AA_CASTED3]] to i16*
// CHECK4-NEXT: store i16 [[TMP24]], i16* [[CONV4]], align 2
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[AA_CASTED3]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP26]], 10
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK4-NEXT: store i32 [[TMP23]], i32* [[TMP28]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK4-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK4-NEXT: store i32 [[TMP23]], i32* [[TMP30]], align 4
// CHECK4-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
// CHECK4-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32*
// CHECK4-NEXT: store i32 [[TMP25]], i32* [[TMP33]], align 4
// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32*
// CHECK4-NEXT: store i32 [[TMP25]], i32* [[TMP35]], align 4
// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP36]], align 4
// CHECK4-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK4-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119.region_id, i32 2, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
// CHECK4-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
// CHECK4: omp_offload.failed8:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP23]], i32 [[TMP25]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT9]]
// CHECK4: omp_offload.cont9:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP23]], i32 [[TMP25]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP41]], i32* [[A_CASTED10]], align 4
// CHECK4-NEXT: [[TMP42:%.*]] = load i32, i32* [[A_CASTED10]], align 4
// CHECK4-NEXT: [[TMP43:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[TMP43]], 20
// CHECK4-NEXT: br i1 [[CMP11]], label [[OMP_IF_THEN12:%.*]], label [[OMP_IF_ELSE18:%.*]]
// CHECK4: omp_if.then12:
// CHECK4-NEXT: [[TMP44:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK4-NEXT: [[TMP45:%.*]] = sext i32 [[TMP44]] to i64
// CHECK4-NEXT: [[TMP46:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK4-NEXT: [[TMP47:%.*]] = mul nuw i32 [[TMP46]], 8
// CHECK4-NEXT: [[TMP48:%.*]] = sext i32 [[TMP47]] to i64
// CHECK4-NEXT: [[TMP49:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK4-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32*
// CHECK4-NEXT: store i32 [[TMP42]], i32* [[TMP50]], align 4
// CHECK4-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK4-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32*
// CHECK4-NEXT: store i32 [[TMP42]], i32* [[TMP52]], align 4
// CHECK4-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: store i64 4, i64* [[TMP53]], align 4
// CHECK4-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK4-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1
// CHECK4-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 4
// CHECK4-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1
// CHECK4-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]**
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 4
// CHECK4-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK4-NEXT: store i64 40, i64* [[TMP59]], align 4
// CHECK4-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP60]], align 4
// CHECK4-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2
// CHECK4-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP62]], align 4
// CHECK4-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2
// CHECK4-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP64]], align 4
// CHECK4-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK4-NEXT: store i64 4, i64* [[TMP65]], align 4
// CHECK4-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP66]], align 4
// CHECK4-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3
// CHECK4-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float**
// CHECK4-NEXT: store float* [[VLA]], float** [[TMP68]], align 4
// CHECK4-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3
// CHECK4-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float**
// CHECK4-NEXT: store float* [[VLA]], float** [[TMP70]], align 4
// CHECK4-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK4-NEXT: store i64 [[TMP45]], i64* [[TMP71]], align 4
// CHECK4-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3
// CHECK4-NEXT: store i8* null, i8** [[TMP72]], align 4
// CHECK4-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4
// CHECK4-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]**
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 4
// CHECK4-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4
// CHECK4-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]**
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 4
// CHECK4-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK4-NEXT: store i64 400, i64* [[TMP77]], align 4
// CHECK4-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4
// CHECK4-NEXT: store i8* null, i8** [[TMP78]], align 4
// CHECK4-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5
// CHECK4-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK4-NEXT: store i32 5, i32* [[TMP80]], align 4
// CHECK4-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5
// CHECK4-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32*
// CHECK4-NEXT: store i32 5, i32* [[TMP82]], align 4
// CHECK4-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK4-NEXT: store i64 4, i64* [[TMP83]], align 4
// CHECK4-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5
// CHECK4-NEXT: store i8* null, i8** [[TMP84]], align 4
// CHECK4-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6
// CHECK4-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP86]], align 4
// CHECK4-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6
// CHECK4-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP88]], align 4
// CHECK4-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK4-NEXT: store i64 4, i64* [[TMP89]], align 4
// CHECK4-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6
// CHECK4-NEXT: store i8* null, i8** [[TMP90]], align 4
// CHECK4-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7
// CHECK4-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double**
// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP92]], align 4
// CHECK4-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7
// CHECK4-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP94]], align 4
// CHECK4-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK4-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 4
// CHECK4-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7
// CHECK4-NEXT: store i8* null, i8** [[TMP96]], align 4
// CHECK4-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8
// CHECK4-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT**
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 4
// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8
// CHECK4-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT**
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 4
// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK4-NEXT: store i64 12, i64* [[TMP101]], align 4
// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8
// CHECK4-NEXT: store i8* null, i8** [[TMP102]], align 4
// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0
// CHECK4-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
// CHECK4: omp_offload.failed16:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT17]]
// CHECK4: omp_offload.cont17:
// CHECK4-NEXT: br label [[OMP_IF_END19:%.*]]
// CHECK4: omp_if.else18:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_IF_END19]]
// CHECK4: omp_if.end19:
// CHECK4-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP109]])
// CHECK4-NEXT: ret i32 [[TMP108]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK4-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK4-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK4-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK4-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK4-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !21
// CHECK4-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !21
// CHECK4-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !21
// CHECK4-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !21
// CHECK4-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !21
// CHECK4-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !21
// CHECK4-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !21
// CHECK4-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) #[[ATTR3]]
// CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK4-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK4: omp_offload.failed.i:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100() #[[ATTR3]]
// CHECK4-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK4: .omp_outlined..1.exit:
// CHECK4-NEXT: ret i32 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
// CHECK4-SAME: (i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK4-SAME: (i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK4-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK4-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK4-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK4-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK4: .cancel.exit:
// CHECK4-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK4: .cancel.continue:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK4-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK4-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK4-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK4-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double
// CHECK4-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
// CHECK4-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
// CHECK4-NEXT: store float [[CONV6]], float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK4-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK4-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double
// CHECK4-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
// CHECK4-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
// CHECK4-NEXT: store float [[CONV10]], float* [[ARRAYIDX7]], align 4
// CHECK4-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK4-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i32 0, i32 2
// CHECK4-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX12]], align 8
// CHECK4-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK4-NEXT: store double [[ADD13]], double* [[ARRAYIDX12]], align 8
// CHECK4-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK4-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP12]]
// CHECK4-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX14]], i32 3
// CHECK4-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK4-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK4-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK4-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK4-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK4-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK4-NEXT: store i64 [[ADD17]], i64* [[X]], align 4
// CHECK4-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK4-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK4-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32
// CHECK4-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
// CHECK4-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
// CHECK4-NEXT: store i8 [[CONV20]], i8* [[Y]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3bari
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK4-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK4-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: ret i32 [[TMP8]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK4-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
// CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK4-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK4-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK4-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK4-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP27]], align 4
// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4
// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4
// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4
// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4
// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4
// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4
// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
// CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32
// CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
// CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK4-NEXT: ret i32 [[ADD3]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK4-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK4-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK4-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: ret i32 [[TMP31]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: ret i32 [[TMP24]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK4-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK4-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK4-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK4-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK4-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK4-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK4-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP6]]
// CHECK4-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK4-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK4-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK4-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK4-NEXT: store i16 [[CONV4]], i16* [[CONV]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK4-NEXT: [[CONV5:%.*]] = sext i8 [[TMP3]] to i32
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK4-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i8
// CHECK4-NEXT: store i8 [[CONV7]], i8* [[CONV1]], align 4
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK4-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = sext i16 [[TMP2]] to i32
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK4-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK4-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK4-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK4-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK5-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK5-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT: store i32 0, i32* [[A]], align 4
// CHECK5-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK5-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK5-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK5-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK5-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK5-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK5-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK5-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK5-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK5-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK5-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK5-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK5-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK5-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK5-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[CONV9:%.*]] = fpext float [[TMP11]] to double
// CHECK5-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK5-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK5-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK5-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK5-NEXT: [[CONV13:%.*]] = fpext float [[TMP12]] to double
// CHECK5-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK5-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK5-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK5-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK5-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK5-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK5-NEXT: [[ADD18:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK5-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK5-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP14]]
// CHECK5-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK5-NEXT: [[TMP15:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK5-NEXT: [[ADD21:%.*]] = fadd double [[TMP15]], 1.000000e+00
// CHECK5-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK5-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK5-NEXT: [[TMP16:%.*]] = load i64, i64* [[X]], align 8
// CHECK5-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK5-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK5-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK5-NEXT: [[TMP17:%.*]] = load i8, i8* [[Y]], align 8
// CHECK5-NEXT: [[CONV23:%.*]] = sext i8 [[TMP17]] to i32
// CHECK5-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK5-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK5-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP19]])
// CHECK5-NEXT: ret i32 [[TMP18]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z3bari
// CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT: store i32 0, i32* [[A]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK5-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK5-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK5-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: ret i32 [[TMP8]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK5-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK5-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK5-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK5-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK5-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK5-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
// CHECK5-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT: store double [[ADD2]], double* [[A]], align 8
// CHECK5-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT: [[TMP6:%.*]] = load double, double* [[A3]], align 8
// CHECK5-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
// CHECK5-NEXT: store double [[INC]], double* [[A3]], align 8
// CHECK5-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK5-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP7]]
// CHECK5-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK5-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK5-NEXT: [[TMP8:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP8]]
// CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i64 1
// CHECK5-NEXT: [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK5-NEXT: [[CONV8:%.*]] = sext i16 [[TMP9]] to i32
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4
// CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK5-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK5-NEXT: ret i32 [[ADD9]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT: store i32 0, i32* [[A]], align 4
// CHECK5-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK5-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK5-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK5-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK5-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK5-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK5-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK5-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK5-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: ret i32 [[TMP4]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT: store i32 0, i32* [[A]], align 4
// CHECK5-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK5-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK5-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK5-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK5-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: ret i32 [[TMP3]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK6-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK6-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK6-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK6-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: store i32 0, i32* [[A]], align 4
// CHECK6-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK6-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK6-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK6-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK6-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK6-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK6-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK6-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK6-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK6-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
// CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK6-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK6-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK6-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK6-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK6-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK6-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK6-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK6-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK6-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[CONV9:%.*]] = fpext float [[TMP11]] to double
// CHECK6-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK6-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK6-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK6-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK6-NEXT: [[CONV13:%.*]] = fpext float [[TMP12]] to double
// CHECK6-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK6-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK6-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK6-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK6-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK6-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK6-NEXT: [[ADD18:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK6-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK6-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK6-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP14]]
// CHECK6-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK6-NEXT: [[TMP15:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK6-NEXT: [[ADD21:%.*]] = fadd double [[TMP15]], 1.000000e+00
// CHECK6-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK6-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK6-NEXT: [[TMP16:%.*]] = load i64, i64* [[X]], align 8
// CHECK6-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK6-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK6-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK6-NEXT: [[TMP17:%.*]] = load i8, i8* [[Y]], align 8
// CHECK6-NEXT: [[CONV23:%.*]] = sext i8 [[TMP17]] to i32
// CHECK6-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK6-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK6-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK6-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[TMP19:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP19]])
// CHECK6-NEXT: ret i32 [[TMP18]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z3bari
// CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: store i32 0, i32* [[A]], align 4
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK6-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK6-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK6-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: ret i32 [[TMP8]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK6-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK6-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK6-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK6-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK6-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK6-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK6-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK6-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
// CHECK6-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: store double [[ADD2]], double* [[A]], align 8
// CHECK6-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: [[TMP6:%.*]] = load double, double* [[A3]], align 8
// CHECK6-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
// CHECK6-NEXT: store double [[INC]], double* [[A3]], align 8
// CHECK6-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK6-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP7]]
// CHECK6-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK6-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK6-NEXT: [[TMP8:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP8]]
// CHECK6-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i64 1
// CHECK6-NEXT: [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK6-NEXT: [[CONV8:%.*]] = sext i16 [[TMP9]] to i32
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4
// CHECK6-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK6-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK6-NEXT: ret i32 [[ADD9]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK6-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK6-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: store i32 0, i32* [[A]], align 4
// CHECK6-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK6-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK6-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK6-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK6-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK6-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK6-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK6-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK6-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK6-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK6-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: ret i32 [[TMP4]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK6-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: store i32 0, i32* [[A]], align 4
// CHECK6-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK6-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK6-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK6-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK6-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK6-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: ret i32 [[TMP3]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK7-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK7-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK7-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[A]], align 4
// CHECK7-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK7-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK7-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK7-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK7-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK7-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2
// CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK7-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK7-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK7-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK7-NEXT: [[CONV5:%.*]] = sext i16 [[TMP7]] to i32
// CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK7-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK7-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK7-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK7-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK7-NEXT: [[CONV9:%.*]] = fpext float [[TMP9]] to double
// CHECK7-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK7-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK7-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK7-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK7-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK7-NEXT: [[CONV13:%.*]] = fpext float [[TMP10]] to double
// CHECK7-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK7-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK7-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK7-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK7-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i32 0, i32 2
// CHECK7-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK7-NEXT: [[ADD18:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK7-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK7-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK7-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP12]]
// CHECK7-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i32 3
// CHECK7-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK7-NEXT: [[ADD21:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK7-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK7-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK7-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK7-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK7-NEXT: store i64 [[ADD22]], i64* [[X]], align 4
// CHECK7-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK7-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK7-NEXT: [[CONV23:%.*]] = sext i8 [[TMP15]] to i32
// CHECK7-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK7-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK7-NEXT: store i8 [[CONV25]], i8* [[Y]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[TMP17:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP17]])
// CHECK7-NEXT: ret i32 [[TMP16]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z3bari
// CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[A]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK7-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK7-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK7-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: ret i32 [[TMP8]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK7-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK7-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK7-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK7-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK7-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK7-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT: store double [[ADD2]], double* [[A]], align 4
// CHECK7-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK7-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK7-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK7-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK7-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP6]]
// CHECK7-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK7-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK7-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP7]]
// CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i32 1
// CHECK7-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK7-NEXT: [[CONV8:%.*]] = sext i16 [[TMP8]] to i32
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4
// CHECK7-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]]
// CHECK7-NEXT: [[TMP10:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP10]])
// CHECK7-NEXT: ret i32 [[ADD9]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK7-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK7-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[A]], align 4
// CHECK7-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK7-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK7-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK7-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK7-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK7-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK7-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK7-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK7-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK7-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: ret i32 [[TMP4]]
//
//
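// ftemplate<int> is checked with 'comdat' on its definition, as expected for
// an instantiated function template that may be emitted in multiple TUs.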
// CHECK7-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK7-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[A]], align 4
// CHECK7-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK7-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK7-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK7-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK7-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: ret i32 [[TMP3]]
//
//
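// The CHECK8 expectations that follow mirror the CHECK7 ones line for line;
// only the check prefix differs between the two run configurations.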
// CHECK8-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK8-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK8-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK8-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK8-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[A]], align 4
// CHECK8-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK8-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK8-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK8-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK8-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2
// CHECK8-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK8-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK8-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK8-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK8-NEXT: [[CONV5:%.*]] = sext i16 [[TMP7]] to i32
// CHECK8-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK8-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK8-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK8-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK8-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK8-NEXT: [[CONV9:%.*]] = fpext float [[TMP9]] to double
// CHECK8-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK8-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK8-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK8-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK8-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK8-NEXT: [[CONV13:%.*]] = fpext float [[TMP10]] to double
// CHECK8-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK8-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK8-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK8-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK8-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i32 0, i32 2
// CHECK8-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK8-NEXT: [[ADD18:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK8-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK8-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK8-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP12]]
// CHECK8-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i32 3
// CHECK8-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK8-NEXT: [[ADD21:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK8-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK8-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK8-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK8-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK8-NEXT: store i64 [[ADD22]], i64* [[X]], align 4
// CHECK8-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK8-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK8-NEXT: [[CONV23:%.*]] = sext i8 [[TMP15]] to i32
// CHECK8-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK8-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK8-NEXT: store i8 [[CONV25]], i8* [[Y]], align 4
// CHECK8-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[TMP17:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP17]])
// CHECK8-NEXT: ret i32 [[TMP16]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@_Z3bari
// CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[A]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK8-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK8-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK8-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: ret i32 [[TMP8]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK8-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK8-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK8-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK8-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK8-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK8-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT: store double [[ADD2]], double* [[A]], align 4
// CHECK8-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK8-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK8-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK8-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK8-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP6]]
// CHECK8-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK8-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK8-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK8-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP7]]
// CHECK8-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i32 1
// CHECK8-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK8-NEXT: [[CONV8:%.*]] = sext i16 [[TMP8]] to i32
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4
// CHECK8-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]]
// CHECK8-NEXT: [[TMP10:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP10]])
// CHECK8-NEXT: ret i32 [[ADD9]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK8-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK8-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[A]], align 4
// CHECK8-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK8-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK8-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK8-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK8-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK8-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK8-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK8-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK8-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK8-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK8-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK8-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK8-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: ret i32 [[TMP4]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK8-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[A]], align 4
// CHECK8-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK8-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK8-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK8-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK8-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK8-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK8-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK8-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: ret i32 [[TMP3]]
//
//
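// From here on, the CHECK9 expectations cover the offloading entry points.
// Each {{__omp_offloading_...}} function unpacks its captured values and
// forwards them to a matching .omp_outlined. body through __kmpc_fork_call.
//
// A minimal sketch (hypothetical source, not quoted from this test) of the
// kind of construct such an entry/outlined pair is generated for:
//
//   short aa = 0;
//   #pragma omp target parallel   // entry receives 'aa' widened to i64
//   aa += 1;                      // outlined body does the sext/add/trunc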
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK9-SAME: (i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK9-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK9-NEXT: ret void
//
//
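// .omp_outlined..1 below also checks the cancellation protocol: the result of
// __kmpc_cancel is compared against zero and branched to .cancel.exit /
// .cancel.continue, both of which simply fall through to the return.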
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK9-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK9-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK9-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK9-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK9-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK9-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK9: .cancel.exit:
// CHECK9-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK9: .cancel.continue:
// CHECK9-NEXT: ret void
//
//
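// The l119 entry below shows the scalar-coercion idiom checked throughout the
// 64-bit prefixes: each scalar arrives as an i64, its alloca is reinterpreted
// via bitcast (e.g. i64* -> i32* or i16*), and the value is repacked through
// a *_CASTED slot before being passed to __kmpc_fork_call.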
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK9-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK9-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK9-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK9-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK9-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK9-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK9-NEXT: ret void
//
//
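// The l144 entry takes the full capture set: 'a' as a coerced i64, the fixed
// arrays and the TT struct by reference, and the VLAs as raw pointers plus
// their explicit i64 extents (VLA, VLA1, VLA3), all reloaded before the
// nine-argument __kmpc_fork_call.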
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK9-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK9-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK9-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK9-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK9-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK9-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP8]], i32* [[CONV5]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK9-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK9-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK9-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK9-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK9-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK9-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK9-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK9-NEXT: [[CONV5:%.*]] = fpext float [[TMP9]] to double
// CHECK9-NEXT: [[ADD6:%.*]] = fadd double [[CONV5]], 1.000000e+00
// CHECK9-NEXT: [[CONV7:%.*]] = fptrunc double [[ADD6]] to float
// CHECK9-NEXT: store float [[CONV7]], float* [[ARRAYIDX]], align 4
// CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK9-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK9-NEXT: [[CONV9:%.*]] = fpext float [[TMP10]] to double
// CHECK9-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK9-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK9-NEXT: store float [[CONV11]], float* [[ARRAYIDX8]], align 4
// CHECK9-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK9-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX12]], i64 0, i64 2
// CHECK9-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX13]], align 8
// CHECK9-NEXT: [[ADD14:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK9-NEXT: store double [[ADD14]], double* [[ARRAYIDX13]], align 8
// CHECK9-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK9-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP12]]
// CHECK9-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX15]], i64 3
// CHECK9-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX16]], align 8
// CHECK9-NEXT: [[ADD17:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK9-NEXT: store double [[ADD17]], double* [[ARRAYIDX16]], align 8
// CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK9-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 8
// CHECK9-NEXT: [[ADD18:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK9-NEXT: store i64 [[ADD18]], i64* [[X]], align 8
// CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK9-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 8
// CHECK9-NEXT: [[CONV19:%.*]] = sext i8 [[TMP15]] to i32
// CHECK9-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV19]], 1
// CHECK9-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK9-NEXT: store i8 [[CONV21]], i8* [[Y]], align 8
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK9-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK9-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK9-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK9-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK9-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK9-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK9-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = sext i16 [[TMP2]] to i32
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK9-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK9-NEXT: store i16 [[CONV5]], i16* [[CONV1]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK9-NEXT: [[CONV6:%.*]] = sext i8 [[TMP3]] to i32
// CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], 1
// CHECK9-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i8
// CHECK9-NEXT: store i8 [[CONV8]], i8* [[CONV2]], align 8
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK9-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
// CHECK9-NEXT: ret void
//
//
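// For the member function's target region below, 'this' is forwarded as a
// plain %struct.S1* alongside the coerced 'b' and the two VLA extents, so the
// outlined body can recompute &this->a and index the i16 VLA directly.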
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK9-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK9-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV3]], 1.500000e+00
// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK9-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK9-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK9-NEXT: [[TMP5:%.*]] = load double, double* [[A4]], align 8
// CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK9-NEXT: store double [[INC]], double* [[A4]], align 8
// CHECK9-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK9-NEXT: [[TMP6:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP6]]
// CHECK9-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK9-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK9-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK9-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK9-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK9-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK9-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK9-NEXT: store i32 [[ADD5]], i32* [[ARRAYIDX]], align 4
// CHECK9-NEXT: ret void
//
//
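// As with CHECK7/CHECK8, the CHECK10 expectations below repeat the CHECK9
// ones line for line under a different check prefix.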
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK10-SAME: (i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK10-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK10-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK10-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK10-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK10-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK10-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK10-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK10: .cancel.exit:
// CHECK10-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK10: .cancel.continue:
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK10-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK10-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK10-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK10-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
// CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK10-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK10-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK10-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK10-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK10-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK10-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK10-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK10-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK10-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK10-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK10-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP8]], i32* [[CONV5]], align 4
// CHECK10-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK10-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK10-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK10-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK10-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK10-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK10-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK10-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK10-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK10-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK10-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK10-NEXT: [[CONV5:%.*]] = fpext float [[TMP9]] to double
// CHECK10-NEXT: [[ADD6:%.*]] = fadd double [[CONV5]], 1.000000e+00
// CHECK10-NEXT: [[CONV7:%.*]] = fptrunc double [[ADD6]] to float
// CHECK10-NEXT: store float [[CONV7]], float* [[ARRAYIDX]], align 4
// CHECK10-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK10-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK10-NEXT: [[CONV9:%.*]] = fpext float [[TMP10]] to double
// CHECK10-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK10-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK10-NEXT: store float [[CONV11]], float* [[ARRAYIDX8]], align 4
// CHECK10-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK10-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX12]], i64 0, i64 2
// CHECK10-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX13]], align 8
// CHECK10-NEXT: [[ADD14:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK10-NEXT: store double [[ADD14]], double* [[ARRAYIDX13]], align 8
// CHECK10-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK10-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP12]]
// CHECK10-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX15]], i64 3
// CHECK10-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX16]], align 8
// CHECK10-NEXT: [[ADD17:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK10-NEXT: store double [[ADD17]], double* [[ARRAYIDX16]], align 8
// CHECK10-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK10-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 8
// CHECK10-NEXT: [[ADD18:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK10-NEXT: store i64 [[ADD18]], i64* [[X]], align 8
// CHECK10-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK10-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 8
// CHECK10-NEXT: [[CONV19:%.*]] = sext i8 [[TMP15]] to i32
// CHECK10-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV19]], 1
// CHECK10-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK10-NEXT: store i8 [[CONV21]], i8* [[Y]], align 8
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK10-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK10-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK10-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK10-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK10-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK10-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK10-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK10-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV3:%.*]] = sext i16 [[TMP2]] to i32
// CHECK10-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK10-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK10-NEXT: store i16 [[CONV5]], i16* [[CONV1]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK10-NEXT: [[CONV6:%.*]] = sext i8 [[TMP3]] to i32
// CHECK10-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], 1
// CHECK10-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i8
// CHECK10-NEXT: store i8 [[CONV8]], i8* [[CONV2]], align 8
// CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK10-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK10-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK10-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK10-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK10-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK10-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK10-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK10-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[CONV3:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK10-NEXT: [[ADD:%.*]] = fadd double [[CONV3]], 1.500000e+00
// CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK10-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK10-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK10-NEXT: [[TMP5:%.*]] = load double, double* [[A4]], align 8
// CHECK10-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK10-NEXT: store double [[INC]], double* [[A4]], align 8
// CHECK10-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK10-NEXT: [[TMP6:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP6]]
// CHECK10-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK10-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK10-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK10-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK10-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK10-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK10-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK10-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK10-NEXT: store i32 [[ADD5]], i32* [[ARRAYIDX]], align 4
// CHECK10-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK11-SAME: (i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK11-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK11-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK11-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK11-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK11-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK11: .cancel.exit:
// CHECK11-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK11: .cancel.continue:
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK11-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK11-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK11-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK11-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK11-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK11-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK11-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK11-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK11-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK11-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK11-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK11-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK11-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK11-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK11-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK11-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK11-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK11-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK11-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK11-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double
// CHECK11-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
// CHECK11-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
// CHECK11-NEXT: store float [[CONV6]], float* [[ARRAYIDX]], align 4
// CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK11-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK11-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double
// CHECK11-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
// CHECK11-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
// CHECK11-NEXT: store float [[CONV10]], float* [[ARRAYIDX7]], align 4
// CHECK11-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK11-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i32 0, i32 2
// CHECK11-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX12]], align 8
// CHECK11-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK11-NEXT: store double [[ADD13]], double* [[ARRAYIDX12]], align 8
// CHECK11-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK11-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP12]]
// CHECK11-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX14]], i32 3
// CHECK11-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK11-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK11-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK11-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK11-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK11-NEXT: store i64 [[ADD17]], i64* [[X]], align 4
// CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK11-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK11-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32
// CHECK11-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
// CHECK11-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
// CHECK11-NEXT: store i8 [[CONV20]], i8* [[Y]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK11-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK11-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK11-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK11-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK11-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK11-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK11-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK11-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK11-NEXT: store i16 [[CONV4]], i16* [[CONV]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK11-NEXT: [[CONV5:%.*]] = sext i8 [[TMP3]] to i32
// CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK11-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i8
// CHECK11-NEXT: store i8 [[CONV7]], i8* [[CONV1]], align 4
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK11-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK11-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK11-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK11-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK11-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK11-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK11-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK11-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK11-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP6]]
// CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK11-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK11-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK11-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK11-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV1:%.*]] = sext i16 [[TMP2]] to i32
// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK11-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK11-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK11-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK12-SAME: (i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK12-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK12-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK12-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK12-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK12-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK12: .cancel.exit:
// CHECK12-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK12: .cancel.continue:
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK12-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK12-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK12-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
// CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK12-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK12-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK12-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK12-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK12-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK12-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK12-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK12-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK12-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK12-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK12-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK12-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK12-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK12-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK12-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK12-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK12-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK12-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK12-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK12-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK12-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double
// CHECK12-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
// CHECK12-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
// CHECK12-NEXT: store float [[CONV6]], float* [[ARRAYIDX]], align 4
// CHECK12-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK12-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK12-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double
// CHECK12-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
// CHECK12-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
// CHECK12-NEXT: store float [[CONV10]], float* [[ARRAYIDX7]], align 4
// CHECK12-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK12-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i32 0, i32 2
// CHECK12-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX12]], align 8
// CHECK12-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK12-NEXT: store double [[ADD13]], double* [[ARRAYIDX12]], align 8
// CHECK12-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK12-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP12]]
// CHECK12-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX14]], i32 3
// CHECK12-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK12-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK12-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK12-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK12-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK12-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK12-NEXT: store i64 [[ADD17]], i64* [[X]], align 4
// CHECK12-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK12-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK12-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32
// CHECK12-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
// CHECK12-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
// CHECK12-NEXT: store i8 [[CONV20]], i8* [[Y]], align 4
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK12-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK12-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK12-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK12-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK12-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK12-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK12-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK12-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK12-NEXT: store i16 [[CONV4]], i16* [[CONV]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK12-NEXT: [[CONV5:%.*]] = sext i8 [[TMP3]] to i32
// CHECK12-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK12-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i8
// CHECK12-NEXT: store i8 [[CONV7]], i8* [[CONV1]], align 4
// CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK12-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK12-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK12-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK12-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK12-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK12-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK12-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK12-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK12-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK12-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK12-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK12-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP6]]
// CHECK12-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK12-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK12-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK12-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK12-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = sext i16 [[TMP2]] to i32
// CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK12-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK12-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK12-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK12-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK12-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK13-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: store i32 0, i32* [[A]], align 4
// CHECK13-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK13-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK13-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK13-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK13-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK13-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
// CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK13-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK13-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK13-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK13-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK13-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK13-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK13-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK13-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK13-NEXT: [[CONV9:%.*]] = fpext float [[TMP11]] to double
// CHECK13-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK13-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK13-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK13-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK13-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK13-NEXT: [[CONV13:%.*]] = fpext float [[TMP12]] to double
// CHECK13-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK13-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK13-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK13-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK13-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK13-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK13-NEXT: [[ADD18:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK13-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK13-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK13-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP14]]
// CHECK13-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK13-NEXT: [[TMP15:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK13-NEXT: [[ADD21:%.*]] = fadd double [[TMP15]], 1.000000e+00
// CHECK13-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK13-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK13-NEXT: [[TMP16:%.*]] = load i64, i64* [[X]], align 8
// CHECK13-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK13-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK13-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK13-NEXT: [[TMP17:%.*]] = load i8, i8* [[Y]], align 8
// CHECK13-NEXT: [[CONV23:%.*]] = sext i8 [[TMP17]] to i32
// CHECK13-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK13-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK13-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[TMP19:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP19]])
// CHECK13-NEXT: ret i32 [[TMP18]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z3bari
// CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: store i32 0, i32* [[A]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK13-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK13-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK13-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: ret i32 [[TMP8]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK13-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK13-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK13-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK13-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK13-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK13-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK13-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
// CHECK13-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK13-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT: store double [[ADD2]], double* [[A]], align 8
// CHECK13-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT: [[TMP6:%.*]] = load double, double* [[A3]], align 8
// CHECK13-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
// CHECK13-NEXT: store double [[INC]], double* [[A3]], align 8
// CHECK13-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK13-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP7]]
// CHECK13-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK13-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK13-NEXT: [[TMP8:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK13-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP8]]
// CHECK13-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i64 1
// CHECK13-NEXT: [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK13-NEXT: [[CONV8:%.*]] = sext i16 [[TMP9]] to i32
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4
// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK13-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK13-NEXT: ret i32 [[ADD9]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: store i32 0, i32* [[A]], align 4
// CHECK13-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK13-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK13-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK13-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK13-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK13-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK13-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK13-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK13-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: ret i32 [[TMP4]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: store i32 0, i32* [[A]], align 4
// CHECK13-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK13-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK13-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK13-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK13-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: ret i32 [[TMP3]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK14-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK14-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK14-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK14-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: store i32 0, i32* [[A]], align 4
// CHECK14-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK14-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK14-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK14-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK14-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK14-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK14-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK14-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
// CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK14-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK14-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK14-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK14-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK14-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK14-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK14-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK14-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK14-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK14-NEXT: [[CONV9:%.*]] = fpext float [[TMP11]] to double
// CHECK14-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK14-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK14-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK14-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK14-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK14-NEXT: [[CONV13:%.*]] = fpext float [[TMP12]] to double
// CHECK14-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK14-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK14-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK14-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK14-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK14-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK14-NEXT: [[ADD18:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK14-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK14-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK14-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP14]]
// CHECK14-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK14-NEXT: [[TMP15:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK14-NEXT: [[ADD21:%.*]] = fadd double [[TMP15]], 1.000000e+00
// CHECK14-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK14-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK14-NEXT: [[TMP16:%.*]] = load i64, i64* [[X]], align 8
// CHECK14-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK14-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK14-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK14-NEXT: [[TMP17:%.*]] = load i8, i8* [[Y]], align 8
// CHECK14-NEXT: [[CONV23:%.*]] = sext i8 [[TMP17]] to i32
// CHECK14-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK14-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK14-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[TMP19:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP19]])
// CHECK14-NEXT: ret i32 [[TMP18]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z3bari
// CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: store i32 0, i32* [[A]], align 4
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK14-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK14-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK14-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: ret i32 [[TMP8]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK14-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK14-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK14-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK14-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK14-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK14-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK14-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK14-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
// CHECK14-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK14-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK14-NEXT: store double [[ADD2]], double* [[A]], align 8
// CHECK14-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK14-NEXT: [[TMP6:%.*]] = load double, double* [[A3]], align 8
// CHECK14-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
// CHECK14-NEXT: store double [[INC]], double* [[A3]], align 8
// CHECK14-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK14-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP7]]
// CHECK14-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK14-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK14-NEXT: [[TMP8:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK14-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP8]]
// CHECK14-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i64 1
// CHECK14-NEXT: [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK14-NEXT: [[CONV8:%.*]] = sext i16 [[TMP9]] to i32
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4
// CHECK14-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK14-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK14-NEXT: ret i32 [[ADD9]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK14-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK14-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: store i32 0, i32* [[A]], align 4
// CHECK14-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK14-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK14-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK14-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK14-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK14-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK14-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK14-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK14-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK14-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK14-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK14-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK14-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: ret i32 [[TMP4]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK14-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: store i32 0, i32* [[A]], align 4
// CHECK14-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK14-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK14-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK14-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK14-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK14-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK14-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: ret i32 [[TMP3]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK15-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK15-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK15-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK15-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[A]], align 4
// CHECK15-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK15-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK15-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK15-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2
// CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK15-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK15-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK15-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK15-NEXT: [[CONV5:%.*]] = sext i16 [[TMP7]] to i32
// CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK15-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK15-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK15-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK15-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK15-NEXT: [[CONV9:%.*]] = fpext float [[TMP9]] to double
// CHECK15-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK15-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK15-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK15-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK15-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK15-NEXT: [[CONV13:%.*]] = fpext float [[TMP10]] to double
// CHECK15-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK15-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK15-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK15-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK15-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i32 0, i32 2
// CHECK15-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK15-NEXT: [[ADD18:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK15-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK15-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK15-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP12]]
// CHECK15-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i32 3
// CHECK15-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK15-NEXT: [[ADD21:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK15-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK15-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK15-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK15-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK15-NEXT: store i64 [[ADD22]], i64* [[X]], align 4
// CHECK15-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK15-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK15-NEXT: [[CONV23:%.*]] = sext i8 [[TMP15]] to i32
// CHECK15-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK15-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK15-NEXT: store i8 [[CONV25]], i8* [[Y]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[TMP17:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP17]])
// CHECK15-NEXT: ret i32 [[TMP16]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z3bari
// CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[A]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK15-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK15-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK15-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: ret i32 [[TMP8]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK15-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK15-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK15-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK15-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK15-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK15-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK15-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK15-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK15-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK15-NEXT: store double [[ADD2]], double* [[A]], align 4
// CHECK15-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK15-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK15-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK15-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK15-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK15-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP6]]
// CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK15-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK15-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK15-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP7]]
// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i32 1
// CHECK15-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK15-NEXT: [[CONV8:%.*]] = sext i16 [[TMP8]] to i32
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4
// CHECK15-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]]
// CHECK15-NEXT: [[TMP10:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP10]])
// CHECK15-NEXT: ret i32 [[ADD9]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK15-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK15-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[A]], align 4
// CHECK15-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK15-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK15-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK15-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK15-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK15-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK15-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK15-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK15-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK15-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: ret i32 [[TMP4]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK15-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[A]], align 4
// CHECK15-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK15-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK15-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK15-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK15-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT: ret i32 [[TMP3]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK16-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK16-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK16-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK16-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[A]], align 4
// CHECK16-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK16-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK16-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK16-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2
// CHECK16-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK16-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK16-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK16-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK16-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK16-NEXT: [[CONV5:%.*]] = sext i16 [[TMP7]] to i32
// CHECK16-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK16-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK16-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK16-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK16-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK16-NEXT: [[CONV9:%.*]] = fpext float [[TMP9]] to double
// CHECK16-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK16-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK16-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK16-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK16-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK16-NEXT: [[CONV13:%.*]] = fpext float [[TMP10]] to double
// CHECK16-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK16-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK16-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK16-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK16-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i32 0, i32 2
// CHECK16-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK16-NEXT: [[ADD18:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK16-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK16-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK16-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP12]]
// CHECK16-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i32 3
// CHECK16-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK16-NEXT: [[ADD21:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK16-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK16-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK16-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK16-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK16-NEXT: store i64 [[ADD22]], i64* [[X]], align 4
// CHECK16-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK16-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK16-NEXT: [[CONV23:%.*]] = sext i8 [[TMP15]] to i32
// CHECK16-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK16-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK16-NEXT: store i8 [[CONV25]], i8* [[Y]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[TMP17:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP17]])
// CHECK16-NEXT: ret i32 [[TMP16]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z3bari
// CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[A]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK16-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK16-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK16-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: ret i32 [[TMP8]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK16-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK16-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK16-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK16-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK16-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK16-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK16-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK16-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT: store double [[ADD2]], double* [[A]], align 4
// CHECK16-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK16-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK16-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK16-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK16-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP6]]
// CHECK16-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK16-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK16-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK16-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP7]]
// CHECK16-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i32 1
// CHECK16-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK16-NEXT: [[CONV8:%.*]] = sext i16 [[TMP8]] to i32
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4
// CHECK16-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]]
// CHECK16-NEXT: [[TMP10:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP10]])
// CHECK16-NEXT: ret i32 [[ADD9]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK16-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK16-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[A]], align 4
// CHECK16-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK16-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK16-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK16-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK16-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK16-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK16-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK16-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK16-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK16-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK16-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK16-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: ret i32 [[TMP4]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK16-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[A]], align 4
// CHECK16-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK16-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK16-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK16-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK16-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK16-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: ret i32 [[TMP3]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK17-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK17-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK17-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK17-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK17-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[A_CASTED3:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED5:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[A_CASTED12:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [9 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [9 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [9 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 0, i32* [[A]], align 4
// CHECK17-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK17-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK17-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK17-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK17-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK17-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK17-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP7]])
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i64 [[TMP12]]) #[[ATTR3:[0-9]+]]
// CHECK17-NEXT: [[TMP13:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP13]], i16* [[CONV2]], align 2
// CHECK17-NEXT: [[TMP14:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
// CHECK17-NEXT: store i64 [[TMP14]], i64* [[TMP16]], align 8
// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK17-NEXT: store i64 [[TMP14]], i64* [[TMP18]], align 8
// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110.region_id, i32 1, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK17-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP14]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[A_CASTED3]] to i32*
// CHECK17-NEXT: store i32 [[TMP24]], i32* [[CONV4]], align 4
// CHECK17-NEXT: [[TMP25:%.*]] = load i64, i64* [[A_CASTED3]], align 8
// CHECK17-NEXT: [[TMP26:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[AA_CASTED5]] to i16*
// CHECK17-NEXT: store i16 [[TMP26]], i16* [[CONV6]], align 2
// CHECK17-NEXT: [[TMP27:%.*]] = load i64, i64* [[AA_CASTED5]], align 8
// CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 10
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK17-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK17-NEXT: store i64 [[TMP25]], i64* [[TMP30]], align 8
// CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK17-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK17-NEXT: store i64 [[TMP25]], i64* [[TMP32]], align 8
// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64*
// CHECK17-NEXT: store i64 [[TMP27]], i64* [[TMP35]], align 8
// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
// CHECK17-NEXT: store i64 [[TMP27]], i64* [[TMP37]], align 8
// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP38]], align 8
// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK17-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119.region_id, i32 2, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0
// CHECK17-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
// CHECK17: omp_offload.failed10:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT11]]
// CHECK17: omp_offload.cont11:
// CHECK17-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.else:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: [[TMP43:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV13:%.*]] = bitcast i64* [[A_CASTED12]] to i32*
// CHECK17-NEXT: store i32 [[TMP43]], i32* [[CONV13]], align 4
// CHECK17-NEXT: [[TMP44:%.*]] = load i64, i64* [[A_CASTED12]], align 8
// CHECK17-NEXT: [[TMP45:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP14:%.*]] = icmp sgt i32 [[TMP45]], 20
// CHECK17-NEXT: br i1 [[CMP14]], label [[OMP_IF_THEN15:%.*]], label [[OMP_IF_ELSE21:%.*]]
// CHECK17: omp_if.then15:
// CHECK17-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK17-NEXT: [[TMP47:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK17-NEXT: [[TMP48:%.*]] = mul nuw i64 [[TMP47]], 8
// CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
// CHECK17-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i64*
// CHECK17-NEXT: store i64 [[TMP44]], i64* [[TMP50]], align 8
// CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
// CHECK17-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64*
// CHECK17-NEXT: store i64 [[TMP44]], i64* [[TMP52]], align 8
// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK17-NEXT: store i64 4, i64* [[TMP53]], align 8
// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP54]], align 8
// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
// CHECK17-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 8
// CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
// CHECK17-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]**
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 8
// CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK17-NEXT: store i64 40, i64* [[TMP59]], align 8
// CHECK17-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP60]], align 8
// CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2
// CHECK17-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP62]], align 8
// CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2
// CHECK17-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64*
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP64]], align 8
// CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK17-NEXT: store i64 8, i64* [[TMP65]], align 8
// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP66]], align 8
// CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3
// CHECK17-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float**
// CHECK17-NEXT: store float* [[VLA]], float** [[TMP68]], align 8
// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3
// CHECK17-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float**
// CHECK17-NEXT: store float* [[VLA]], float** [[TMP70]], align 8
// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK17-NEXT: store i64 [[TMP46]], i64* [[TMP71]], align 8
// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3
// CHECK17-NEXT: store i8* null, i8** [[TMP72]], align 8
// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4
// CHECK17-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]**
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 8
// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4
// CHECK17-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]**
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 8
// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK17-NEXT: store i64 400, i64* [[TMP77]], align 8
// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4
// CHECK17-NEXT: store i8* null, i8** [[TMP78]], align 8
// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5
// CHECK17-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
// CHECK17-NEXT: store i64 5, i64* [[TMP80]], align 8
// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5
// CHECK17-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64*
// CHECK17-NEXT: store i64 5, i64* [[TMP82]], align 8
// CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK17-NEXT: store i64 8, i64* [[TMP83]], align 8
// CHECK17-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5
// CHECK17-NEXT: store i8* null, i8** [[TMP84]], align 8
// CHECK17-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6
// CHECK17-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i64*
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP86]], align 8
// CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6
// CHECK17-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP88]], align 8
// CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK17-NEXT: store i64 8, i64* [[TMP89]], align 8
// CHECK17-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6
// CHECK17-NEXT: store i8* null, i8** [[TMP90]], align 8
// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7
// CHECK17-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double**
// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP92]], align 8
// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7
// CHECK17-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP94]], align 8
// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK17-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 8
// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7
// CHECK17-NEXT: store i8* null, i8** [[TMP96]], align 8
// CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8
// CHECK17-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT**
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 8
// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8
// CHECK17-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT**
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 8
// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK17-NEXT: store i64 16, i64* [[TMP101]], align 8
// CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8
// CHECK17-NEXT: store i8* null, i8** [[TMP102]], align 8
// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK17-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0
// CHECK17-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
// CHECK17: omp_offload.failed19:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT20]]
// CHECK17: omp_offload.cont20:
// CHECK17-NEXT: br label [[OMP_IF_END22:%.*]]
// CHECK17: omp_if.else21:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_IF_END22]]
// CHECK17: omp_if.end22:
// CHECK17-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP109]])
// CHECK17-NEXT: ret i32 [[TMP108]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK17-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK17-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK17-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK17-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK17-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK17-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK17-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK17-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK17-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]])
// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]])
// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !20
// CHECK17-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !20
// CHECK17-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !20
// CHECK17-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !20
// CHECK17-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !20
// CHECK17-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !20
// CHECK17-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !20
// CHECK17-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) #[[ATTR3]]
// CHECK17-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK17-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK17: omp_offload.failed.i:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100() #[[ATTR3]]
// CHECK17-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK17: .omp_outlined..1.exit:
// CHECK17-NEXT: ret i32 0
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
// CHECK17-SAME: (i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK17-SAME: (i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK17-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK17-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK17-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK17-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK17: .cancel.exit:
// CHECK17-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK17: .cancel.continue:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
// CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK17-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK17-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK17-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP8]], i32* [[CONV5]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK17-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK17-NEXT: [[CONV5:%.*]] = fpext float [[TMP9]] to double
// CHECK17-NEXT: [[ADD6:%.*]] = fadd double [[CONV5]], 1.000000e+00
// CHECK17-NEXT: [[CONV7:%.*]] = fptrunc double [[ADD6]] to float
// CHECK17-NEXT: store float [[CONV7]], float* [[ARRAYIDX]], align 4
// CHECK17-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK17-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK17-NEXT: [[CONV9:%.*]] = fpext float [[TMP10]] to double
// CHECK17-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK17-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK17-NEXT: store float [[CONV11]], float* [[ARRAYIDX8]], align 4
// CHECK17-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK17-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX12]], i64 0, i64 2
// CHECK17-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX13]], align 8
// CHECK17-NEXT: [[ADD14:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK17-NEXT: store double [[ADD14]], double* [[ARRAYIDX13]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK17-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP12]]
// CHECK17-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX15]], i64 3
// CHECK17-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX16]], align 8
// CHECK17-NEXT: [[ADD17:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK17-NEXT: store double [[ADD17]], double* [[ARRAYIDX16]], align 8
// CHECK17-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK17-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 8
// CHECK17-NEXT: [[ADD18:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK17-NEXT: store i64 [[ADD18]], i64* [[X]], align 8
// CHECK17-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK17-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 8
// CHECK17-NEXT: [[CONV19:%.*]] = sext i8 [[TMP15]] to i32
// CHECK17-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV19]], 1
// CHECK17-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK17-NEXT: store i8 [[CONV21]], i8* [[Y]], align 8
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@_Z3bari
// CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 0, i32* [[A]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK17-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK17-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK17-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: ret i32 [[TMP8]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK17-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK17-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
// CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK17-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK17-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK17-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK17-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK17-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK17-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK17-NEXT: store double* [[A]], double** [[TMP13]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK17-NEXT: store i64 8, i64* [[TMP14]], align 8
// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8
// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8
// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8
// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK17-NEXT: store i64 4, i64* [[TMP20]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
// CHECK17-NEXT: store i64 2, i64* [[TMP23]], align 8
// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK17-NEXT: store i64 2, i64* [[TMP25]], align 8
// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK17-NEXT: store i64 8, i64* [[TMP26]], align 8
// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP27]], align 8
// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8
// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8
// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK17-NEXT: store i64 8, i64* [[TMP32]], align 8
// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8
// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8
// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK17-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8
// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK17-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK17-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.else:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
// CHECK17-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK17-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
// CHECK17-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
// CHECK17-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK17-NEXT: ret i32 [[ADD4]]
//
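// A sketch, not part of the autogenerated assertions: the CHECK17 host-side
// checks above populate a 5-entry map (this, b, the two VLA extents, and the
// VLA storage itself, whose byte size [[TMP9]] is only known at run time,
// which is why the sizes argument to @__tgt_target_teams_mapper is the stack
// array [[TMP42]] rather than a constant @.offload_sizes global). A source
// shape of approximately the following form, matching the region name
// __ZN2S12r1Ei_l216 and the outlined body checked further below (the guard's
// comparison sits above this excerpt, so its threshold is assumed):
//
//   struct S1 {
//     double a;
//     int r1(int n) {
//       int b = n;
//       short c[2][n];                // the two i64 VLA arguments
//   #pragma omp target parallel if(/* condition on n */ 1)
//       {
//         this->a = (double)b + 1.5;
//         ++this->a;
//         c[1][1] = (short)this->a;   // matches .omp_outlined..9
//       }
//       return c[1][1] + b;           // matches the sext/add/ret tail above
//     }
//   };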
//
// CHECK17-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK17-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK17-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 0, i32* [[A]], align 4
// CHECK17-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK17-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK17-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK17-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK17-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.else:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: ret i32 [[TMP31]]
//
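// A sketch, not part of the autogenerated assertions, of a source shape
// consistent with the _ZL7fstatici checks above: the host compares n > 50 to
// choose between offload and host fallback, maps a/aa/aaa by value plus the
// 40-byte array b, and the outlined body (.omp_outlined..11 below) bumps each
// mapped variable by one:
//
//   static int fstatic(int n) {
//     int a = 0;
//     short aa = 0;
//     char aaa = 0;
//     int b[10];
//   #pragma omp target parallel if(n > 50)   // exact if-modifier is assumed
//     {
//       a += 1;
//       aa += 1;
//       aaa += 1;
//       b[2] += 1;
//     }
//     return a;
//   }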
//
// CHECK17-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK17-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 0, i32* [[A]], align 4
// CHECK17-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK17-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.else:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: ret i32 [[TMP24]]
//
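// Likewise for _Z9ftemplateIiET_i above: three map entries (a, aa, b), an
// n > 40 guard, and the .omp_outlined..14 body below incrementing a, aa and
// b[2]. A sketch, with the if-modifier again assumed:
//
//   template <typename T>
//   T ftemplate(int n) {
//     T a = 0;
//     short aa = 0;
//     int b[10];
//   #pragma omp target parallel if(n > 40)
//     {
//       a += 1;
//       aa += 1;
//       b[2] += 1;
//     }
//     return a;
//   }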
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK17-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK17-NEXT: [[ADD:%.*]] = fadd double [[CONV3]], 1.500000e+00
// CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK17-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK17-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK17-NEXT: [[TMP5:%.*]] = load double, double* [[A4]], align 8
// CHECK17-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK17-NEXT: store double [[INC]], double* [[A4]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK17-NEXT: [[TMP6:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP6]]
// CHECK17-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK17-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK17-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP2]] to i32
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK17-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK17-NEXT: store i16 [[CONV5]], i16* [[CONV1]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = sext i8 [[TMP3]] to i32
// CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], 1
// CHECK17-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i8
// CHECK17-NEXT: store i8 [[CONV8]], i8* [[CONV2]], align 8
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK17-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK17-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK17-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK17-NEXT: store i32 [[ADD5]], i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK17-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK17-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK18-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK18-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK18-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK18-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK18-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[A_CASTED3:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED5:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[A_CASTED12:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [9 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [9 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [9 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK18-NEXT: store i32 0, i32* [[A]], align 4
// CHECK18-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK18-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK18-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK18-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK18-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK18-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK18-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK18-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP7]])
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK18-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i64 [[TMP12]]) #[[ATTR3:[0-9]+]]
// CHECK18-NEXT: [[TMP13:%.*]] = load i16, i16* [[AA]], align 2
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK18-NEXT: store i16 [[TMP13]], i16* [[CONV2]], align 2
// CHECK18-NEXT: [[TMP14:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
// CHECK18-NEXT: store i64 [[TMP14]], i64* [[TMP16]], align 8
// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK18-NEXT: store i64 [[TMP14]], i64* [[TMP18]], align 8
// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110.region_id, i32 1, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK18-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK18: omp_offload.failed:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP14]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK18: omp_offload.cont:
// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[A_CASTED3]] to i32*
// CHECK18-NEXT: store i32 [[TMP24]], i32* [[CONV4]], align 4
// CHECK18-NEXT: [[TMP25:%.*]] = load i64, i64* [[A_CASTED3]], align 8
// CHECK18-NEXT: [[TMP26:%.*]] = load i16, i16* [[AA]], align 2
// CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[AA_CASTED5]] to i16*
// CHECK18-NEXT: store i16 [[TMP26]], i16* [[CONV6]], align 2
// CHECK18-NEXT: [[TMP27:%.*]] = load i64, i64* [[AA_CASTED5]], align 8
// CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 10
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK18: omp_if.then:
// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK18-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK18-NEXT: store i64 [[TMP25]], i64* [[TMP30]], align 8
// CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK18-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK18-NEXT: store i64 [[TMP25]], i64* [[TMP32]], align 8
// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64*
// CHECK18-NEXT: store i64 [[TMP27]], i64* [[TMP35]], align 8
// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
// CHECK18-NEXT: store i64 [[TMP27]], i64* [[TMP37]], align 8
// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP38]], align 8
// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK18-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119.region_id, i32 2, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0
// CHECK18-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
// CHECK18: omp_offload.failed10:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT11]]
// CHECK18: omp_offload.cont11:
// CHECK18-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK18: omp_if.else:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_IF_END]]
// CHECK18: omp_if.end:
// CHECK18-NEXT: [[TMP43:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV13:%.*]] = bitcast i64* [[A_CASTED12]] to i32*
// CHECK18-NEXT: store i32 [[TMP43]], i32* [[CONV13]], align 4
// CHECK18-NEXT: [[TMP44:%.*]] = load i64, i64* [[A_CASTED12]], align 8
// CHECK18-NEXT: [[TMP45:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CMP14:%.*]] = icmp sgt i32 [[TMP45]], 20
// CHECK18-NEXT: br i1 [[CMP14]], label [[OMP_IF_THEN15:%.*]], label [[OMP_IF_ELSE21:%.*]]
// CHECK18: omp_if.then15:
// CHECK18-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK18-NEXT: [[TMP47:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK18-NEXT: [[TMP48:%.*]] = mul nuw i64 [[TMP47]], 8
// CHECK18-NEXT: [[TMP49:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
// CHECK18-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i64*
// CHECK18-NEXT: store i64 [[TMP44]], i64* [[TMP50]], align 8
// CHECK18-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
// CHECK18-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64*
// CHECK18-NEXT: store i64 [[TMP44]], i64* [[TMP52]], align 8
// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK18-NEXT: store i64 4, i64* [[TMP53]], align 8
// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP54]], align 8
// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
// CHECK18-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 8
// CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
// CHECK18-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]**
// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 8
// CHECK18-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK18-NEXT: store i64 40, i64* [[TMP59]], align 8
// CHECK18-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP60]], align 8
// CHECK18-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2
// CHECK18-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP62]], align 8
// CHECK18-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2
// CHECK18-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64*
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP64]], align 8
// CHECK18-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK18-NEXT: store i64 8, i64* [[TMP65]], align 8
// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP66]], align 8
// CHECK18-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3
// CHECK18-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float**
// CHECK18-NEXT: store float* [[VLA]], float** [[TMP68]], align 8
// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3
// CHECK18-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float**
// CHECK18-NEXT: store float* [[VLA]], float** [[TMP70]], align 8
// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK18-NEXT: store i64 [[TMP46]], i64* [[TMP71]], align 8
// CHECK18-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3
// CHECK18-NEXT: store i8* null, i8** [[TMP72]], align 8
// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4
// CHECK18-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]**
// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 8
// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4
// CHECK18-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]**
// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 8
// CHECK18-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK18-NEXT: store i64 400, i64* [[TMP77]], align 8
// CHECK18-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4
// CHECK18-NEXT: store i8* null, i8** [[TMP78]], align 8
// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5
// CHECK18-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
// CHECK18-NEXT: store i64 5, i64* [[TMP80]], align 8
// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5
// CHECK18-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64*
// CHECK18-NEXT: store i64 5, i64* [[TMP82]], align 8
// CHECK18-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK18-NEXT: store i64 8, i64* [[TMP83]], align 8
// CHECK18-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5
// CHECK18-NEXT: store i8* null, i8** [[TMP84]], align 8
// CHECK18-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6
// CHECK18-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i64*
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP86]], align 8
// CHECK18-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6
// CHECK18-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP88]], align 8
// CHECK18-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK18-NEXT: store i64 8, i64* [[TMP89]], align 8
// CHECK18-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6
// CHECK18-NEXT: store i8* null, i8** [[TMP90]], align 8
// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7
// CHECK18-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double**
// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP92]], align 8
// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7
// CHECK18-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP94]], align 8
// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK18-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 8
// CHECK18-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7
// CHECK18-NEXT: store i8* null, i8** [[TMP96]], align 8
// CHECK18-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8
// CHECK18-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT**
// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 8
// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8
// CHECK18-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT**
// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 8
// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK18-NEXT: store i64 16, i64* [[TMP101]], align 8
// CHECK18-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8
// CHECK18-NEXT: store i8* null, i8** [[TMP102]], align 8
// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
// CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK18-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0
// CHECK18-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
// CHECK18: omp_offload.failed19:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT20]]
// CHECK18: omp_offload.cont20:
// CHECK18-NEXT: br label [[OMP_IF_END22:%.*]]
// CHECK18: omp_if.else21:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_IF_END22]]
// CHECK18: omp_if.end22:
// CHECK18-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP109]])
// CHECK18-NEXT: ret i32 [[TMP108]]
//
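// The CHECK18 _Z3fooi checks above walk the same host sequence as CHECK17: a
// deferred offload task is allocated and enqueued for the l100 region
// (resolved in @.omp_task_entry. below via the nowait mapper), the l104
// region is entered directly through its host fallback (consistent with an
// if-clause that folds to false; the guard itself is not in this excerpt),
// l110 attempts the offload unconditionally with fallback on failure, and
// l119/l144 are guarded by n > 10 and n > 20. A condensed, assumed pragma
// outline:
//
//   #pragma omp target parallel nowait             // -> ..._l100 task path
//   #pragma omp target parallel if(/* false */ 0)  // -> ..._l104 direct call
//   #pragma omp target parallel                    // -> ..._l110 (has cancel)
//   #pragma omp target parallel if(n > 10)         // -> ..._l119
//   #pragma omp target parallel if(n > 20)         // -> ..._l144, 9 map entries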
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK18-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK18-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK18-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK18-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK18-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK18-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK18-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK18-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK18-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK18-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK18-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK18-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]])
// CHECK18-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]])
// CHECK18-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK18-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !20
// CHECK18-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !20
// CHECK18-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !20
// CHECK18-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !20
// CHECK18-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !20
// CHECK18-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !20
// CHECK18-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !20
// CHECK18-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) #[[ATTR3]]
// CHECK18-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK18-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK18: omp_offload.failed.i:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100() #[[ATTR3]]
// CHECK18-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK18: .omp_outlined..1.exit:
// CHECK18-NEXT: ret i32 0
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
// CHECK18-SAME: (i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK18-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK18-SAME: (i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK18-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK18-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK18-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK18-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK18-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK18-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK18-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK18: .cancel.exit:
// CHECK18-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK18: .cancel.continue:
// CHECK18-NEXT: ret void
//
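// The @__kmpc_cancel(..., i32 1) call in .omp_outlined..3 above requests
// cancellation of kind 1 (a parallel region), so the l110 target region
// plausibly contains something of this shape:
//
//   #pragma omp target parallel
//   {
//     aa += 1;
//   #pragma omp cancel parallel
//   }
//
// The branch to .cancel.exit / .cancel.continue on the runtime's return
// value is the standard lowering of that cancellation point.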
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK18-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK18-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
// CHECK18-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK18-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK18-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK18-NEXT: ret void
//
//
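// Target entry for the l144 region: arrays, VLA base pointers and the TT
// struct are passed by reference, VLA extents arrive as plain i64 values, and
// only the scalar `a` needs re-marshaling before the nine-argument fork.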
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK18-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK18-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK18-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK18-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK18-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK18-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK18-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP8]], i32* [[CONV5]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK18-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK18-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK18-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK18-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK18-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK18-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK18-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK18-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK18-NEXT: [[CONV5:%.*]] = fpext float [[TMP9]] to double
// CHECK18-NEXT: [[ADD6:%.*]] = fadd double [[CONV5]], 1.000000e+00
// CHECK18-NEXT: [[CONV7:%.*]] = fptrunc double [[ADD6]] to float
// CHECK18-NEXT: store float [[CONV7]], float* [[ARRAYIDX]], align 4
// CHECK18-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK18-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK18-NEXT: [[CONV9:%.*]] = fpext float [[TMP10]] to double
// CHECK18-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK18-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK18-NEXT: store float [[CONV11]], float* [[ARRAYIDX8]], align 4
// CHECK18-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK18-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX12]], i64 0, i64 2
// CHECK18-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX13]], align 8
// CHECK18-NEXT: [[ADD14:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK18-NEXT: store double [[ADD14]], double* [[ARRAYIDX13]], align 8
// CHECK18-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK18-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP12]]
// CHECK18-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX15]], i64 3
// CHECK18-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX16]], align 8
// CHECK18-NEXT: [[ADD17:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK18-NEXT: store double [[ADD17]], double* [[ARRAYIDX16]], align 8
// CHECK18-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK18-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 8
// CHECK18-NEXT: [[ADD18:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK18-NEXT: store i64 [[ADD18]], i64* [[X]], align 8
// CHECK18-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK18-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 8
// CHECK18-NEXT: [[CONV19:%.*]] = sext i8 [[TMP15]] to i32
// CHECK18-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV19]], 1
// CHECK18-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK18-NEXT: store i8 [[CONV21]], i8* [[Y]], align 8
// CHECK18-NEXT: ret void
//
//
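// Host driver _Z3bari: calls the plain, member, internal-linkage and template
// variants in turn and accumulates their results into `a`.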
// CHECK18-LABEL: define {{[^@]+}}@_Z3bari
// CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK18-NEXT: store i32 0, i32* [[A]], align 4
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK18-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK18-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK18-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: ret i32 [[TMP8]]
//
//
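// S1::r1 builds a five-entry offload descriptor; only the i16 VLA's byte size
// (2 * n * 2, TMP9) is computed at run time and stored into .offload_sizes.
// The `n > 60` guard selects __tgt_target_teams_mapper, with the same target
// entry serving as both the failure path and the else-branch host fallback.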
// CHECK18-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK18-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK18-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
// CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK18-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK18-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK18-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK18-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK18: omp_if.then:
// CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK18-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK18-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK18-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8
// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK18-NEXT: store double* [[A]], double** [[TMP13]], align 8
// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK18-NEXT: store i64 8, i64* [[TMP14]], align 8
// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8
// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8
// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8
// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK18-NEXT: store i64 4, i64* [[TMP20]], align 8
// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
// CHECK18-NEXT: store i64 2, i64* [[TMP23]], align 8
// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK18-NEXT: store i64 2, i64* [[TMP25]], align 8
// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK18-NEXT: store i64 8, i64* [[TMP26]], align 8
// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP27]], align 8
// CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK18-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8
// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8
// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK18-NEXT: store i64 8, i64* [[TMP32]], align 8
// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8
// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8
// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK18-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8
// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK18-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK18-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK18: omp_offload.failed:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK18: omp_offload.cont:
// CHECK18-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK18: omp_if.else:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_IF_END]]
// CHECK18: omp_if.end:
// CHECK18-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
// CHECK18-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK18-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
// CHECK18-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
// CHECK18-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK18-NEXT: ret i32 [[ADD4]]
//
//
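// fstatic sets up a four-entry descriptor (a, aa, aaa, b) with compile-time
// sizes from @.offload_sizes.12; the `n > 50` guard picks between offload and
// a direct call of the host fallback with identical arguments.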
// CHECK18-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK18-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK18-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK18-NEXT: store i32 0, i32* [[A]], align 4
// CHECK18-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK18-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK18-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK18-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK18: omp_if.then:
// CHECK18-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK18-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK18-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK18: omp_offload.failed:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK18: omp_offload.cont:
// CHECK18-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK18: omp_if.else:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_IF_END]]
// CHECK18: omp_if.end:
// CHECK18-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: ret i32 [[TMP31]]
//
//
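// ftemplate<int> is the same shape minus the i8 capture: a three-entry
// descriptor guarded by `n > 40`, with sizes and map types taken from
// @.offload_sizes.15 and @.offload_maptypes.16.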
// CHECK18-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK18-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK18-NEXT: store i32 0, i32* [[A]], align 4
// CHECK18-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK18-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK18: omp_if.then:
// CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK18-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK18: omp_offload.failed:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK18: omp_offload.cont:
// CHECK18-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK18: omp_if.else:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_IF_END]]
// CHECK18: omp_if.end:
// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: ret i32 [[TMP24]]
//
//
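// Target entry for the l216 region (also the host fallback invoked above):
// re-marshals the i32 `b` through B_CASTED and forks .omp_outlined..9 with
// `this`, `b`, both VLA bounds and the i16 VLA pointer.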
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK18-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK18-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK18-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK18-NEXT: ret void
//
//
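// Body of .omp_outlined..9: stores (double)b + 1.5 into S1::a, increments it,
// and writes the fptosi-truncated result into element [1][1] of the i16 VLA
// (row stride TMP2).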
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK18-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK18-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK18-NEXT: [[ADD:%.*]] = fadd double [[CONV3]], 1.500000e+00
// CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK18-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK18-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK18-NEXT: [[TMP5:%.*]] = load double, double* [[A4]], align 8
// CHECK18-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK18-NEXT: store double [[INC]], double* [[A4]], align 8
// CHECK18-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK18-NEXT: [[TMP6:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP6]]
// CHECK18-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK18-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK18-NEXT: ret void
//
//
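// Target entry for fstatic's l198 region: the i32/i16/i8 scalars are unpacked
// from their i64 slots and re-marshaled before the four-argument fork of
// .omp_outlined..11.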
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK18-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK18-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK18-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK18-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK18-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP2]] to i32
// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK18-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK18-NEXT: store i16 [[CONV5]], i16* [[CONV1]], align 8
// CHECK18-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK18-NEXT: [[CONV6:%.*]] = sext i8 [[TMP3]] to i32
// CHECK18-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], 1
// CHECK18-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i8
// CHECK18-NEXT: store i8 [[CONV8]], i8* [[CONV2]], align 8
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK18-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK18-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK18-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK18-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK18-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK18-NEXT: store i32 [[ADD5]], i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: ret void
//
//
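// Registration hook emitted once per TU: forwards the `omp requires` flags
// (encoded here as i64 1) to the runtime via __tgt_register_requires.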
// CHECK18-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK18-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK18-NEXT: ret void
//
//
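// CHECK19 repeats the host checks for a 32-bit target: pointers and the
// scalar shuttle slots shrink to i32, and offload sizes are widened to i64
// via sext only where the mapper ABI requires it.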
// CHECK19-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK19-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK19-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK19-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK19-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK19-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[A_CASTED10:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [9 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [9 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [9 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[A]], align 4
// CHECK19-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK19-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK19-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK19-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK19-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK19-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i32 [[TMP10]]) #[[ATTR3:[0-9]+]]
// CHECK19-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
// CHECK19-NEXT: store i32 [[TMP12]], i32* [[TMP14]], align 4
// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
// CHECK19-NEXT: store i32 [[TMP12]], i32* [[TMP16]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP17]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110.region_id, i32 1, i8** [[TMP18]], i8** [[TMP19]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK19-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK19-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK19: omp_offload.failed:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP12]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK19: omp_offload.cont:
// CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: store i32 [[TMP22]], i32* [[A_CASTED2]], align 4
// CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK19-NEXT: [[TMP24:%.*]] = load i16, i16* [[AA]], align 2
// CHECK19-NEXT: [[CONV4:%.*]] = bitcast i32* [[AA_CASTED3]] to i16*
// CHECK19-NEXT: store i16 [[TMP24]], i16* [[CONV4]], align 2
// CHECK19-NEXT: [[TMP25:%.*]] = load i32, i32* [[AA_CASTED3]], align 4
// CHECK19-NEXT: [[TMP26:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP26]], 10
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK19: omp_if.then:
// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK19-NEXT: store i32 [[TMP23]], i32* [[TMP28]], align 4
// CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK19-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK19-NEXT: store i32 [[TMP23]], i32* [[TMP30]], align 4
// CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
// CHECK19-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32*
// CHECK19-NEXT: store i32 [[TMP25]], i32* [[TMP33]], align 4
// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
// CHECK19-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32*
// CHECK19-NEXT: store i32 [[TMP25]], i32* [[TMP35]], align 4
// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP36]], align 4
// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK19-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119.region_id, i32 2, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK19-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
// CHECK19-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
// CHECK19: omp_offload.failed8:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP23]], i32 [[TMP25]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT9]]
// CHECK19: omp_offload.cont9:
// CHECK19-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK19: omp_if.else:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP23]], i32 [[TMP25]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_IF_END]]
// CHECK19: omp_if.end:
// CHECK19-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: store i32 [[TMP41]], i32* [[A_CASTED10]], align 4
// CHECK19-NEXT: [[TMP42:%.*]] = load i32, i32* [[A_CASTED10]], align 4
// CHECK19-NEXT: [[TMP43:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[TMP43]], 20
// CHECK19-NEXT: br i1 [[CMP11]], label [[OMP_IF_THEN12:%.*]], label [[OMP_IF_ELSE18:%.*]]
// CHECK19: omp_if.then12:
// CHECK19-NEXT: [[TMP44:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK19-NEXT: [[TMP45:%.*]] = sext i32 [[TMP44]] to i64
// CHECK19-NEXT: [[TMP46:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK19-NEXT: [[TMP47:%.*]] = mul nuw i32 [[TMP46]], 8
// CHECK19-NEXT: [[TMP48:%.*]] = sext i32 [[TMP47]] to i64
// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32*
// CHECK19-NEXT: store i32 [[TMP42]], i32* [[TMP50]], align 4
// CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK19-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32*
// CHECK19-NEXT: store i32 [[TMP42]], i32* [[TMP52]], align 4
// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK19-NEXT: store i64 4, i64* [[TMP53]], align 4
// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1
// CHECK19-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 4
// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1
// CHECK19-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]**
// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 4
// CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK19-NEXT: store i64 40, i64* [[TMP59]], align 4
// CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP60]], align 4
// CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2
// CHECK19-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32*
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP62]], align 4
// CHECK19-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2
// CHECK19-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32*
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP64]], align 4
// CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK19-NEXT: store i64 4, i64* [[TMP65]], align 4
// CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP66]], align 4
// CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3
// CHECK19-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float**
// CHECK19-NEXT: store float* [[VLA]], float** [[TMP68]], align 4
// CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3
// CHECK19-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float**
// CHECK19-NEXT: store float* [[VLA]], float** [[TMP70]], align 4
// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK19-NEXT: store i64 [[TMP45]], i64* [[TMP71]], align 4
// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3
// CHECK19-NEXT: store i8* null, i8** [[TMP72]], align 4
// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4
// CHECK19-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]**
// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 4
// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4
// CHECK19-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]**
// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 4
// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK19-NEXT: store i64 400, i64* [[TMP77]], align 4
// CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4
// CHECK19-NEXT: store i8* null, i8** [[TMP78]], align 4
// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5
// CHECK19-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK19-NEXT: store i32 5, i32* [[TMP80]], align 4
// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5
// CHECK19-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32*
// CHECK19-NEXT: store i32 5, i32* [[TMP82]], align 4
// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK19-NEXT: store i64 4, i64* [[TMP83]], align 4
// CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5
// CHECK19-NEXT: store i8* null, i8** [[TMP84]], align 4
// CHECK19-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6
// CHECK19-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP86]], align 4
// CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6
// CHECK19-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP88]], align 4
// CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK19-NEXT: store i64 4, i64* [[TMP89]], align 4
// CHECK19-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6
// CHECK19-NEXT: store i8* null, i8** [[TMP90]], align 4
// CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7
// CHECK19-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double**
// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP92]], align 4
// CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7
// CHECK19-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP94]], align 4
// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK19-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 4
// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7
// CHECK19-NEXT: store i8* null, i8** [[TMP96]], align 4
// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8
// CHECK19-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT**
// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 4
// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8
// CHECK19-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT**
// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 4
// CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK19-NEXT: store i64 12, i64* [[TMP101]], align 4
// CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8
// CHECK19-NEXT: store i8* null, i8** [[TMP102]], align 4
// CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK19-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK19-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0
// CHECK19-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
// CHECK19: omp_offload.failed16:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT17]]
// CHECK19: omp_offload.cont17:
// CHECK19-NEXT: br label [[OMP_IF_END19:%.*]]
// CHECK19: omp_if.else18:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_IF_END19]]
// CHECK19: omp_if.end19:
// CHECK19-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP109]])
// CHECK19-NEXT: ret i32 [[TMP108]]
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK19-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK19-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK19-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK19-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK19-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK19-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK19-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK19-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK19-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !21
// CHECK19-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !21
// CHECK19-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !21
// CHECK19-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !21
// CHECK19-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !21
// CHECK19-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !21
// CHECK19-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !21
// CHECK19-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) #[[ATTR3]]
// CHECK19-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK19-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK19: omp_offload.failed.i:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100() #[[ATTR3]]
// CHECK19-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK19: .omp_outlined..1.exit:
// CHECK19-NEXT: ret i32 0
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
// CHECK19-SAME: (i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK19-SAME: (i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK19-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK19-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK19-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK19-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK19: .cancel.exit:
// CHECK19-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK19: .cancel.continue:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
// CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK19-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK19-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK19-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK19-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK19-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK19-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double
// CHECK19-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
// CHECK19-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
// CHECK19-NEXT: store float [[CONV6]], float* [[ARRAYIDX]], align 4
// CHECK19-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK19-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK19-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double
// CHECK19-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
// CHECK19-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
// CHECK19-NEXT: store float [[CONV10]], float* [[ARRAYIDX7]], align 4
// CHECK19-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK19-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i32 0, i32 2
// CHECK19-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX12]], align 8
// CHECK19-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK19-NEXT: store double [[ADD13]], double* [[ARRAYIDX12]], align 8
// CHECK19-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK19-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP12]]
// CHECK19-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX14]], i32 3
// CHECK19-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK19-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK19-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK19-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK19-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK19-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK19-NEXT: store i64 [[ADD17]], i64* [[X]], align 4
// CHECK19-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK19-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK19-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32
// CHECK19-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
// CHECK19-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
// CHECK19-NEXT: store i8 [[CONV20]], i8* [[Y]], align 4
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@_Z3bari
// CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[A]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK19-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK19-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK19-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: ret i32 [[TMP8]]
//
//
// CHECK19-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK19-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK19-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
// CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK19-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK19-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK19-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK19: omp_if.then:
// CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK19-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK19-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK19-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK19-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK19-NEXT: store double* [[A]], double** [[TMP13]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK19-NEXT: store i64 8, i64* [[TMP14]], align 4
// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK19-NEXT: store i64 4, i64* [[TMP20]], align 4
// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK19-NEXT: store i32 2, i32* [[TMP23]], align 4
// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK19-NEXT: store i32 2, i32* [[TMP25]], align 4
// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK19-NEXT: store i64 4, i64* [[TMP26]], align 4
// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP27]], align 4
// CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4
// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK19-NEXT: store i64 4, i64* [[TMP32]], align 4
// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK19-NEXT: store i8* null, i8** [[TMP33]], align 4
// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK19-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4
// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK19-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4
// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK19-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4
// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK19-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK19-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK19-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK19-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK19: omp_offload.failed:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK19: omp_offload.cont:
// CHECK19-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK19: omp_if.else:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_IF_END]]
// CHECK19: omp_if.end:
// CHECK19-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
// CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK19-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32
// CHECK19-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
// CHECK19-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK19-NEXT: ret i32 [[ADD3]]
//
//
// CHECK19-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK19-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK19-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[A]], align 4
// CHECK19-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK19-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK19-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK19: omp_if.then:
// CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK19-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK19-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK19-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK19-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK19: omp_offload.failed:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK19: omp_offload.cont:
// CHECK19-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK19: omp_if.else:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_IF_END]]
// CHECK19: omp_if.end:
// CHECK19-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: ret i32 [[TMP31]]
//
//
// CHECK19-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK19-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[A]], align 4
// CHECK19-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK19: omp_if.then:
// CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK19-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK19-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK19: omp_offload.failed:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK19: omp_offload.cont:
// CHECK19-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK19: omp_if.else:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_IF_END]]
// CHECK19: omp_if.end:
// CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: ret i32 [[TMP24]]
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK19-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK19-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK19-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK19-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK19-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK19-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK19-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK19-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK19-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP6]]
// CHECK19-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK19-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK19-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK19-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK19-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK19-NEXT: store i16 [[CONV4]], i16* [[CONV]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK19-NEXT: [[CONV5:%.*]] = sext i8 [[TMP3]] to i32
// CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK19-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i8
// CHECK19-NEXT: store i8 [[CONV7]], i8* [[CONV1]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK19-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = sext i16 [[TMP2]] to i32
// CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK19-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK19-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK19-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK19-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK19-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK20-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK20-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK20-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK20-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK20-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[A_CASTED10:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [9 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [9 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [9 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[A]], align 4
// CHECK20-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK20-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK20-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK20-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK20-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK20-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK20-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i32 [[TMP10]]) #[[ATTR3:[0-9]+]]
// CHECK20-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
// CHECK20-NEXT: store i32 [[TMP12]], i32* [[TMP14]], align 4
// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
// CHECK20-NEXT: store i32 [[TMP12]], i32* [[TMP16]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP17]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110.region_id, i32 1, i8** [[TMP18]], i8** [[TMP19]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK20-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK20-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK20: omp_offload.failed:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP12]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK20: omp_offload.cont:
// CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: store i32 [[TMP22]], i32* [[A_CASTED2]], align 4
// CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK20-NEXT: [[TMP24:%.*]] = load i16, i16* [[AA]], align 2
// CHECK20-NEXT: [[CONV4:%.*]] = bitcast i32* [[AA_CASTED3]] to i16*
// CHECK20-NEXT: store i16 [[TMP24]], i16* [[CONV4]], align 2
// CHECK20-NEXT: [[TMP25:%.*]] = load i32, i32* [[AA_CASTED3]], align 4
// CHECK20-NEXT: [[TMP26:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP26]], 10
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK20: omp_if.then:
// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK20-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK20-NEXT: store i32 [[TMP23]], i32* [[TMP28]], align 4
// CHECK20-NEXT: [[TMP29:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK20-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK20-NEXT: store i32 [[TMP23]], i32* [[TMP30]], align 4
// CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
// CHECK20-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32*
// CHECK20-NEXT: store i32 [[TMP25]], i32* [[TMP33]], align 4
// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
// CHECK20-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32*
// CHECK20-NEXT: store i32 [[TMP25]], i32* [[TMP35]], align 4
// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP36]], align 4
// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK20-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119.region_id, i32 2, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK20-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
// CHECK20-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
// CHECK20: omp_offload.failed8:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP23]], i32 [[TMP25]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT9]]
// CHECK20: omp_offload.cont9:
// CHECK20-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK20: omp_if.else:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP23]], i32 [[TMP25]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_IF_END]]
// CHECK20: omp_if.end:
// CHECK20-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: store i32 [[TMP41]], i32* [[A_CASTED10]], align 4
// CHECK20-NEXT: [[TMP42:%.*]] = load i32, i32* [[A_CASTED10]], align 4
// CHECK20-NEXT: [[TMP43:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[TMP43]], 20
// CHECK20-NEXT: br i1 [[CMP11]], label [[OMP_IF_THEN12:%.*]], label [[OMP_IF_ELSE18:%.*]]
// CHECK20: omp_if.then12:
// CHECK20-NEXT: [[TMP44:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK20-NEXT: [[TMP45:%.*]] = sext i32 [[TMP44]] to i64
// CHECK20-NEXT: [[TMP46:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK20-NEXT: [[TMP47:%.*]] = mul nuw i32 [[TMP46]], 8
// CHECK20-NEXT: [[TMP48:%.*]] = sext i32 [[TMP47]] to i64
// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK20-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32*
// CHECK20-NEXT: store i32 [[TMP42]], i32* [[TMP50]], align 4
// CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK20-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32*
// CHECK20-NEXT: store i32 [[TMP42]], i32* [[TMP52]], align 4
// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK20-NEXT: store i64 4, i64* [[TMP53]], align 4
// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1
// CHECK20-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 4
// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1
// CHECK20-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]**
// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 4
// CHECK20-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK20-NEXT: store i64 40, i64* [[TMP59]], align 4
// CHECK20-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP60]], align 4
// CHECK20-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2
// CHECK20-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32*
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP62]], align 4
// CHECK20-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2
// CHECK20-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32*
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP64]], align 4
// CHECK20-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK20-NEXT: store i64 4, i64* [[TMP65]], align 4
// CHECK20-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP66]], align 4
// CHECK20-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3
// CHECK20-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float**
// CHECK20-NEXT: store float* [[VLA]], float** [[TMP68]], align 4
// CHECK20-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3
// CHECK20-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float**
// CHECK20-NEXT: store float* [[VLA]], float** [[TMP70]], align 4
// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK20-NEXT: store i64 [[TMP45]], i64* [[TMP71]], align 4
// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3
// CHECK20-NEXT: store i8* null, i8** [[TMP72]], align 4
// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4
// CHECK20-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]**
// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 4
// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4
// CHECK20-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]**
// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 4
// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK20-NEXT: store i64 400, i64* [[TMP77]], align 4
// CHECK20-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4
// CHECK20-NEXT: store i8* null, i8** [[TMP78]], align 4
// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5
// CHECK20-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK20-NEXT: store i32 5, i32* [[TMP80]], align 4
// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5
// CHECK20-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32*
// CHECK20-NEXT: store i32 5, i32* [[TMP82]], align 4
// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK20-NEXT: store i64 4, i64* [[TMP83]], align 4
// CHECK20-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5
// CHECK20-NEXT: store i8* null, i8** [[TMP84]], align 4
// CHECK20-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6
// CHECK20-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32*
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP86]], align 4
// CHECK20-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6
// CHECK20-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP88]], align 4
// CHECK20-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK20-NEXT: store i64 4, i64* [[TMP89]], align 4
// CHECK20-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6
// CHECK20-NEXT: store i8* null, i8** [[TMP90]], align 4
// CHECK20-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7
// CHECK20-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double**
// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP92]], align 4
// CHECK20-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7
// CHECK20-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP94]], align 4
// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK20-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 4
// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7
// CHECK20-NEXT: store i8* null, i8** [[TMP96]], align 4
// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8
// CHECK20-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT**
// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 4
// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8
// CHECK20-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT**
// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 4
// CHECK20-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK20-NEXT: store i64 12, i64* [[TMP101]], align 4
// CHECK20-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8
// CHECK20-NEXT: store i8* null, i8** [[TMP102]], align 4
// CHECK20-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK20-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK20-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK20-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0
// CHECK20-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
// CHECK20: omp_offload.failed16:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT17]]
// CHECK20: omp_offload.cont17:
// CHECK20-NEXT: br label [[OMP_IF_END19:%.*]]
// CHECK20: omp_if.else18:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_IF_END19]]
// CHECK20: omp_if.end19:
// CHECK20-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP109]])
// CHECK20-NEXT: ret i32 [[TMP108]]
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK20-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK20-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK20-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK20-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK20-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK20-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK20-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK20-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK20-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK20-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK20-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
// CHECK20-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
// CHECK20-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
// CHECK20-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !21
// CHECK20-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !21
// CHECK20-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !21
// CHECK20-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !21
// CHECK20-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !21
// CHECK20-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !21
// CHECK20-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !21
// CHECK20-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) #[[ATTR3]]
// CHECK20-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK20-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK20: omp_offload.failed.i:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100() #[[ATTR3]]
// CHECK20-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK20: .omp_outlined..1.exit:
// CHECK20-NEXT: ret i32 0
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
// CHECK20-SAME: (i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK20-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK20-SAME: (i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK20-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK20-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK20-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK20-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK20: .cancel.exit:
// CHECK20-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK20: .cancel.continue:
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK20-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
// CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK20-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK20-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK20-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK20-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK20-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK20-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK20-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK20-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK20-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double
// CHECK20-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
// CHECK20-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
// CHECK20-NEXT: store float [[CONV6]], float* [[ARRAYIDX]], align 4
// CHECK20-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK20-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK20-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double
// CHECK20-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
// CHECK20-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
// CHECK20-NEXT: store float [[CONV10]], float* [[ARRAYIDX7]], align 4
// CHECK20-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK20-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i32 0, i32 2
// CHECK20-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX12]], align 8
// CHECK20-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK20-NEXT: store double [[ADD13]], double* [[ARRAYIDX12]], align 8
// CHECK20-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK20-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP12]]
// CHECK20-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX14]], i32 3
// CHECK20-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK20-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK20-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK20-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK20-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK20-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK20-NEXT: store i64 [[ADD17]], i64* [[X]], align 4
// CHECK20-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK20-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK20-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32
// CHECK20-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
// CHECK20-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
// CHECK20-NEXT: store i8 [[CONV20]], i8* [[Y]], align 4
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@_Z3bari
// CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[A]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK20-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK20-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK20-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: ret i32 [[TMP8]]
//
//
// CHECK20-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK20-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK20-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
// CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK20-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK20-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK20-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK20: omp_if.then:
// CHECK20-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK20-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK20-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK20-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK20-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK20-NEXT: store double* [[A]], double** [[TMP13]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK20-NEXT: store i64 8, i64* [[TMP14]], align 4
// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK20-NEXT: store i64 4, i64* [[TMP20]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK20-NEXT: store i32 2, i32* [[TMP23]], align 4
// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK20-NEXT: store i32 2, i32* [[TMP25]], align 4
// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK20-NEXT: store i64 4, i64* [[TMP26]], align 4
// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP27]], align 4
// CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4
// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK20-NEXT: store i64 4, i64* [[TMP32]], align 4
// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK20-NEXT: store i8* null, i8** [[TMP33]], align 4
// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK20-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4
// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK20-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4
// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK20-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4
// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK20-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK20-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK20-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK20-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK20: omp_offload.failed:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK20: omp_offload.cont:
// CHECK20-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK20: omp_if.else:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_IF_END]]
// CHECK20: omp_if.end:
// CHECK20-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
// CHECK20-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK20-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32
// CHECK20-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
// CHECK20-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK20-NEXT: ret i32 [[ADD3]]
//
//
// CHECK20-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK20-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK20-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[A]], align 4
// CHECK20-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK20-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK20-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK20: omp_if.then:
// CHECK20-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK20-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK20-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK20-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK20-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK20: omp_offload.failed:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK20: omp_offload.cont:
// CHECK20-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK20: omp_if.else:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_IF_END]]
// CHECK20: omp_if.end:
// CHECK20-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: ret i32 [[TMP31]]
//
//
// CHECK20-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK20-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[A]], align 4
// CHECK20-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK20: omp_if.then:
// CHECK20-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK20-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK20-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK20-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK20: omp_offload.failed:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK20: omp_offload.cont:
// CHECK20-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK20: omp_if.else:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_IF_END]]
// CHECK20: omp_if.end:
// CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT: ret i32 [[TMP24]]
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK20-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK20-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK20-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK20-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK20-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK20-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK20-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK20-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK20-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK20-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP6]]
// CHECK20-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK20-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK20-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK20-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK20-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK20-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK20-NEXT: store i16 [[CONV4]], i16* [[CONV]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK20-NEXT: [[CONV5:%.*]] = sext i8 [[TMP3]] to i32
// CHECK20-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK20-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i8
// CHECK20-NEXT: store i8 [[CONV7]], i8* [[CONV1]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK20-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK20-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV1:%.*]] = sext i16 [[TMP2]] to i32
// CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK20-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK20-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK20-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK20-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK20-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK21-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK21-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK21-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK21-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK21-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK21-NEXT: store i32 0, i32* [[A]], align 4
// CHECK21-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK21-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK21-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK21-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK21-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK21-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK21-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK21-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK21-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK21-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
// CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK21-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK21-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK21-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK21-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK21-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK21-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK21-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK21-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK21-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK21-NEXT: [[CONV9:%.*]] = fpext float [[TMP11]] to double
// CHECK21-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK21-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK21-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK21-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK21-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK21-NEXT: [[CONV13:%.*]] = fpext float [[TMP12]] to double
// CHECK21-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK21-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK21-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK21-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK21-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK21-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK21-NEXT: [[ADD18:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK21-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK21-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK21-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP14]]
// CHECK21-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK21-NEXT: [[TMP15:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK21-NEXT: [[ADD21:%.*]] = fadd double [[TMP15]], 1.000000e+00
// CHECK21-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK21-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK21-NEXT: [[TMP16:%.*]] = load i64, i64* [[X]], align 8
// CHECK21-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK21-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK21-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK21-NEXT: [[TMP17:%.*]] = load i8, i8* [[Y]], align 8
// CHECK21-NEXT: [[CONV23:%.*]] = sext i8 [[TMP17]] to i32
// CHECK21-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK21-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK21-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK21-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[TMP19:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK21-NEXT: call void @llvm.stackrestore(i8* [[TMP19]])
// CHECK21-NEXT: ret i32 [[TMP18]]
//
//
// CHECK21-LABEL: define {{[^@]+}}@_Z3bari
// CHECK21-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK21-NEXT: store i32 0, i32* [[A]], align 4
// CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK21-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK21-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK21-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK21-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK21-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: ret i32 [[TMP8]]
//
//
// CHECK21-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK21-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK21-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK21-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK21-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK21-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK21-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK21-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK21-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK21-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK21-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
// CHECK21-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK21-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK21-NEXT: store double [[ADD2]], double* [[A]], align 8
// CHECK21-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK21-NEXT: [[TMP6:%.*]] = load double, double* [[A3]], align 8
// CHECK21-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
// CHECK21-NEXT: store double [[INC]], double* [[A3]], align 8
// CHECK21-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK21-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP7]]
// CHECK21-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK21-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK21-NEXT: [[TMP8:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK21-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP8]]
// CHECK21-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i64 1
// CHECK21-NEXT: [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK21-NEXT: [[CONV8:%.*]] = sext i16 [[TMP9]] to i32
// CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4
// CHECK21-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK21-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK21-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK21-NEXT: ret i32 [[ADD9]]
//
//
// CHECK21-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK21-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK21-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK21-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK21-NEXT: store i32 0, i32* [[A]], align 4
// CHECK21-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK21-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK21-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK21-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK21-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK21-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK21-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK21-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK21-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK21-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK21-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK21-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: ret i32 [[TMP4]]
//
//
// CHECK21-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK21-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK21-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK21-NEXT: store i32 0, i32* [[A]], align 4
// CHECK21-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK21-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK21-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK21-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK21-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK21-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: ret i32 [[TMP3]]
//
//
// CHECK22-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK22-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK22-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK22-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK22-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK22-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK22-NEXT: store i32 0, i32* [[A]], align 4
// CHECK22-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK22-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK22-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK22-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK22-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK22-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK22-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK22-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK22-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK22-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK22-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK22-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
// CHECK22-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK22-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK22-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK22-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK22-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK22-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK22-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK22-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK22-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK22-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK22-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK22-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK22-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK22-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK22-NEXT: [[CONV9:%.*]] = fpext float [[TMP11]] to double
// CHECK22-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK22-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK22-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK22-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK22-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK22-NEXT: [[CONV13:%.*]] = fpext float [[TMP12]] to double
// CHECK22-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK22-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK22-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK22-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK22-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK22-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK22-NEXT: [[ADD18:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK22-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK22-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK22-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP14]]
// CHECK22-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK22-NEXT: [[TMP15:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK22-NEXT: [[ADD21:%.*]] = fadd double [[TMP15]], 1.000000e+00
// CHECK22-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK22-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK22-NEXT: [[TMP16:%.*]] = load i64, i64* [[X]], align 8
// CHECK22-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK22-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK22-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK22-NEXT: [[TMP17:%.*]] = load i8, i8* [[Y]], align 8
// CHECK22-NEXT: [[CONV23:%.*]] = sext i8 [[TMP17]] to i32
// CHECK22-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK22-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK22-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK22-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[TMP19:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK22-NEXT: call void @llvm.stackrestore(i8* [[TMP19]])
// CHECK22-NEXT: ret i32 [[TMP18]]
//
//
// CHECK22-LABEL: define {{[^@]+}}@_Z3bari
// CHECK22-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK22-NEXT: store i32 0, i32* [[A]], align 4
// CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK22-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK22-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK22-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK22-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK22-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK22-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK22-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK22-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK22-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: ret i32 [[TMP8]]
//
//
// CHECK22-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK22-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK22-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK22-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK22-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK22-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK22-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK22-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK22-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK22-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK22-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK22-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK22-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
// CHECK22-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK22-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK22-NEXT: store double [[ADD2]], double* [[A]], align 8
// CHECK22-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK22-NEXT: [[TMP6:%.*]] = load double, double* [[A3]], align 8
// CHECK22-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
// CHECK22-NEXT: store double [[INC]], double* [[A3]], align 8
// CHECK22-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK22-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK22-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP7]]
// CHECK22-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK22-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK22-NEXT: [[TMP8:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK22-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP8]]
// CHECK22-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i64 1
// CHECK22-NEXT: [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK22-NEXT: [[CONV8:%.*]] = sext i16 [[TMP9]] to i32
// CHECK22-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4
// CHECK22-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK22-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK22-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK22-NEXT: ret i32 [[ADD9]]
//
//
// CHECK22-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK22-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK22-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK22-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK22-NEXT: store i32 0, i32* [[A]], align 4
// CHECK22-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK22-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK22-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK22-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK22-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK22-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK22-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK22-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK22-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK22-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK22-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK22-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK22-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK22-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK22-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK22-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK22-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: ret i32 [[TMP4]]
//
//
// CHECK22-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK22-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK22-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK22-NEXT: store i32 0, i32* [[A]], align 4
// CHECK22-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK22-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK22-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK22-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK22-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK22-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK22-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK22-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK22-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK22-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: ret i32 [[TMP3]]
//
//
// CHECK23-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK23-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK23-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK23-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK23-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK23-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK23-NEXT: store i32 0, i32* [[A]], align 4
// CHECK23-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK23-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK23-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK23-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK23-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK23-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK23-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK23-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2
// CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK23-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK23-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK23-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK23-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK23-NEXT: [[CONV5:%.*]] = sext i16 [[TMP7]] to i32
// CHECK23-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK23-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK23-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK23-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK23-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK23-NEXT: [[CONV9:%.*]] = fpext float [[TMP9]] to double
// CHECK23-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK23-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK23-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK23-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK23-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK23-NEXT: [[CONV13:%.*]] = fpext float [[TMP10]] to double
// CHECK23-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK23-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK23-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK23-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK23-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i32 0, i32 2
// CHECK23-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK23-NEXT: [[ADD18:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK23-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK23-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK23-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP12]]
// CHECK23-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i32 3
// CHECK23-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK23-NEXT: [[ADD21:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK23-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK23-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK23-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK23-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK23-NEXT: store i64 [[ADD22]], i64* [[X]], align 4
// CHECK23-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK23-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK23-NEXT: [[CONV23:%.*]] = sext i8 [[TMP15]] to i32
// CHECK23-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK23-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK23-NEXT: store i8 [[CONV25]], i8* [[Y]], align 4
// CHECK23-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[TMP17:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK23-NEXT: call void @llvm.stackrestore(i8* [[TMP17]])
// CHECK23-NEXT: ret i32 [[TMP16]]
//
//
// CHECK23-LABEL: define {{[^@]+}}@_Z3bari
// CHECK23-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK23-NEXT: store i32 0, i32* [[A]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK23-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK23-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK23-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK23-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK23-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: ret i32 [[TMP8]]
//
//
// CHECK23-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK23-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK23-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK23-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK23-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK23-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK23-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK23-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK23-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK23-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK23-NEXT: store double [[ADD2]], double* [[A]], align 4
// CHECK23-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK23-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK23-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK23-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK23-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK23-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP6]]
// CHECK23-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK23-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK23-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK23-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP7]]
// CHECK23-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i32 1
// CHECK23-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK23-NEXT: [[CONV8:%.*]] = sext i16 [[TMP8]] to i32
// CHECK23-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4
// CHECK23-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]]
// CHECK23-NEXT: [[TMP10:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK23-NEXT: call void @llvm.stackrestore(i8* [[TMP10]])
// CHECK23-NEXT: ret i32 [[ADD9]]
//
//
// CHECK23-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK23-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK23-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK23-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK23-NEXT: store i32 0, i32* [[A]], align 4
// CHECK23-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK23-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK23-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK23-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK23-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK23-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK23-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK23-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK23-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK23-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK23-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK23-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: ret i32 [[TMP4]]
//
//
// CHECK23-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK23-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK23-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK23-NEXT: store i32 0, i32* [[A]], align 4
// CHECK23-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK23-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK23-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK23-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK23-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK23-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK23-NEXT: ret i32 [[TMP3]]
//
//
// CHECK24-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK24-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK24-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK24-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK24-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK24-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK24-NEXT: store i32 0, i32* [[A]], align 4
// CHECK24-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK24-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK24-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK24-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK24-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK24-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK24-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK24-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2
// CHECK24-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK24-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK24-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK24-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK24-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK24-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK24-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK24-NEXT: [[CONV5:%.*]] = sext i16 [[TMP7]] to i32
// CHECK24-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK24-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK24-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK24-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK24-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK24-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK24-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK24-NEXT: [[CONV9:%.*]] = fpext float [[TMP9]] to double
// CHECK24-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK24-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK24-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK24-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK24-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK24-NEXT: [[CONV13:%.*]] = fpext float [[TMP10]] to double
// CHECK24-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK24-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK24-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK24-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK24-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i32 0, i32 2
// CHECK24-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK24-NEXT: [[ADD18:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK24-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK24-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK24-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP12]]
// CHECK24-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i32 3
// CHECK24-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK24-NEXT: [[ADD21:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK24-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK24-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK24-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK24-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK24-NEXT: store i64 [[ADD22]], i64* [[X]], align 4
// CHECK24-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK24-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK24-NEXT: [[CONV23:%.*]] = sext i8 [[TMP15]] to i32
// CHECK24-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK24-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK24-NEXT: store i8 [[CONV25]], i8* [[Y]], align 4
// CHECK24-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[TMP17:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK24-NEXT: call void @llvm.stackrestore(i8* [[TMP17]])
// CHECK24-NEXT: ret i32 [[TMP16]]
//
//
// CHECK24-LABEL: define {{[^@]+}}@_Z3bari
// CHECK24-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK24-NEXT: store i32 0, i32* [[A]], align 4
// CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK24-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK24-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK24-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK24-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK24-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK24-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK24-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK24-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: ret i32 [[TMP8]]
//
//
// CHECK24-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK24-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK24-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK24-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK24-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK24-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK24-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK24-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK24-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK24-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK24-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK24-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK24-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK24-NEXT: store double [[ADD2]], double* [[A]], align 4
// CHECK24-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK24-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK24-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK24-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK24-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK24-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK24-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP6]]
// CHECK24-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK24-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK24-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK24-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP7]]
// CHECK24-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i32 1
// CHECK24-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK24-NEXT: [[CONV8:%.*]] = sext i16 [[TMP8]] to i32
// CHECK24-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4
// CHECK24-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]]
// CHECK24-NEXT: [[TMP10:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK24-NEXT: call void @llvm.stackrestore(i8* [[TMP10]])
// CHECK24-NEXT: ret i32 [[ADD9]]
//
//
// CHECK24-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK24-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK24-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK24-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK24-NEXT: store i32 0, i32* [[A]], align 4
// CHECK24-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK24-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK24-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK24-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK24-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK24-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK24-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK24-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK24-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK24-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK24-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK24-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK24-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK24-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK24-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: ret i32 [[TMP4]]
//
//
// CHECK24-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK24-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK24-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK24-NEXT: store i32 0, i32* [[A]], align 4
// CHECK24-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK24-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK24-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK24-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK24-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK24-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK24-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK24-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK24-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK24-NEXT: ret i32 [[TMP3]]
//
//
// CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK25-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK25-SAME: (i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK25-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK25-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK25-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK25-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK25-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK25-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK25-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK25-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK25-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK25: .cancel.exit:
// CHECK25-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK25: .cancel.continue:
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK25-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK25-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK25-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK25-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK25-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK25-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK25-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK25-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
// CHECK25-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK25-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK25-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK25-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK25-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK25-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK25-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK25-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK25-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK25-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK25-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK25-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK25-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK25-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK25-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK25-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK25-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK25-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK25-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK25-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK25-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK25-NEXT: store i32 [[TMP8]], i32* [[CONV5]], align 4
// CHECK25-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK25-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK25-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK25-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK25-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK25-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK25-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK25-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK25-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK25-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK25-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK25-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK25-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK25-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK25-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK25-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK25-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK25-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK25-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK25-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK25-NEXT: [[CONV5:%.*]] = fpext float [[TMP9]] to double
// CHECK25-NEXT: [[ADD6:%.*]] = fadd double [[CONV5]], 1.000000e+00
// CHECK25-NEXT: [[CONV7:%.*]] = fptrunc double [[ADD6]] to float
// CHECK25-NEXT: store float [[CONV7]], float* [[ARRAYIDX]], align 4
// CHECK25-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK25-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK25-NEXT: [[CONV9:%.*]] = fpext float [[TMP10]] to double
// CHECK25-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK25-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK25-NEXT: store float [[CONV11]], float* [[ARRAYIDX8]], align 4
// CHECK25-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK25-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX12]], i64 0, i64 2
// CHECK25-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX13]], align 8
// CHECK25-NEXT: [[ADD14:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK25-NEXT: store double [[ADD14]], double* [[ARRAYIDX13]], align 8
// CHECK25-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK25-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP12]]
// CHECK25-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX15]], i64 3
// CHECK25-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX16]], align 8
// CHECK25-NEXT: [[ADD17:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK25-NEXT: store double [[ADD17]], double* [[ARRAYIDX16]], align 8
// CHECK25-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK25-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 8
// CHECK25-NEXT: [[ADD18:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK25-NEXT: store i64 [[ADD18]], i64* [[X]], align 8
// CHECK25-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK25-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 8
// CHECK25-NEXT: [[CONV19:%.*]] = sext i8 [[TMP15]] to i32
// CHECK25-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV19]], 1
// CHECK25-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK25-NEXT: store i8 [[CONV21]], i8* [[Y]], align 8
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK25-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK25-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK25-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK25-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK25-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK25-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK25-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK25-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK25-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK25-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK25-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK25-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK25-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK25-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK25-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK25-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK25-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK25-NEXT: [[CONV3:%.*]] = sext i16 [[TMP2]] to i32
// CHECK25-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK25-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK25-NEXT: store i16 [[CONV5]], i16* [[CONV1]], align 8
// CHECK25-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK25-NEXT: [[CONV6:%.*]] = sext i8 [[TMP3]] to i32
// CHECK25-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], 1
// CHECK25-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i8
// CHECK25-NEXT: store i8 [[CONV8]], i8* [[CONV2]], align 8
// CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK25-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK25-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK25-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK25-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK25-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK25-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK25-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK25-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK25-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK25-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK25-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK25-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK25-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK25-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK25-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK25-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK25-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK25-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK25-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[CONV3:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK25-NEXT: [[ADD:%.*]] = fadd double [[CONV3]], 1.500000e+00
// CHECK25-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK25-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK25-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK25-NEXT: [[TMP5:%.*]] = load double, double* [[A4]], align 8
// CHECK25-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK25-NEXT: store double [[INC]], double* [[A4]], align 8
// CHECK25-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK25-NEXT: [[TMP6:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP6]]
// CHECK25-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK25-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK25-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK25-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK25-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK25-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK25-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK25-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK25-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK25-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK25-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK25-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK25-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK25-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK25-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK25-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK25-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK25-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK25-NEXT: store i32 [[ADD5]], i32* [[ARRAYIDX]], align 4
// CHECK25-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK26-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK26-SAME: (i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK26-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK26-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK26-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK26-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK26-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK26-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK26-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK26: .cancel.exit:
// CHECK26-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK26: .cancel.continue:
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK26-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK26-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK26-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK26-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
// CHECK26-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK26-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK26-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK26-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK26-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK26-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK26-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK26-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK26-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK26-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK26-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK26-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK26-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK26-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP8]], i32* [[CONV5]], align 4
// CHECK26-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK26-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK26-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK26-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK26-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK26-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK26-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK26-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK26-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK26-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK26-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK26-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK26-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK26-NEXT: [[CONV5:%.*]] = fpext float [[TMP9]] to double
// CHECK26-NEXT: [[ADD6:%.*]] = fadd double [[CONV5]], 1.000000e+00
// CHECK26-NEXT: [[CONV7:%.*]] = fptrunc double [[ADD6]] to float
// CHECK26-NEXT: store float [[CONV7]], float* [[ARRAYIDX]], align 4
// CHECK26-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK26-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK26-NEXT: [[CONV9:%.*]] = fpext float [[TMP10]] to double
// CHECK26-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK26-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK26-NEXT: store float [[CONV11]], float* [[ARRAYIDX8]], align 4
// CHECK26-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK26-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX12]], i64 0, i64 2
// CHECK26-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX13]], align 8
// CHECK26-NEXT: [[ADD14:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK26-NEXT: store double [[ADD14]], double* [[ARRAYIDX13]], align 8
// CHECK26-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK26-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP12]]
// CHECK26-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX15]], i64 3
// CHECK26-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX16]], align 8
// CHECK26-NEXT: [[ADD17:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK26-NEXT: store double [[ADD17]], double* [[ARRAYIDX16]], align 8
// CHECK26-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK26-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 8
// CHECK26-NEXT: [[ADD18:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK26-NEXT: store i64 [[ADD18]], i64* [[X]], align 8
// CHECK26-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK26-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 8
// CHECK26-NEXT: [[CONV19:%.*]] = sext i8 [[TMP15]] to i32
// CHECK26-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV19]], 1
// CHECK26-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK26-NEXT: store i8 [[CONV21]], i8* [[Y]], align 8
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK26-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK26-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK26-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK26-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK26-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK26-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK26-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK26-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = sext i16 [[TMP2]] to i32
// CHECK26-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK26-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK26-NEXT: store i16 [[CONV5]], i16* [[CONV1]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK26-NEXT: [[CONV6:%.*]] = sext i8 [[TMP3]] to i32
// CHECK26-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], 1
// CHECK26-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i8
// CHECK26-NEXT: store i8 [[CONV8]], i8* [[CONV2]], align 8
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK26-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK26-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX]], align 4
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK26-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK26-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK26-NEXT: [[ADD:%.*]] = fadd double [[CONV3]], 1.500000e+00
// CHECK26-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK26-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK26-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK26-NEXT: [[TMP5:%.*]] = load double, double* [[A4]], align 8
// CHECK26-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK26-NEXT: store double [[INC]], double* [[A4]], align 8
// CHECK26-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK26-NEXT: [[TMP6:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP6]]
// CHECK26-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK26-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK26-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK26-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK26-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK26-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK26-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK26-NEXT: store i16 [[CONV4]], i16* [[CONV1]], align 8
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK26-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK26-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK26-NEXT: store i32 [[ADD5]], i32* [[ARRAYIDX]], align 4
// CHECK26-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK27-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK27-SAME: (i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK27-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK27-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK27-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK27-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK27-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK27: .cancel.exit:
// CHECK27-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK27: .cancel.continue:
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK27-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK27-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK27-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
// CHECK27-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK27-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK27-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK27-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK27-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK27-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK27-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK27-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK27-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK27-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK27-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK27-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK27-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK27-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK27-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK27-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK27-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK27-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK27-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK27-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK27-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK27-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK27-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK27-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double
// CHECK27-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
// CHECK27-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
// CHECK27-NEXT: store float [[CONV6]], float* [[ARRAYIDX]], align 4
// CHECK27-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK27-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK27-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double
// CHECK27-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
// CHECK27-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
// CHECK27-NEXT: store float [[CONV10]], float* [[ARRAYIDX7]], align 4
// CHECK27-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK27-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i32 0, i32 2
// CHECK27-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX12]], align 8
// CHECK27-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK27-NEXT: store double [[ADD13]], double* [[ARRAYIDX12]], align 8
// CHECK27-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK27-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP12]]
// CHECK27-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX14]], i32 3
// CHECK27-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK27-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK27-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK27-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK27-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK27-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK27-NEXT: store i64 [[ADD17]], i64* [[X]], align 4
// CHECK27-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK27-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK27-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32
// CHECK27-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
// CHECK27-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
// CHECK27-NEXT: store i8 [[CONV20]], i8* [[Y]], align 4
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK27-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK27-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK27-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK27-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK27-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK27-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK27-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK27-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK27-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK27-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK27-NEXT: store i16 [[CONV4]], i16* [[CONV]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK27-NEXT: [[CONV5:%.*]] = sext i8 [[TMP3]] to i32
// CHECK27-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK27-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i8
// CHECK27-NEXT: store i8 [[CONV7]], i8* [[CONV1]], align 4
// CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK27-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK27-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK27-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK27-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK27-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK27-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK27-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK27-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK27-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK27-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK27-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK27-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK27-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK27-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK27-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP6]]
// CHECK27-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK27-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK27-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK27-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK27-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = sext i16 [[TMP2]] to i32
// CHECK27-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK27-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK27-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK27-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK27-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK27-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100
// CHECK28-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK28-SAME: (i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK28-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]])
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK28-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK28-NEXT: store i16 [[CONV2]], i16* [[CONV]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1)
// CHECK28-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK28-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK28: .cancel.exit:
// CHECK28-NEXT: br label [[DOTCANCEL_CONTINUE]]
// CHECK28: .cancel.continue:
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119
// CHECK28-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK28-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK28-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
// CHECK28-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK28-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK28-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144
// CHECK28-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK28-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK28-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK28-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK28-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK28-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK28-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK28-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK28-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK28-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK28-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK28-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK28-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK28-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK28-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK28-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK28-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK28-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK28-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK28-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK28-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK28-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK28-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK28-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK28-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK28-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK28-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK28-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK28-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK28-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK28-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK28-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double
// CHECK28-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
// CHECK28-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
// CHECK28-NEXT: store float [[CONV6]], float* [[ARRAYIDX]], align 4
// CHECK28-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK28-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK28-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double
// CHECK28-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
// CHECK28-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
// CHECK28-NEXT: store float [[CONV10]], float* [[ARRAYIDX7]], align 4
// CHECK28-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK28-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i32 0, i32 2
// CHECK28-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX12]], align 8
// CHECK28-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK28-NEXT: store double [[ADD13]], double* [[ARRAYIDX12]], align 8
// CHECK28-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK28-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP12]]
// CHECK28-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX14]], i32 3
// CHECK28-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK28-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK28-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK28-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK28-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK28-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK28-NEXT: store i64 [[ADD17]], i64* [[X]], align 4
// CHECK28-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK28-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK28-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32
// CHECK28-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
// CHECK28-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
// CHECK28-NEXT: store i8 [[CONV20]], i8* [[Y]], align 4
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198
// CHECK28-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK28-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK28-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK28-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK28-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK28-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK28-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK28-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK28-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK28-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK28-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
// CHECK28-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
// CHECK28-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
// CHECK28-NEXT: store i16 [[CONV4]], i16* [[CONV]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK28-NEXT: [[CONV5:%.*]] = sext i8 [[TMP3]] to i32
// CHECK28-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK28-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i8
// CHECK28-NEXT: store i8 [[CONV7]], i8* [[CONV1]], align 4
// CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK28-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK28-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK28-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK28-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK28-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK28-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK28-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK28-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK28-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK28-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK28-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK28-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK28-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK28-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK28-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK28-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK28-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK28-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK28-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK28-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK28-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK28-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK28-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK28-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK28-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP6]]
// CHECK28-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK28-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181
// CHECK28-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK28-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK28-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV1:%.*]] = sext i16 [[TMP2]] to i32
// CHECK28-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK28-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK28-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
// CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK28-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK28-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK28-NEXT: ret void
//
//
// CHECK29-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK29-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK29-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK29-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK29-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK29-NEXT: store i32 0, i32* [[A]], align 4
// CHECK29-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK29-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK29-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK29-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK29-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK29-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK29-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK29-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK29-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK29-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK29-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK29-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
// CHECK29-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK29-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK29-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK29-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK29-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK29-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK29-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK29-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK29-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK29-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK29-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK29-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK29-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK29-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK29-NEXT: [[CONV9:%.*]] = fpext float [[TMP11]] to double
// CHECK29-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK29-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK29-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK29-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK29-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK29-NEXT: [[CONV13:%.*]] = fpext float [[TMP12]] to double
// CHECK29-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK29-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK29-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK29-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK29-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK29-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK29-NEXT: [[ADD18:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK29-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK29-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK29-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP14]]
// CHECK29-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK29-NEXT: [[TMP15:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK29-NEXT: [[ADD21:%.*]] = fadd double [[TMP15]], 1.000000e+00
// CHECK29-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK29-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK29-NEXT: [[TMP16:%.*]] = load i64, i64* [[X]], align 8
// CHECK29-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK29-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK29-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK29-NEXT: [[TMP17:%.*]] = load i8, i8* [[Y]], align 8
// CHECK29-NEXT: [[CONV23:%.*]] = sext i8 [[TMP17]] to i32
// CHECK29-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK29-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK29-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK29-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[TMP19:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK29-NEXT: call void @llvm.stackrestore(i8* [[TMP19]])
// CHECK29-NEXT: ret i32 [[TMP18]]
//
//
// CHECK29-LABEL: define {{[^@]+}}@_Z3bari
// CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK29-NEXT: store i32 0, i32* [[A]], align 4
// CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK29-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK29-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK29-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK29-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK29-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK29-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK29-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK29-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK29-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK29-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK29-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: ret i32 [[TMP8]]
//
//
// CHECK29-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK29-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK29-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK29-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK29-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK29-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK29-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK29-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK29-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK29-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK29-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK29-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK29-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
// CHECK29-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK29-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK29-NEXT: store double [[ADD2]], double* [[A]], align 8
// CHECK29-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK29-NEXT: [[TMP6:%.*]] = load double, double* [[A3]], align 8
// CHECK29-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
// CHECK29-NEXT: store double [[INC]], double* [[A3]], align 8
// CHECK29-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK29-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK29-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP7]]
// CHECK29-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK29-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK29-NEXT: [[TMP8:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK29-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP8]]
// CHECK29-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i64 1
// CHECK29-NEXT: [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK29-NEXT: [[CONV8:%.*]] = sext i16 [[TMP9]] to i32
// CHECK29-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4
// CHECK29-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK29-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK29-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK29-NEXT: ret i32 [[ADD9]]
//
//
// CHECK29-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK29-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK29-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK29-NEXT: store i32 0, i32* [[A]], align 4
// CHECK29-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK29-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK29-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK29-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK29-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK29-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK29-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK29-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK29-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK29-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK29-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK29-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK29-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK29-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK29-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK29-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK29-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: ret i32 [[TMP4]]
//
//
// CHECK29-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK29-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK29-NEXT: store i32 0, i32* [[A]], align 4
// CHECK29-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK29-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK29-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK29-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK29-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK29-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK29-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK29-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK29-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK29-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK29-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT: ret i32 [[TMP3]]
//
//
// CHECK30-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK30-NEXT: entry:
// CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK30-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK30-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK30-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK30-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK30-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK30-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT: store i32 0, i32* [[A]], align 4
// CHECK30-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK30-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK30-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK30-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK30-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK30-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK30-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK30-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK30-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK30-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK30-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK30-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
// CHECK30-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK30-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK30-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK30-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK30-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK30-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK30-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK30-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK30-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK30-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK30-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK30-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK30-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK30-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK30-NEXT: [[CONV9:%.*]] = fpext float [[TMP11]] to double
// CHECK30-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK30-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK30-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK30-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK30-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK30-NEXT: [[CONV13:%.*]] = fpext float [[TMP12]] to double
// CHECK30-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK30-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK30-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK30-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK30-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK30-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK30-NEXT: [[ADD18:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK30-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK30-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK30-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP14]]
// CHECK30-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK30-NEXT: [[TMP15:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK30-NEXT: [[ADD21:%.*]] = fadd double [[TMP15]], 1.000000e+00
// CHECK30-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK30-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK30-NEXT: [[TMP16:%.*]] = load i64, i64* [[X]], align 8
// CHECK30-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK30-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK30-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK30-NEXT: [[TMP17:%.*]] = load i8, i8* [[Y]], align 8
// CHECK30-NEXT: [[CONV23:%.*]] = sext i8 [[TMP17]] to i32
// CHECK30-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK30-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK30-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK30-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[TMP19:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK30-NEXT: call void @llvm.stackrestore(i8* [[TMP19]])
// CHECK30-NEXT: ret i32 [[TMP18]]
//
//
// CHECK30-LABEL: define {{[^@]+}}@_Z3bari
// CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK30-NEXT: entry:
// CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT: store i32 0, i32* [[A]], align 4
// CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK30-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK30-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK30-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK30-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK30-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK30-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK30-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK30-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK30-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK30-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK30-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: ret i32 [[TMP8]]
//
//
// CHECK30-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK30-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK30-NEXT: entry:
// CHECK30-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK30-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK30-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK30-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK30-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK30-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK30-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK30-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK30-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK30-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK30-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK30-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
// CHECK30-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK30-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK30-NEXT: store double [[ADD2]], double* [[A]], align 8
// CHECK30-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK30-NEXT: [[TMP6:%.*]] = load double, double* [[A3]], align 8
// CHECK30-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
// CHECK30-NEXT: store double [[INC]], double* [[A3]], align 8
// CHECK30-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK30-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK30-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP7]]
// CHECK30-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK30-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK30-NEXT: [[TMP8:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK30-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP8]]
// CHECK30-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i64 1
// CHECK30-NEXT: [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK30-NEXT: [[CONV8:%.*]] = sext i16 [[TMP9]] to i32
// CHECK30-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4
// CHECK30-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]]
// CHECK30-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK30-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK30-NEXT: ret i32 [[ADD9]]
//
//
// CHECK30-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK30-NEXT: entry:
// CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK30-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK30-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT: store i32 0, i32* [[A]], align 4
// CHECK30-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK30-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK30-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK30-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK30-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK30-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK30-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK30-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK30-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK30-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK30-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK30-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK30-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK30-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK30-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK30-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK30-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: ret i32 [[TMP4]]
//
//
// CHECK30-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK30-NEXT: entry:
// CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK30-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK30-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT: store i32 0, i32* [[A]], align 4
// CHECK30-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK30-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK30-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK30-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK30-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK30-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK30-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK30-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK30-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK30-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK30-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT: ret i32 [[TMP3]]
//
//
// CHECK31-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK31-NEXT: entry:
// CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK31-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK31-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK31-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK31-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK31-NEXT: store i32 0, i32* [[A]], align 4
// CHECK31-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK31-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK31-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK31-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK31-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK31-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK31-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK31-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2
// CHECK31-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK31-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK31-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK31-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK31-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK31-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK31-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK31-NEXT: [[CONV5:%.*]] = sext i16 [[TMP7]] to i32
// CHECK31-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK31-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK31-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK31-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK31-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK31-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK31-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK31-NEXT: [[CONV9:%.*]] = fpext float [[TMP9]] to double
// CHECK31-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK31-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK31-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK31-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK31-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK31-NEXT: [[CONV13:%.*]] = fpext float [[TMP10]] to double
// CHECK31-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK31-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK31-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK31-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK31-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i32 0, i32 2
// CHECK31-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK31-NEXT: [[ADD18:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK31-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK31-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK31-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP12]]
// CHECK31-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i32 3
// CHECK31-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK31-NEXT: [[ADD21:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK31-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK31-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK31-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK31-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK31-NEXT: store i64 [[ADD22]], i64* [[X]], align 4
// CHECK31-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK31-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK31-NEXT: [[CONV23:%.*]] = sext i8 [[TMP15]] to i32
// CHECK31-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK31-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK31-NEXT: store i8 [[CONV25]], i8* [[Y]], align 4
// CHECK31-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[TMP17:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK31-NEXT: call void @llvm.stackrestore(i8* [[TMP17]])
// CHECK31-NEXT: ret i32 [[TMP16]]
//
//
// CHECK31-LABEL: define {{[^@]+}}@_Z3bari
// CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK31-NEXT: entry:
// CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK31-NEXT: store i32 0, i32* [[A]], align 4
// CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK31-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK31-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK31-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK31-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK31-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK31-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK31-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK31-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK31-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK31-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: ret i32 [[TMP8]]
//
//
// CHECK31-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK31-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK31-NEXT: entry:
// CHECK31-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK31-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK31-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK31-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK31-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK31-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK31-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK31-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK31-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK31-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK31-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK31-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK31-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK31-NEXT: store double [[ADD2]], double* [[A]], align 4
// CHECK31-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK31-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK31-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK31-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK31-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK31-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK31-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP6]]
// CHECK31-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK31-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK31-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK31-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP7]]
// CHECK31-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i32 1
// CHECK31-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK31-NEXT: [[CONV8:%.*]] = sext i16 [[TMP8]] to i32
// CHECK31-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4
// CHECK31-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]]
// CHECK31-NEXT: [[TMP10:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK31-NEXT: call void @llvm.stackrestore(i8* [[TMP10]])
// CHECK31-NEXT: ret i32 [[ADD9]]
//
//
// CHECK31-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK31-NEXT: entry:
// CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK31-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK31-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK31-NEXT: store i32 0, i32* [[A]], align 4
// CHECK31-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK31-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK31-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK31-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK31-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK31-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK31-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK31-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK31-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK31-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK31-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK31-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK31-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK31-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK31-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK31-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK31-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: ret i32 [[TMP4]]
//
//
// CHECK31-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK31-NEXT: entry:
// CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK31-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK31-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK31-NEXT: store i32 0, i32* [[A]], align 4
// CHECK31-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK31-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK31-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK31-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK31-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK31-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK31-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK31-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK31-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK31-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK31-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK31-NEXT: ret i32 [[TMP3]]
//
//
// CHECK32-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK32-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK32-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK32-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK32-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: store i32 0, i32* [[A]], align 4
// CHECK32-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK32-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK32-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK32-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK32-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK32-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2
// CHECK32-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK32-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK32-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK32-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2
// CHECK32-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK32-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
// CHECK32-NEXT: [[CONV5:%.*]] = sext i16 [[TMP7]] to i32
// CHECK32-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK32-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK32-NEXT: store i16 [[CONV7]], i16* [[AA]], align 2
// CHECK32-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK32-NEXT: store i32 [[ADD8]], i32* [[A]], align 4
// CHECK32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK32-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK32-NEXT: [[CONV9:%.*]] = fpext float [[TMP9]] to double
// CHECK32-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK32-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK32-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK32-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK32-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK32-NEXT: [[CONV13:%.*]] = fpext float [[TMP10]] to double
// CHECK32-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK32-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK32-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK32-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK32-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i32 0, i32 2
// CHECK32-NEXT: [[TMP11:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK32-NEXT: [[ADD18:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK32-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK32-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK32-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP12]]
// CHECK32-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i32 3
// CHECK32-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK32-NEXT: [[ADD21:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK32-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK32-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK32-NEXT: [[TMP14:%.*]] = load i64, i64* [[X]], align 4
// CHECK32-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK32-NEXT: store i64 [[ADD22]], i64* [[X]], align 4
// CHECK32-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK32-NEXT: [[TMP15:%.*]] = load i8, i8* [[Y]], align 4
// CHECK32-NEXT: [[CONV23:%.*]] = sext i8 [[TMP15]] to i32
// CHECK32-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK32-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK32-NEXT: store i8 [[CONV25]], i8* [[Y]], align 4
// CHECK32-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[TMP17:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT: call void @llvm.stackrestore(i8* [[TMP17]])
// CHECK32-NEXT: ret i32 [[TMP16]]
//
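// The assertions above close out the host version of foo(int) under this
// prefix: with the target regions inlined on the host, every update is a
// plain load/modify/store, and the two VLAs live inside a single
// llvm.stacksave/llvm.stackrestore bracket that is popped before the ret.
// A minimal sketch of source constructs that lower to the checked pattern;
// the names here are illustrative, not the test's actual declarations:
//
//   int foo_like(int n) {
//     float v1[n];          // llvm.stacksave, then "alloca float, i32 %n"
//     double v2[5][n];      // "mul nuw i32 5, %n" feeds the second alloca
//     short aa = 0;
//     aa += 1;              // sext i16 -> add nsw i32 -> trunc i16
//     v1[3] += 1.0;         // float math is widened: fpext/fadd/fptrunc
//     v2[1][3] += 1.0;      // row stride "mul nsw i32 1, %n", then GEP +3
//     return n;             // llvm.stackrestore runs just before the ret
//   }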
//
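// Next comes bar(int), the driver that chains the four callees; the checks
// pin down the exact mangled callees, the by-reference S1 argument, and the
// running accumulation into a. A sketch inferred from the mangled names and
// call order (not copied verbatim from the source):
//
//   int bar(int n) {
//     int a = 0;
//     a += foo(n);            // call i32 @_Z3fooi
//     S1 s;                   // 8-byte struct, passed by reference below
//     a += s.r1(n);           // call i32 @_ZN2S12r1Ei(%struct.S1* ...)
//     a += fstatic(n);        // internal linkage, hence @_ZL7fstatici
//     a += ftemplate<int>(n); // instantiation @_Z9ftemplateIiET_i
//     return a;
//   }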
// CHECK32-LABEL: define {{[^@]+}}@_Z3bari
// CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: store i32 0, i32* [[A]], align 4
// CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK32-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK32-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK32-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK32-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK32-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK32-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK32-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK32-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: ret i32 [[TMP8]]
//
//
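// The member function S1::r1 below combines three things worth watching in
// the checks: a VLA of short bracketed by stacksave/stackrestore, a double
// member accessed at "align 4" (the i386 psABI keeps double at 4-byte
// alignment inside structs), and a double-to-short store via fptosi. A
// sketch of a body consistent with the checked IR; treat it as illustrative:
//
//   struct S1 {
//     double a;                      // loads/stores at align 4 on i386
//     int r1(int n) {
//       int b = n + 1;
//       short c[2][n];               // "mul nuw i32 2, %n" + alloca i16
//       this->a = (double)b + 1.5;   // sitofp, then fadd
//       c[1][1] = ++this->a;         // fadd 1.0, store, fptosi to i16
//       return c[1][1] + b;          // sext i16 back to i32 for the add
//     }
//   };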
// CHECK32-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK32-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK32-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK32-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK32-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK32-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK32-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK32-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK32-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK32-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double
// CHECK32-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK32-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT: store double [[ADD2]], double* [[A]], align 4
// CHECK32-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT: [[TMP5:%.*]] = load double, double* [[A3]], align 4
// CHECK32-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00
// CHECK32-NEXT: store double [[INC]], double* [[A3]], align 4
// CHECK32-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK32-NEXT: [[TMP6:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP6]]
// CHECK32-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK32-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2
// CHECK32-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK32-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP7]]
// CHECK32-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX6]], i32 1
// CHECK32-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
// CHECK32-NEXT: [[CONV8:%.*]] = sext i16 [[TMP8]] to i32
// CHECK32-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4
// CHECK32-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]]
// CHECK32-NEXT: [[TMP10:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT: call void @llvm.stackrestore(i8* [[TMP10]])
// CHECK32-NEXT: ret i32 [[ADD9]]
//
//
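// fstatic exercises the C integer promotions on sub-int types: the short
// and char increments both round-trip through i32. The body below is fully
// determined by the checks, though the spelling is still a sketch:
//
//   static int fstatic(int n) {
//     int a = 0; short aa = 0; char aaa = 0; int b[10];
//     a += 1;      // plain i32 add nsw
//     aa += 1;     // sext i16 -> add nsw i32 -> trunc i16
//     aaa += 1;    // sext i8  -> add nsw i32 -> trunc i8
//     b[2] += 1;   // GEP [10 x i32], i32 0, i32 2
//     return a;    // n is stored but otherwise unused in this version
//   }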
// CHECK32-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK32-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK32-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: store i32 0, i32* [[A]], align 4
// CHECK32-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK32-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK32-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK32-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK32-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK32-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK32-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK32-NEXT: [[TMP2:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK32-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK32-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK32-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
// CHECK32-NEXT: store i8 [[CONV5]], i8* [[AAA]], align 1
// CHECK32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK32-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK32-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: ret i32 [[TMP4]]
//
//
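// The last body is the ftemplate<int> instantiation, emitted into a comdat
// as usual for an implicit template instantiation; it repeats the fstatic
// pattern minus the char field. An illustrative sketch:
//
//   template <typename tx>
//   tx ftemplate(int n) {
//     tx a = 0; short aa = 0; tx b[10];
//     a += 1;
//     aa += 1;     // same i16 promote/demote dance as in fstatic
//     b[2] += 1;
//     return a;
//   }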
// CHECK32-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK32-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: store i32 0, i32* [[A]], align 4
// CHECK32-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK32-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP1:%.*]] = load i16, i16* [[AA]], align 2
// CHECK32-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK32-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK32-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK32-NEXT: store i16 [[CONV2]], i16* [[AA]], align 2
// CHECK32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK32-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK32-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: ret i32 [[TMP3]]
//