[flang] Allow to pass an async id to allocate the descriptor (#118713)
This patch prepares for supporting a stream-ordered memory allocator in CUDA Fortran. It adds an asynchronous id parameter to the AllocatableAllocate runtime entry point and to Descriptor::Allocate so that the value can be passed down to the registered allocator; it is up to each allocator to use this value or ignore it. A follow-up patch will implement the asynchronous allocator for CUDA Fortran.
Parent: 970d6d2096
Commit: 7d1c661381
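The stream-ordered allocator itself is deferred to the follow-up patch; this change only threads asyncId through to the registered allocator. As a rough illustration of what a device allocator honoring the new parameter could look like, here is a minimal C++ sketch. It assumes asyncId encodes a cudaStream_t and reuses the kCudaNoStream sentinel introduced below; the function name is hypothetical and is not the follow-up patch's actual code.

// Hypothetical sketch only; the real stream-ordered allocator lands in a
// follow-up patch.
#include <cstddef>
#include <cstdint>
#include <cuda_runtime.h>

static constexpr std::int64_t kCudaNoStream = -1; // sentinel from this patch

void *CUFAllocDeviceSketch(std::size_t sizeInBytes, std::int64_t asyncId) {
  void *p = nullptr;
  if (asyncId == kCudaNoStream) {
    // No stream specified: plain synchronous device allocation.
    cudaMalloc(&p, sizeInBytes);
  } else {
    // Stream-ordered allocation; assumes asyncId carries a cudaStream_t.
    cudaMallocAsync(&p, sizeInBytes, reinterpret_cast<cudaStream_t>(asyncId));
  }
  return p;
}

The diff below adds the plumbing that would make such an allocator reachable through the allocator registry.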
@@ -19,16 +19,16 @@ extern "C" {
 void RTDECL(CUFRegisterAllocator)();
 }
 
-void *CUFAllocPinned(std::size_t);
+void *CUFAllocPinned(std::size_t, std::int64_t);
 void CUFFreePinned(void *);
 
-void *CUFAllocDevice(std::size_t);
+void *CUFAllocDevice(std::size_t, std::int64_t);
 void CUFFreeDevice(void *);
 
-void *CUFAllocManaged(std::size_t);
+void *CUFAllocManaged(std::size_t, std::int64_t);
 void CUFFreeManaged(void *);
 
-void *CUFAllocUnified(std::size_t);
+void *CUFAllocUnified(std::size_t, std::int64_t);
 void CUFFreeUnified(void *);
 
 } // namespace Fortran::runtime::cuda
@@ -23,6 +23,9 @@ static constexpr unsigned kHostToDevice = 0;
 static constexpr unsigned kDeviceToHost = 1;
 static constexpr unsigned kDeviceToDevice = 2;
 
+/// Value used for asyncId when no specific stream is specified.
+static constexpr std::int64_t kCudaNoStream = -1;
+
 #define CUDA_REPORT_IF_ERROR(expr) \
   [](cudaError_t err) { \
     if (err == cudaSuccess) \
@@ -94,9 +94,9 @@ int RTDECL(AllocatableCheckLengthParameter)(Descriptor &,
 // Successfully allocated memory is initialized if the allocatable has a
 // derived type, and is always initialized by AllocatableAllocateSource().
 // Performs all necessary coarray synchronization and validation actions.
-int RTDECL(AllocatableAllocate)(Descriptor &, bool hasStat = false,
-    const Descriptor *errMsg = nullptr, const char *sourceFile = nullptr,
-    int sourceLine = 0);
+int RTDECL(AllocatableAllocate)(Descriptor &, std::int64_t asyncId = -1,
+    bool hasStat = false, const Descriptor *errMsg = nullptr,
+    const char *sourceFile = nullptr, int sourceLine = 0);
 int RTDECL(AllocatableAllocateSource)(Descriptor &, const Descriptor &source,
     bool hasStat = false, const Descriptor *errMsg = nullptr,
     const char *sourceFile = nullptr, int sourceLine = 0);
@@ -10,6 +10,7 @@
 #define FORTRAN_RUNTIME_ALLOCATOR_REGISTRY_H_
 
 #include "flang/Common/api-attrs.h"
+#include <cstdint>
 #include <cstdlib>
 #include <vector>
 
@@ -25,7 +26,7 @@ static constexpr unsigned kUnifiedAllocatorPos = 4;
 
 namespace Fortran::runtime {
 
-using AllocFct = void *(*)(std::size_t);
+using AllocFct = void *(*)(std::size_t, std::int64_t);
 using FreeFct = void (*)(void *);
 
 typedef struct Allocator_t {
@@ -33,10 +34,11 @@ typedef struct Allocator_t {
   FreeFct free{nullptr};
 } Allocator_t;
 
-#ifdef RT_DEVICE_COMPILATION
-static RT_API_ATTRS void *MallocWrapper(std::size_t size) {
+static RT_API_ATTRS void *MallocWrapper(
+    std::size_t size, [[maybe_unused]] std::int64_t) {
   return std::malloc(size);
 }
+#ifdef RT_DEVICE_COMPILATION
 static RT_API_ATTRS void FreeWrapper(void *p) { return std::free(p); }
 #endif
 
@@ -46,7 +48,7 @@ struct AllocatorRegistry {
       : allocators{{&MallocWrapper, &FreeWrapper}} {}
 #else
   constexpr AllocatorRegistry() {
-    allocators[kDefaultAllocator] = {&std::malloc, &std::free};
+    allocators[kDefaultAllocator] = {&MallocWrapper, &std::free};
   };
 #endif
   RT_API_ATTRS void Register(int, Allocator_t);
@@ -374,7 +374,7 @@ public:
   // before calling. It (re)computes the byte strides after
   // allocation. Does not allocate automatic components or
   // perform default component initialization.
-  RT_API_ATTRS int Allocate();
+  RT_API_ATTRS int Allocate(std::int64_t asyncId = -1);
   RT_API_ATTRS void SetByteStrides();
 
   // Deallocates storage; does not call FINAL subroutines or
@@ -184,9 +184,14 @@ static mlir::Value genRuntimeAllocate(fir::FirOpBuilder &builder,
           ? fir::runtime::getRuntimeFunc<mkRTKey(PointerAllocate)>(loc, builder)
           : fir::runtime::getRuntimeFunc<mkRTKey(AllocatableAllocate)>(loc,
                 builder);
-  llvm::SmallVector<mlir::Value> args{
-      box.getAddr(), errorManager.hasStat, errorManager.errMsgAddr,
-      errorManager.sourceFile, errorManager.sourceLine};
+  llvm::SmallVector<mlir::Value> args{box.getAddr()};
+  if (!box.isPointer())
+    args.push_back(
+        builder.createIntegerConstant(loc, builder.getI64Type(), -1));
+  args.push_back(errorManager.hasStat);
+  args.push_back(errorManager.errMsgAddr);
+  args.push_back(errorManager.sourceFile);
+  args.push_back(errorManager.sourceLine);
   llvm::SmallVector<mlir::Value> operands;
   for (auto [fst, snd] : llvm::zip(args, callee.getFunctionType().getInputs()))
     operands.emplace_back(builder.createConvert(loc, snd, fst));
@@ -76,16 +76,19 @@ void fir::runtime::genAllocatableAllocate(fir::FirOpBuilder &builder,
   mlir::func::FuncOp func{
      fir::runtime::getRuntimeFunc<mkRTKey(AllocatableAllocate)>(loc, builder)};
   mlir::FunctionType fTy{func.getFunctionType()};
+  mlir::Value asyncId =
+      builder.createIntegerConstant(loc, builder.getI64Type(), -1);
   mlir::Value sourceFile{fir::factory::locationToFilename(builder, loc)};
   mlir::Value sourceLine{
-      fir::factory::locationToLineNo(builder, loc, fTy.getInput(4))};
+      fir::factory::locationToLineNo(builder, loc, fTy.getInput(5))};
   if (!hasStat)
     hasStat = builder.createBool(loc, false);
   if (!errMsg) {
     mlir::Type boxNoneTy = fir::BoxType::get(builder.getNoneType());
     errMsg = builder.create<fir::AbsentOp>(loc, boxNoneTy).getResult();
   }
-  llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
-      builder, loc, fTy, desc, hasStat, errMsg, sourceFile, sourceLine)};
+  llvm::SmallVector<mlir::Value> args{
+      fir::runtime::createArguments(builder, loc, fTy, desc, asyncId, hasStat,
+          errMsg, sourceFile, sourceLine)};
   builder.create<fir::CallOp>(loc, func, args);
 }
@@ -52,7 +52,7 @@ int RTDEF(CUFAllocatableAllocate)(Descriptor &desc, int64_t stream,
   }
   // Perform the standard allocation.
   int stat{RTNAME(AllocatableAllocate)(
-      desc, hasStat, errMsg, sourceFile, sourceLine)};
+      desc, stream, hasStat, errMsg, sourceFile, sourceLine)};
   return stat;
 }
 
@@ -33,7 +33,8 @@ void RTDEF(CUFRegisterAllocator)() {
   }
 }
 
-void *CUFAllocPinned(std::size_t sizeInBytes) {
+void *CUFAllocPinned(
+    std::size_t sizeInBytes, [[maybe_unused]] std::int64_t asyncId) {
   void *p;
   CUDA_REPORT_IF_ERROR(cudaMallocHost((void **)&p, sizeInBytes));
   return p;
@@ -41,7 +42,8 @@ void *CUFAllocPinned(std::size_t sizeInBytes) {
 
 void CUFFreePinned(void *p) { CUDA_REPORT_IF_ERROR(cudaFreeHost(p)); }
 
-void *CUFAllocDevice(std::size_t sizeInBytes) {
+void *CUFAllocDevice(
+    std::size_t sizeInBytes, [[maybe_unused]] std::int64_t asyncId) {
   void *p;
   CUDA_REPORT_IF_ERROR(cudaMalloc(&p, sizeInBytes));
   return p;
@@ -49,7 +51,8 @@ void *CUFAllocDevice(std::size_t sizeInBytes) {
 
 void CUFFreeDevice(void *p) { CUDA_REPORT_IF_ERROR(cudaFree(p)); }
 
-void *CUFAllocManaged(std::size_t sizeInBytes) {
+void *CUFAllocManaged(
+    std::size_t sizeInBytes, [[maybe_unused]] std::int64_t asyncId) {
   void *p;
   CUDA_REPORT_IF_ERROR(
       cudaMallocManaged((void **)&p, sizeInBytes, cudaMemAttachGlobal));
@@ -58,9 +61,10 @@ void *CUFAllocManaged(std::size_t sizeInBytes) {
 
 void CUFFreeManaged(void *p) { CUDA_REPORT_IF_ERROR(cudaFree(p)); }
 
-void *CUFAllocUnified(std::size_t sizeInBytes) {
+void *CUFAllocUnified(
+    std::size_t sizeInBytes, [[maybe_unused]] std::int64_t asyncId) {
   // Call alloc managed for the time being.
-  return CUFAllocManaged(sizeInBytes);
+  return CUFAllocManaged(sizeInBytes, asyncId);
 }
 
 void CUFFreeUnified(void *p) {
@@ -19,7 +19,8 @@ RT_EXT_API_GROUP_BEGIN
 
 Descriptor *RTDEF(CUFAllocDesciptor)(
     std::size_t sizeInBytes, const char *sourceFile, int sourceLine) {
-  return reinterpret_cast<Descriptor *>(CUFAllocManaged(sizeInBytes));
+  return reinterpret_cast<Descriptor *>(
+      CUFAllocManaged(sizeInBytes, kCudaNoStream));
 }
 
 void RTDEF(CUFFreeDesciptor)(
@@ -133,15 +133,17 @@ void RTDEF(AllocatableApplyMold)(
   }
 }
 
-int RTDEF(AllocatableAllocate)(Descriptor &descriptor, bool hasStat,
-    const Descriptor *errMsg, const char *sourceFile, int sourceLine) {
+int RTDEF(AllocatableAllocate)(Descriptor &descriptor, std::int64_t asyncId,
+    bool hasStat, const Descriptor *errMsg, const char *sourceFile,
+    int sourceLine) {
   Terminator terminator{sourceFile, sourceLine};
   if (!descriptor.IsAllocatable()) {
     return ReturnError(terminator, StatInvalidDescriptor, errMsg, hasStat);
   } else if (descriptor.IsAllocated()) {
     return ReturnError(terminator, StatBaseNotNull, errMsg, hasStat);
   } else {
-    int stat{ReturnError(terminator, descriptor.Allocate(), errMsg, hasStat)};
+    int stat{
+        ReturnError(terminator, descriptor.Allocate(asyncId), errMsg, hasStat)};
     if (stat == StatOk) {
       if (const DescriptorAddendum * addendum{descriptor.Addendum()}) {
         if (const auto *derived{addendum->derivedType()}) {
@@ -160,7 +162,7 @@ int RTDEF(AllocatableAllocateSource)(Descriptor &alloc,
     const Descriptor &source, bool hasStat, const Descriptor *errMsg,
     const char *sourceFile, int sourceLine) {
   int stat{RTNAME(AllocatableAllocate)(
-      alloc, hasStat, errMsg, sourceFile, sourceLine)};
+      alloc, /*asyncId=*/-1, hasStat, errMsg, sourceFile, sourceLine)};
   if (stat == StatOk) {
     Terminator terminator{sourceFile, sourceLine};
     DoFromSourceAssign(alloc, source, terminator);
@@ -50,8 +50,8 @@ static RT_API_ATTRS void AllocateOrReallocateVectorIfNeeded(
         initialAllocationSize(fromElements, to.ElementBytes())};
     to.GetDimension(0).SetBounds(1, allocationSize);
     RTNAME(AllocatableAllocate)
-    (to, /*hasStat=*/false, /*errMsg=*/nullptr, vector.sourceFile,
-        vector.sourceLine);
+    (to, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr,
+        vector.sourceFile, vector.sourceLine);
     to.GetDimension(0).SetBounds(1, fromElements);
     vector.actualAllocationSize = allocationSize;
   } else {
@@ -59,8 +59,8 @@ static RT_API_ATTRS void AllocateOrReallocateVectorIfNeeded(
       // first value: there should be no reallocation.
       RUNTIME_CHECK(terminator, previousToElements >= fromElements);
       RTNAME(AllocatableAllocate)
-      (to, /*hasStat=*/false, /*errMsg=*/nullptr, vector.sourceFile,
-          vector.sourceLine);
+      (to, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr,
+          vector.sourceFile, vector.sourceLine);
       vector.actualAllocationSize = previousToElements;
     }
   } else {
@@ -163,7 +163,7 @@ RT_API_ATTRS static inline int MapAllocIdx(const Descriptor &desc) {
 #endif
 }
 
-RT_API_ATTRS int Descriptor::Allocate() {
+RT_API_ATTRS int Descriptor::Allocate(std::int64_t asyncId) {
   std::size_t elementBytes{ElementBytes()};
   if (static_cast<std::int64_t>(elementBytes) < 0) {
     // F'2023 7.4.4.2 p5: "If the character length parameter value evaluates
@@ -175,7 +175,7 @@ RT_API_ATTRS int Descriptor::Allocate() {
   // Zero size allocation is possible in Fortran and the resulting
   // descriptor must be allocated/associated. Since std::malloc(0)
   // result is implementation defined, always allocate at least one byte.
-  void *p{alloc(byteSize ? byteSize : 1)};
+  void *p{alloc(byteSize ? byteSize : 1, asyncId)};
   if (!p) {
     return CFI_ERROR_MEM_ALLOCATION;
   }
@@ -192,7 +192,7 @@ func.func @test_polymorphic(%arg0: !fir.class<!fir.type<_QMtypesTt>> {fir.bindc_
 // CHECK: %[[VAL_35:.*]] = fir.absent !fir.box<none>
 // CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_4]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>) -> !fir.ref<!fir.box<none>>
 // CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_31]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
-// CHECK: %[[VAL_38:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_36]], %[[VAL_34]], %[[VAL_35]], %[[VAL_37]], %[[VAL_33]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+// CHECK: %[[VAL_38:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_36]], %{{.*}}, %[[VAL_34]], %[[VAL_35]], %[[VAL_37]], %[[VAL_33]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 // CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_13]]#0 : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>
 // CHECK: %[[VAL_40:.*]] = arith.constant 1 : index
 // CHECK: fir.do_loop %[[VAL_41:.*]] = %[[VAL_40]] to %[[EX1]] step %[[VAL_40]] unordered {
@@ -276,7 +276,7 @@ func.func @test_polymorphic_expr(%arg0: !fir.class<!fir.type<_QMtypesTt>> {fir.b
 // CHECK: %[[VAL_36:.*]] = fir.absent !fir.box<none>
 // CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_5]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>) -> !fir.ref<!fir.box<none>>
 // CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_32]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
-// CHECK: %[[VAL_39:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_37]], %[[VAL_35]], %[[VAL_36]], %[[VAL_38]], %[[VAL_34]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+// CHECK: %[[VAL_39:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_37]], %{{.*}}, %[[VAL_35]], %[[VAL_36]], %[[VAL_38]], %[[VAL_34]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 // CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>
 // CHECK: %[[VAL_41:.*]] = arith.constant 1 : index
 // CHECK: fir.do_loop %[[VAL_42:.*]] = %[[VAL_41]] to %[[VAL_3]] step %[[VAL_41]] unordered {
@@ -329,7 +329,7 @@ func.func @test_polymorphic_expr(%arg0: !fir.class<!fir.type<_QMtypesTt>> {fir.b
 // CHECK: %[[VAL_85:.*]] = fir.absent !fir.box<none>
 // CHECK: %[[VAL_86:.*]] = fir.convert %[[VAL_4]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>) -> !fir.ref<!fir.box<none>>
 // CHECK: %[[VAL_87:.*]] = fir.convert %[[VAL_81]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
-// CHECK: %[[VAL_88:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_86]], %[[VAL_84]], %[[VAL_85]], %[[VAL_87]], %[[VAL_83]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+// CHECK: %[[VAL_88:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_86]], %{{.*}}, %[[VAL_84]], %[[VAL_85]], %[[VAL_87]], %[[VAL_83]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 // CHECK: %[[VAL_89:.*]] = fir.load %[[VAL_63]]#0 : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>
 // CHECK: %[[VAL_90:.*]] = arith.constant 1 : index
 // CHECK: fir.do_loop %[[VAL_91:.*]] = %[[VAL_90]] to %[[VAL_3]] step %[[VAL_90]] unordered {
@@ -469,6 +469,6 @@ contains
 end module
 
 ! CHECK-LABEL: func.func @_QMacc_declare_post_action_statPinit()
-! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEx_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEx_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 ! CHECK: fir.if
-! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEy_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEy_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
@@ -267,7 +267,7 @@ contains
 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32
 ! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[P_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref<!fir.box<none>>, !fir.ref<none>, i32, i32) -> none
 ! CHECK: %[[P_CAST:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[P_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[P_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 ! CHECK: %[[TYPE_DESC_P1:.*]] = fir.type_desc !fir.type<_QMpolyTp1{a:i32,b:i32}>
 ! CHECK: %[[C1_CAST:.*]] = fir.convert %[[C1_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
@@ -276,7 +276,7 @@ contains
 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32
 ! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C1_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref<!fir.box<none>>, !fir.ref<none>, i32, i32) -> none
 ! CHECK: %[[C1_CAST:.*]] = fir.convert %[[C1_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C1_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C1_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 ! CHECK: %[[TYPE_DESC_P2:.*]] = fir.type_desc !fir.type<_QMpolyTp2{p1:!fir.type<_QMpolyTp1{a:i32,b:i32}>,c:i32}>
 ! CHECK: %[[C2_CAST:.*]] = fir.convert %[[C2_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
@@ -285,7 +285,7 @@ contains
 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32
 ! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C2_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref<!fir.box<none>>, !fir.ref<none>, i32, i32) -> none
 ! CHECK: %[[C2_CAST:.*]] = fir.convert %[[C2_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C2_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C2_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 ! CHECK: %[[TYPE_DESC_P1:.*]] = fir.type_desc !fir.type<_QMpolyTp1{a:i32,b:i32}>
 ! CHECK: %[[C3_CAST:.*]] = fir.convert %[[C3_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>>) -> !fir.ref<!fir.box<none>>
@@ -300,7 +300,7 @@ contains
 ! CHECK: %[[C10_I64:.*]] = fir.convert %[[C10]] : (i32) -> i64
 ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableSetBounds(%[[C3_CAST]], %[[C0]], %[[C1_I64]], %[[C10_I64]]) {{.*}}: (!fir.ref<!fir.box<none>>, i32, i64, i64) -> none
 ! CHECK: %[[C3_CAST:.*]] = fir.convert %[[C3_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C3_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C3_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 ! CHECK: %[[TYPE_DESC_P2:.*]] = fir.type_desc !fir.type<_QMpolyTp2{p1:!fir.type<_QMpolyTp1{a:i32,b:i32}>,c:i32}>
 ! CHECK: %[[C4_CAST:.*]] = fir.convert %[[C4_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>>) -> !fir.ref<!fir.box<none>>
@@ -316,7 +316,7 @@ contains
 ! CHECK: %[[C20_I64:.*]] = fir.convert %[[C20]] : (i32) -> i64
 ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableSetBounds(%[[C4_CAST]], %[[C0]], %[[C1_I64]], %[[C20_I64]]) {{.*}}: (!fir.ref<!fir.box<none>>, i32, i64, i64) -> none
 ! CHECK: %[[C4_CAST:.*]] = fir.convert %[[C4_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C4_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C4_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 ! CHECK: %[[C1_LOAD1:.*]] = fir.load %[[C1_DECL]]#0 : !fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>
 ! CHECK: fir.dispatch "proc1"(%[[C1_LOAD1]] : !fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>)
@@ -390,7 +390,7 @@ contains
 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32
 ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableInitIntrinsicForAllocate(%[[BOX_NONE]], %[[CAT]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref<!fir.box<none>>, i32, i32, i32, i32) -> none
 ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<none>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[PTR_DECL]]#1 : (!fir.ref<!fir.class<!fir.ptr<none>>>) -> !fir.ref<!fir.box<none>>
 ! CHECK: %[[CAT:.*]] = arith.constant 1 : i32
@@ -573,7 +573,7 @@ contains
 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32
 ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableInitCharacterForAllocate(%[[A_NONE]], %[[LEN]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i32, i32, i32) -> none
 ! CHECK: %[[A_NONE:.*]] = fir.convert %[[A_DECL]]#1 : (!fir.ref<!fir.class<!fir.heap<none>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 end module
 
@@ -592,17 +592,17 @@ end
 ! LLVM-LABEL: define void @_QMpolyPtest_allocatable()
 
 ! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 0, i32 0)
-! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
+! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 -1, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
 ! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 0, i32 0)
-! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
+! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 -1, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
 ! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp2, i32 0, i32 0)
-! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
+! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 -1, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
 ! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 1, i32 0)
 ! LLVM: %{{.*}} = call {} @_FortranAAllocatableSetBounds(ptr %{{.*}}, i32 0, i64 1, i64 10)
-! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
+! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 -1, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
 ! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp2, i32 1, i32 0)
 ! LLVM: %{{.*}} = call {} @_FortranAAllocatableSetBounds(ptr %{{.*}}, i32 0, i64 1, i64 20)
-! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
+! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 -1, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
 ! LLVM-COUNT-2: call void %{{[0-9]*}}()
 
 ! LLVM: call void @llvm.memcpy.p0.p0.i32
@@ -683,5 +683,5 @@ end
 ! LLVM: store { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QMpolyTp1, ptr null, i32 1) to i64), i32 20240719, i8 0, i8 42, i8 2, i8 1, ptr @_QMpolyEXdtXp1, [1 x i64] zeroinitializer }, ptr %[[ALLOCA1:[0-9]*]]
 ! LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[ALLOCA2:[0-9]+]], ptr %[[ALLOCA1]], i32 40, i1 false)
 ! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %[[ALLOCA2]], ptr @_QMpolyEXdtXp1, i32 0, i32 0)
-! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %[[ALLOCA2]], i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
+! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %[[ALLOCA2]], i64 -1, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
 ! LLVM: %{{.*}} = call i32 @_FortranAAllocatableDeallocatePolymorphic(ptr %[[ALLOCA2]], ptr {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
@@ -31,7 +31,7 @@ subroutine foo()
 ! CHECK: fir.call @{{.*}}AllocatableSetBounds(%[[xBoxCast2]], %c0{{.*}}, %[[xlbCast]], %[[xubCast]]) {{.*}}: (!fir.ref<!fir.box<none>>, i32, i64, i64) -> none
 ! CHECK-DAG: %[[xBoxCast3:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
 ! CHECK-DAG: %[[sourceFile:.*]] = fir.convert %{{.*}} -> !fir.ref<i8>
-! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[xBoxCast3]], %false{{.*}}, %[[errMsg]], %[[sourceFile]], %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[xBoxCast3]], %c-1{{.*}}, %false{{.*}}, %[[errMsg]], %[[sourceFile]], %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 ! Simply check that we are emitting the right numebr of set bound for y and z. Otherwise, this is just like x.
 ! CHECK: fir.convert %[[yBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
@@ -180,4 +180,4 @@ end subroutine
 ! CHECK: %[[M_BOX_NONE:.*]] = fir.convert %[[EMBOX_M]] : (!fir.box<!fir.array<10xi32>>) -> !fir.box<none>
 ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[M_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32) -> none
 ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
@@ -16,7 +16,7 @@ end subroutine
 ! CHECK: %[[A_REF_BOX_NONE1:.*]] = fir.convert %[[A]] : (!fir.ref<!fir.box<!fir.heap<i32>>>) -> !fir.ref<!fir.box<none>>
 ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableApplyMold(%[[A_REF_BOX_NONE1]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32) -> none
 ! CHECK: %[[A_REF_BOX_NONE2:.*]] = fir.convert %[[A]] : (!fir.ref<!fir.box<!fir.heap<i32>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_REF_BOX_NONE2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_REF_BOX_NONE2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 
 subroutine array_scalar_mold_allocation()
   real, allocatable :: a(:)
@@ -40,4 +40,4 @@ end subroutine array_scalar_mold_allocation
 ! CHECK: %[[REF_BOX_A1:.*]] = fir.convert %1 : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
 ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableSetBounds(%[[REF_BOX_A1]], {{.*}},{{.*}}, {{.*}}) fastmath<contract> : (!fir.ref<!fir.box<none>>, i32, i64, i64) -> none
 ! CHECK: %[[REF_BOX_A2:.*]] = fir.convert %[[A]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[REF_BOX_A2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[REF_BOX_A2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
@@ -1154,11 +1154,11 @@ end program
 ! CHECK-LABEL: func.func @_QQmain() attributes {fir.bindc_name = "test"} {
 ! CHECK: %[[ADDR_O:.*]] = fir.address_of(@_QFEo) : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>>>>
 ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[ADDR_O]] : (!fir.ref<!fir.box<!fir.heap<!fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
 ! CHECK: %[[O:.*]] = fir.load %[[ADDR_O]] : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>>>>
 ! CHECK: %[[FIELD_INNER:.*]] = fir.field_index inner, !fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>
 ! CHECK: %[[COORD_INNER:.*]] = fir.coordinate_of %[[O]], %[[FIELD_INNER]] : (!fir.box<!fir.heap<!fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>>>, !fir.field) -> !fir.ref<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>
-! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} unordered iter_args(%arg1 = %9) -> (!fir.array<5x!fir.logical<4>>) {
+! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} unordered iter_args(%arg1 = %{{.*}}) -> (!fir.array<5x!fir.logical<4>>) {
 ! CHECK: %[[EMBOXED:.*]] = fir.embox %[[COORD_INNER]] : (!fir.ref<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>) -> !fir.class<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>
-! CHECK: %{{.*}} = fir.call @_QMpolymorphic_testPlt(%17, %[[EMBOXED]]) {{.*}} : (!fir.ref<i32>, !fir.class<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>) -> !fir.logical<4>
+! CHECK: %{{.*}} = fir.call @_QMpolymorphic_testPlt(%{{.*}}, %[[EMBOXED]]) {{.*}} : (!fir.ref<i32>, !fir.class<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>) -> !fir.logical<4>
 ! CHECK: }
@@ -42,7 +42,8 @@ TEST(AllocatableCUFTest, SimpleDeviceAllocatable) {
   CUDA_REPORT_IF_ERROR(cudaMalloc(&device_desc, a->SizeInBytes()));
 
   RTNAME(AllocatableAllocate)
-  (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
+  (*a, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__,
+      __LINE__);
   EXPECT_TRUE(a->IsAllocated());
   RTNAME(CUFDescriptorSync)(device_desc, a.get(), __FILE__, __LINE__);
   cudaDeviceSynchronize();
@@ -35,7 +35,8 @@ TEST(AllocatableCUFTest, SimpleDeviceAllocate) {
   EXPECT_FALSE(a->HasAddendum());
   RTNAME(AllocatableSetBounds)(*a, 0, 1, 10);
   RTNAME(AllocatableAllocate)
-  (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
+  (*a, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__,
+      __LINE__);
   EXPECT_TRUE(a->IsAllocated());
   RTNAME(AllocatableDeallocate)
   (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
@@ -53,7 +54,8 @@ TEST(AllocatableCUFTest, SimplePinnedAllocate) {
   EXPECT_FALSE(a->HasAddendum());
   RTNAME(AllocatableSetBounds)(*a, 0, 1, 10);
   RTNAME(AllocatableAllocate)
-  (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
+  (*a, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__,
+      __LINE__);
   EXPECT_TRUE(a->IsAllocated());
   RTNAME(AllocatableDeallocate)
   (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
@@ -51,7 +51,8 @@ TEST(MemoryCUFTest, CUFDataTransferDescDesc) {
   EXPECT_EQ((int)kDeviceAllocatorPos, dev->GetAllocIdx());
   RTNAME(AllocatableSetBounds)(*dev, 0, 1, 10);
   RTNAME(AllocatableAllocate)
-  (*dev, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
+  (*dev, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__,
+      __LINE__);
   EXPECT_TRUE(dev->IsAllocated());
 
   // Create temp array to transfer to device.