
This prepares it for the regalloc work. Part of it is making model evaluation across 'development' and 'release' scenarios more reusable. This patch:

- extends support to tensors of any shape (not just scalars, like we had in the inliner -Oz case). While the tensor shape can be anything, we assume row-major layout and expose the tensor as a buffer.
- exposes the NoInferenceModelRunner, which we use in the 'development' mode to keep the evaluation code path consistent and simplify logging, as we'll want to reuse it in the regalloc case.

Differential Revision: https://reviews.llvm.org/D115306
33 lines · 1.3 KiB · C++
//===- NoInferenceModelRunner.cpp - noop ML model runner ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A pseudo model runner. We use it to store feature values when collecting
// logs for the default policy, in 'development' mode, but never ask it to
// 'run'.
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_API)

#include "llvm/Analysis/NoInferenceModelRunner.h"
#include "llvm/Analysis/Utils/TFUtils.h"

using namespace llvm;

/// Construct a runner that only stores feature values and is never asked to
/// evaluate a model.
///
/// For each input tensor spec, pre-allocates one raw byte buffer sized
/// elementCount * elementByteSize — i.e. a flat, row-major backing store for
/// tensors of any shape, not just scalars.
NoInferenceModelRunner::NoInferenceModelRunner(
    LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
    : MLModelRunner(Ctx) {
  // One buffer per input tensor; reserve up front to avoid reallocations.
  ValuesBuffer.reserve(Inputs.size());
  for (const auto &TS : Inputs)
    ValuesBuffer.push_back(std::make_unique<char[]>(TS.getElementCount() *
                                                    TS.getElementByteSize()));
}

void *NoInferenceModelRunner::getTensorUntyped(size_t Index) {
|
|
return ValuesBuffer[Index].get();
|
|
}
#endif // defined(LLVM_HAVE_TF_API)