//===-- lib/Evaluate/fold-integer.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "fold-implementation.h"
#include "fold-reduction.h"
#include "flang/Evaluate/check-expression.h"

namespace Fortran::evaluate {

// Given a collection of ConstantSubscript values, package them as a Constant.
// Returns a scalar value if asScalar == true and a rank-1 array otherwise.
template <typename T>
Expr<T> PackageConstantBounds(
    const ConstantSubscripts &&bounds, bool asScalar = false) {
  if (asScalar) {
    return Expr<T>{Constant<T>{bounds.at(0)}};
  } else {
    // As a rank-1 array
    const int rank{GetRank(bounds)};
    std::vector<Scalar<T>> packed(rank);
    std::transform(bounds.begin(), bounds.end(), packed.begin(),
        [](ConstantSubscript x) { return Scalar<T>(x); });
    return Expr<T>{Constant<T>{std::move(packed), ConstantSubscripts{rank}}};
  }
}

// Class to retrieve the constant bound of an expression which is an
// array that devolves to a type of Constant<T>.
class GetConstantArrayBoundHelper {
public:
  template <typename T>
  static Expr<T> GetLbound(
      const Expr<SomeType> &array, std::optional<int> dim) {
    return PackageConstantBounds<T>(
        GetConstantArrayBoundHelper(dim, /*getLbound=*/true).Get(array),
        dim.has_value());
  }

  template <typename T>
  static Expr<T> GetUbound(
      const Expr<SomeType> &array, std::optional<int> dim) {
    return PackageConstantBounds<T>(
        GetConstantArrayBoundHelper(dim, /*getLbound=*/false).Get(array),
        dim.has_value());
  }

private:
  GetConstantArrayBoundHelper(std::optional<int> dim, bool getLbound)
      : dim_{dim}, getLbound_{getLbound} {}

  template <typename T> ConstantSubscripts Get(const T &) {
    // The method is needed for template expansion, but we should never get
    // here in practice.
    CHECK(false);
    return {0};
  }

  template <typename T> ConstantSubscripts Get(const Constant<T> &x) {
    if (getLbound_) {
      // Return the lower bound
      if (dim_) {
        return {x.lbounds().at(*dim_)};
      } else {
        return x.lbounds();
      }
    } else {
      // Return the upper bound
      if (arrayFromParenthesesExpr) {
        // Underlying array comes from a (x) expression - return its shape
        if (dim_) {
          return {x.shape().at(*dim_)};
        } else {
          return x.shape();
        }
      } else {
        return x.ComputeUbounds(dim_);
      }
    }
  }

  template <typename T> ConstantSubscripts Get(const Parentheses<T> &x) {
    // Because parentheses create a temporary variable, return [1, ..., 1]
    // for the lower bounds and the shape for the upper bounds.
    if (getLbound_) {
      return ConstantSubscripts(x.Rank(), ConstantSubscript{1});
    } else {
      // Indicate that the underlying array comes from a parentheses
      // expression.  Continue to unwrap the expression until we hit a
      // constant.
      arrayFromParenthesesExpr = true;
      return Get(x.left());
    }
  }

  template <typename T> ConstantSubscripts Get(const Expr<T> &x) {
    // Recurse through Expr<T>'s until we hit a constant.
    return common::visit([&](const auto &inner) { return Get(inner); },
        //  [&](const auto &) { return 0; },
        x.u);
  }

  const std::optional<int> dim_;
  const bool getLbound_;
  bool arrayFromParenthesesExpr{false};
};

template <int KIND>
Expr<Type<TypeCategory::Integer, KIND>> LBOUND(FoldingContext &context,
    FunctionRef<Type<TypeCategory::Integer, KIND>> &&funcRef) {
  using T = Type<TypeCategory::Integer, KIND>;
  ActualArguments &args{funcRef.arguments()};
  if (const auto *array{UnwrapExpr<Expr<SomeType>>(args[0])}) {
    if (int rank{array->Rank()}; rank > 0) {
      std::optional<int> dim;
      if (funcRef.Rank() == 0) {
        // Optional DIM= argument is present: result is scalar.
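        // A constant DIM= must lie in [1, rank]; it is converted to a
        // zero-based dimension index below.  A DIM= that is present but not
        // constant leaves the reference unfolded.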
        if (auto dim64{GetInt64Arg(args[1])}) {
          if (*dim64 < 1 || *dim64 > rank) {
            context.messages().Say("DIM=%jd dimension is out of range for "
                                   "rank-%d array"_err_en_US,
                *dim64, rank);
            return MakeInvalidIntrinsic<T>(std::move(funcRef));
          } else {
            dim = *dim64 - 1; // 1-based to 0-based
          }
        } else {
          // DIM= is present but not constant
          return Expr<T>{std::move(funcRef)};
        }
      }
      bool lowerBoundsAreOne{true};
      if (auto named{ExtractNamedEntity(*array)}) {
        const Symbol &symbol{named->GetLastSymbol()};
        if (symbol.Rank() == rank) {
          lowerBoundsAreOne = false;
          if (dim) {
            if (auto lb{GetLBOUND(context, *named, *dim)}) {
              return Fold(context, ConvertToType<T>(std::move(*lb)));
            }
          } else if (auto extents{
                         AsExtentArrayExpr(GetLBOUNDs(context, *named))}) {
            return Fold(context,
                ConvertToType<T>(Expr<ExtentType>{std::move(*extents)}));
          }
        } else {
          lowerBoundsAreOne = symbol.Rank() == 0; // LBOUND(array%component)
        }
      }
      if (IsActuallyConstant(*array)) {
        return GetConstantArrayBoundHelper::GetLbound<T>(*array, dim);
      }
      if (lowerBoundsAreOne) {
        ConstantSubscripts ones(rank, ConstantSubscript{1});
        return PackageConstantBounds<T>(std::move(ones), dim.has_value());
      }
    }
  }
  return Expr<T>{std::move(funcRef)};
}

template <int KIND>
Expr<Type<TypeCategory::Integer, KIND>> UBOUND(FoldingContext &context,
    FunctionRef<Type<TypeCategory::Integer, KIND>> &&funcRef) {
  using T = Type<TypeCategory::Integer, KIND>;
  ActualArguments &args{funcRef.arguments()};
  if (auto *array{UnwrapExpr<Expr<SomeType>>(args[0])}) {
    if (int rank{array->Rank()}; rank > 0) {
      std::optional<int> dim;
      if (funcRef.Rank() == 0) {
        // Optional DIM= argument is present: result is scalar.
        if (auto dim64{GetInt64Arg(args[1])}) {
          if (*dim64 < 1 || *dim64 > rank) {
            context.messages().Say("DIM=%jd dimension is out of range for "
                                   "rank-%d array"_err_en_US,
                *dim64, rank);
            return MakeInvalidIntrinsic<T>(std::move(funcRef));
          } else {
            dim = *dim64 - 1; // 1-based to 0-based
          }
        } else {
          // DIM= is present but not constant
          return Expr<T>{std::move(funcRef)};
        }
      }
      bool takeBoundsFromShape{true};
      if (auto named{ExtractNamedEntity(*array)}) {
        const Symbol &symbol{named->GetLastSymbol()};
        if (symbol.Rank() == rank) {
          takeBoundsFromShape = false;
          if (dim) {
            if (semantics::IsAssumedSizeArray(symbol) && *dim == rank - 1) {
              context.messages().Say("DIM=%jd dimension is out of range for "
                                     "rank-%d assumed-size array"_err_en_US,
                  rank, rank);
              return MakeInvalidIntrinsic<T>(std::move(funcRef));
            } else if (auto ub{GetUBOUND(context, *named, *dim)}) {
              return Fold(context, ConvertToType<T>(std::move(*ub)));
            }
          } else {
            Shape ubounds{GetUBOUNDs(context, *named)};
            if (semantics::IsAssumedSizeArray(symbol)) {
              CHECK(!ubounds.back());
              ubounds.back() = ExtentExpr{-1};
            }
            if (auto extents{AsExtentArrayExpr(ubounds)}) {
              return Fold(context,
                  ConvertToType<T>(Expr<ExtentType>{std::move(*extents)}));
            }
          }
        } else {
          takeBoundsFromShape = symbol.Rank() == 0; // UBOUND(array%component)
        }
      }
      if (IsActuallyConstant(*array)) {
        return GetConstantArrayBoundHelper::GetUbound<T>(*array, dim);
      }
      if (takeBoundsFromShape) {
        if (auto shape{GetContextFreeShape(context, *array)}) {
          if (dim) {
            if (auto &dimSize{shape->at(*dim)}) {
              return Fold(context,
                  ConvertToType<T>(Expr<ExtentType>{std::move(*dimSize)}));
            }
          } else if (auto shapeExpr{AsExtentArrayExpr(*shape)}) {
            return Fold(context, ConvertToType<T>(std::move(*shapeExpr)));
          }
        }
      }
    }
  }
  return Expr<T>{std::move(funcRef)};
}

// COUNT()
template <typename T>
static Expr<T> FoldCount(FoldingContext &context, FunctionRef<T> &&ref) {
  static_assert(T::category == TypeCategory::Integer);
  ActualArguments &arg{ref.arguments()};
  if (const Constant<LogicalResult> *mask{arg.empty()
              ? nullptr
              : Folder<LogicalResult>{context}.Folding(arg[0])}) {
    std::optional<int> dim;
    if (CheckReductionDIM(dim, context, arg, 1, mask->Rank())) {
      auto accumulator{[&](Scalar<T> &element, const ConstantSubscripts &at) {
        if (mask->At(at).IsTrue()) {
          element = element.AddSigned(Scalar<T>{1}).value;
        }
      }};
      return Expr<T>{DoReduction<T>(*mask, dim, Scalar<T>{}, accumulator)};
    }
  }
  return Expr<T>{std::move(ref)};
}

// FINDLOC(), MAXLOC(), & MINLOC()
enum class WhichLocation { Findloc, Maxloc, Minloc };
template <WhichLocation WHICH> class LocationHelper {
public:
  LocationHelper(
      DynamicType &&type, ActualArguments &arg, FoldingContext &context)
      : type_{type}, arg_{arg}, context_{context} {}
  using Result = std::optional<Constant<SubscriptInteger>>;
  using Types = std::conditional_t<WHICH == WhichLocation::Findloc,
      AllIntrinsicTypes, RelationalTypes>;

  template <typename T> Result Test() const {
    if (T::category != type_.category() || T::kind != type_.kind()) {
      return std::nullopt;
    }
    CHECK(arg_.size() == (WHICH == WhichLocation::Findloc ? 6 : 5));
    Folder<T> folder{context_};
    Constant<T> *array{folder.Folding(arg_[0])};
    if (!array) {
      return std::nullopt;
    }
    std::optional<Constant<T>> value;
    if constexpr (WHICH == WhichLocation::Findloc) {
      if (const Constant<T> *p{folder.Folding(arg_[1])}) {
        value.emplace(*p);
      } else {
        return std::nullopt;
      }
    }
    std::optional<int> dim;
    Constant<LogicalResult> *mask{
        GetReductionMASK(arg_[maskArg], array->shape(), context_)};
    if ((!mask && arg_[maskArg]) ||
        !CheckReductionDIM(dim, context_, arg_, dimArg, array->Rank())) {
      return std::nullopt;
    }
    bool back{false};
    if (arg_[backArg]) {
      const auto *backConst{
          Folder<LogicalResult>{context_}.Folding(arg_[backArg])};
      if (backConst) {
        back = backConst->GetScalarValue().value().IsTrue();
      } else {
        return std::nullopt;
      }
    }
    const RelationalOperator relation{WHICH == WhichLocation::Findloc
            ? RelationalOperator::EQ
            : WHICH == WhichLocation::Maxloc
            ? (back ? RelationalOperator::GE : RelationalOperator::GT)
            : back ? RelationalOperator::LE
                   : RelationalOperator::LT};
    // Use lower bounds of 1 exclusively.
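    // The search normalizes ARRAY= (and MASK=, if present) to lower bounds
    // of 1 and visits elements in array element order.  With DIM= the inner
    // loop below runs along that dimension once per element of the reduced
    // shape; without DIM= the whole array is scanned and the result is a
    // rank-1 vector of subscripts (all zeros when no element matches).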
    array->SetLowerBoundsToOne();
    ConstantSubscripts at{array->lbounds()}, maskAt, resultIndices, resultShape;
    if (mask) {
      if (auto scalarMask{mask->GetScalarValue()}) {
        // Convert into array in case of scalar MASK= (for
        // MAXLOC/MINLOC/FINDLOC, MASK= must be conformable with ARRAY=)
        ConstantSubscript n{GetSize(array->shape())};
        std::vector<Scalar<LogicalResult>> mask_elements(
            n, Scalar<LogicalResult>{scalarMask.value()});
        *mask = Constant<LogicalResult>{
            std::move(mask_elements), ConstantSubscripts{n}};
      }
      mask->SetLowerBoundsToOne();
      maskAt = mask->lbounds();
    }
    if (dim) { // DIM=
      if (*dim < 1 || *dim > array->Rank()) {
        context_.messages().Say("DIM=%d is out of range"_err_en_US, *dim);
        return std::nullopt;
      }
      int zbDim{*dim - 1};
      resultShape = array->shape();
      resultShape.erase(
          resultShape.begin() + zbDim); // scalar if array is vector
      ConstantSubscript dimLength{array->shape()[zbDim]};
      ConstantSubscript n{GetSize(resultShape)};
      for (ConstantSubscript j{0}; j < n; ++j) {
        ConstantSubscript hit{0};
        if constexpr (WHICH == WhichLocation::Maxloc ||
            WHICH == WhichLocation::Minloc) {
          value.reset();
        }
        for (ConstantSubscript k{0}; k < dimLength;
             ++k, ++at[zbDim], mask && ++maskAt[zbDim]) {
          if ((!mask || mask->At(maskAt).IsTrue()) &&
              IsHit(array->At(at), value, relation)) {
            hit = at[zbDim];
            if constexpr (WHICH == WhichLocation::Findloc) {
              if (!back) {
                break;
              }
            }
          }
        }
        resultIndices.emplace_back(hit);
        at[zbDim] = std::max<ConstantSubscript>(dimLength, 1);
        array->IncrementSubscripts(at);
        at[zbDim] = 1;
        if (mask) {
          maskAt[zbDim] = mask->lbounds()[zbDim] +
              std::max<ConstantSubscript>(dimLength, 1) - 1;
          mask->IncrementSubscripts(maskAt);
          maskAt[zbDim] = mask->lbounds()[zbDim];
        }
      }
    } else { // no DIM=
      resultShape = ConstantSubscripts{array->Rank()}; // always a vector
      ConstantSubscript n{GetSize(array->shape())};
      resultIndices = ConstantSubscripts(array->Rank(), 0);
      for (ConstantSubscript j{0}; j < n; ++j, array->IncrementSubscripts(at),
           mask && mask->IncrementSubscripts(maskAt)) {
        if ((!mask || mask->At(maskAt).IsTrue()) &&
            IsHit(array->At(at), value, relation)) {
          resultIndices = at;
          if constexpr (WHICH == WhichLocation::Findloc) {
            if (!back) {
              break;
            }
          }
        }
      }
    }
    std::vector<Scalar<SubscriptInteger>> resultElements;
    for (ConstantSubscript j : resultIndices) {
      resultElements.emplace_back(j);
    }
    return Constant<SubscriptInteger>{
        std::move(resultElements), std::move(resultShape)};
  }

private:
  template <typename T>
  bool IsHit(typename Constant<T>::Element element,
      std::optional<Constant<T>> &value,
      [[maybe_unused]] RelationalOperator relation) const {
    std::optional<Expr<LogicalResult>> cmp;
    bool result{true};
    if (value) {
      if constexpr (T::category == TypeCategory::Logical) {
        // array(at) .EQV. value?
        static_assert(WHICH == WhichLocation::Findloc);
        cmp.emplace(ConvertToType<LogicalResult>(
            Expr<T>{LogicalOperation<T::kind>{LogicalOperator::Eqv,
                Expr<T>{Constant<T>{element}}, Expr<T>{Constant<T>{*value}}}}));
      } else { // compare array(at) to value
        cmp.emplace(PackageRelation(relation, Expr<T>{Constant<T>{element}},
            Expr<T>{Constant<T>{*value}}));
      }
      Expr<LogicalResult> folded{Fold(context_, std::move(*cmp))};
      result = GetScalarConstantValue<LogicalResult>(folded).value().IsTrue();
    } else {
      // first unmasked element for MAXLOC/MINLOC - always take it
    }
    if constexpr (WHICH == WhichLocation::Maxloc ||
        WHICH == WhichLocation::Minloc) {
      if (result) {
        value.emplace(std::move(element));
      }
    }
    return result;
  }

  static constexpr int dimArg{
      WHICH == WhichLocation::Findloc ? 2 : 1};
  static constexpr int maskArg{dimArg + 1};
  static constexpr int backArg{maskArg + 2};

  DynamicType type_;
  ActualArguments &arg_;
  FoldingContext &context_;
};

template <WhichLocation which>
static std::optional<Constant<SubscriptInteger>> FoldLocationCall(
    ActualArguments &arg, FoldingContext &context) {
  if (arg[0]) {
    if (auto type{arg[0]->GetType()}) {
      if constexpr (which == WhichLocation::Findloc) {
        // Both ARRAY and VALUE are susceptible to conversion to a common
        // comparison type.
        if (arg[1]) {
          if (auto valType{arg[1]->GetType()}) {
            if (auto compareType{ComparisonType(*type, *valType)}) {
              type = compareType;
            }
          }
        }
      }
      return common::SearchTypes(
          LocationHelper<which>{std::move(*type), arg, context});
    }
  }
  return std::nullopt;
}

template <WhichLocation which, typename T>
static Expr<T> FoldLocation(FoldingContext &context, FunctionRef<T> &&ref) {
  static_assert(T::category == TypeCategory::Integer);
  if (std::optional<Constant<SubscriptInteger>> found{
          FoldLocationCall<which>(ref.arguments(), context)}) {
    return Expr<T>{Fold(
        context, ConvertToType<T>(Expr<SubscriptInteger>{std::move(*found)}))};
  } else {
    return Expr<T>{std::move(ref)};
  }
}

// for IALL, IANY, & IPARITY
template <typename T>
static Expr<T> FoldBitReduction(FoldingContext &context, FunctionRef<T> &&ref,
    Scalar<T> (Scalar<T>::*operation)(const Scalar<T> &) const,
    Scalar<T> identity) {
  static_assert(T::category == TypeCategory::Integer);
  std::optional<int> dim;
  if (std::optional<Constant<T>> array{
          ProcessReductionArgs<T>(context, ref.arguments(), dim, identity,
              /*ARRAY=*/0, /*DIM=*/1, /*MASK=*/2)}) {
    auto accumulator{[&](Scalar<T> &element, const ConstantSubscripts &at) {
      element = (element.*operation)(array->At(at));
    }};
    return Expr<T>{DoReduction<T>(*array, dim, identity, accumulator)};
  }
  return Expr<T>{std::move(ref)};
}

template <int KIND>
Expr<Type<TypeCategory::Integer, KIND>> FoldIntrinsicFunction(
    FoldingContext &context,
    FunctionRef<Type<TypeCategory::Integer, KIND>> &&funcRef) {
  using T = Type<TypeCategory::Integer, KIND>;
  using Int4 = Type<TypeCategory::Integer, 4>;
  ActualArguments &args{funcRef.arguments()};
  auto *intrinsic{std::get_if<SpecificIntrinsic>(&funcRef.proc().u)};
  CHECK(intrinsic);
  std::string name{intrinsic->name};
  if (name == "abs") { // incl. babs, iiabs, jiabs, & kiabs
    return FoldElementalIntrinsic<T, T>(context, std::move(funcRef),
        ScalarFunc<T, T>([&context](const Scalar<T> &i) -> Scalar<T> {
          typename Scalar<T>::ValueWithOverflow j{i.ABS()};
          if (j.overflow) {
            context.messages().Say(
                "abs(integer(kind=%d)) folding overflowed"_warn_en_US, KIND);
          }
          return j.value;
        }));
  } else if (name == "bit_size") {
    return Expr<T>{Scalar<T>::bits};
  } else if (name == "ceiling" || name == "floor" || name == "nint") {
    if (const auto *cx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      // NINT rounds ties away from zero, not to even
      common::RoundingMode mode{name == "ceiling"
              ? common::RoundingMode::Up
              : name == "floor"
              ? common::RoundingMode::Down
              : common::RoundingMode::TiesAwayFromZero};
      return common::visit(
          [&](const auto &kx) {
            using TR = ResultType<decltype(kx)>;
            return FoldElementalIntrinsic<T, TR>(context, std::move(funcRef),
                ScalarFunc<T, TR>([&](const Scalar<TR> &x) {
                  auto y{x.template ToInteger<Scalar<T>>(mode)};
                  if (y.flags.test(RealFlag::Overflow)) {
                    context.messages().Say(
                        "%s intrinsic folding overflow"_warn_en_US, name);
                  }
                  return y.value;
                }));
          },
          cx->u);
    }
  } else if (name == "count") {
    return FoldCount<T>(context, std::move(funcRef));
  } else if (name == "digits") {
    if (const auto *cx{UnwrapExpr<Expr<SomeInteger>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::DIGITS;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::DIGITS;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeComplex>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<typename ResultType<decltype(kx)>::Part>::DIGITS;
          },
          cx->u)};
    }
  } else if (name == "dim") {
    return FoldElementalIntrinsic<T, T, T>(
        context, std::move(funcRef), &Scalar<T>::DIM);
  } else if (name == "dot_product") {
    return FoldDotProduct<T>(context, std::move(funcRef));
  } else if (name == "dshiftl" || name == "dshiftr") {
    const auto fptr{
        name == "dshiftl" ? &Scalar<T>::DSHIFTL : &Scalar<T>::DSHIFTR};
    // Third argument can be of any kind.  However, it must not be greater
    // than BIT_SIZE.  It can be converted to Int4 to simplify.
    return FoldElementalIntrinsic<T, T, T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, T, Int4>(
            [&fptr](const Scalar<T> &i, const Scalar<T> &j,
                const Scalar<Int4> &shift) -> Scalar<T> {
              return std::invoke(fptr, i, j, static_cast<int>(shift.ToInt64()));
            }));
  } else if (name == "exponent") {
    if (auto *sx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return common::visit(
          [&funcRef, &context](const auto &x) -> Expr<T> {
            using TR = typename std::decay_t<decltype(x)>::Result;
            return FoldElementalIntrinsic<T, TR>(context, std::move(funcRef),
                &Scalar<TR>::template EXPONENT<Scalar<T>>);
          },
          sx->u);
    } else {
      DIE("exponent argument must be real");
    }
  } else if (name == "findloc") {
    return FoldLocation<WhichLocation::Findloc, T>(context, std::move(funcRef));
  } else if (name == "huge") {
    return Expr<T>{Scalar<T>::HUGE()};
  } else if (name == "iachar" || name == "ichar") {
    auto *someChar{UnwrapExpr<Expr<SomeCharacter>>(args[0])};
    CHECK(someChar);
    if (auto len{ToInt64(someChar->LEN())}) {
      if (len.value() != 1) {
        // Do not die, this was not checked before
        context.messages().Say(
            "Character in intrinsic function %s must have length one"_warn_en_US,
            name);
      } else {
        return common::visit(
            [&funcRef, &context](const auto &str) -> Expr<T> {
              using Char = typename std::decay_t<decltype(str)>::Result;
              return FoldElementalIntrinsic<T, Char>(context,
                  std::move(funcRef),
                  ScalarFunc<T, Char>([](const Scalar<Char> &c) {
                    return Scalar<T>{CharacterUtils<Char::kind>::ICHAR(c)};
                  }));
            },
            someChar->u);
      }
    }
  } else if (name == "iand" || name == "ior" || name == "ieor") {
    auto fptr{&Scalar<T>::IAND};
    if (name == "iand") { // done in fptr declaration
    } else if (name == "ior") {
      fptr = &Scalar<T>::IOR;
    } else if (name == "ieor") {
      fptr = &Scalar<T>::IEOR;
    } else {
      common::die("missing case to fold intrinsic function %s", name.c_str());
    }
    return FoldElementalIntrinsic<T, T, T>(
        context, std::move(funcRef), ScalarFunc<T, T, T>(fptr));
  } else if (name == "iall") {
    return FoldBitReduction(
        context, std::move(funcRef), &Scalar<T>::IAND, Scalar<T>{}.NOT());
  } else if (name == "iany") {
    return FoldBitReduction(
        context, std::move(funcRef), &Scalar<T>::IOR, Scalar<T>{});
  } else if (name == "ibclr" || name == "ibset") {
    // Second argument can be of any kind.  However, it must be smaller
    // than BIT_SIZE.  It can be converted to Int4 to simplify.
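    // For example, IBSET(0, 3) folds to 8 and IBCLR(15, 1) folds to 13.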
    auto fptr{&Scalar<T>::IBCLR};
    if (name == "ibclr") { // done in fptr definition
    } else if (name == "ibset") {
      fptr = &Scalar<T>::IBSET;
    } else {
      common::die("missing case to fold intrinsic function %s", name.c_str());
    }
    return FoldElementalIntrinsic<T, T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, Int4>([&](const Scalar<T> &i,
                                   const Scalar<Int4> &pos) -> Scalar<T> {
          auto posVal{static_cast<int>(pos.ToInt64())};
          if (posVal < 0) {
            context.messages().Say(
                "bit position for %s (%d) is negative"_err_en_US, name, posVal);
          } else if (posVal >= i.bits) {
            context.messages().Say(
                "bit position for %s (%d) is not less than %d"_err_en_US, name,
                posVal, i.bits);
          }
          return std::invoke(fptr, i, posVal);
        }));
  } else if (name == "ibits") {
    return FoldElementalIntrinsic<T, T, Int4, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, Int4, Int4>(
            [&](const Scalar<T> &i, const Scalar<Int4> &pos,
                const Scalar<Int4> &len) -> Scalar<T> {
              auto posVal{static_cast<int>(pos.ToInt64())};
              auto lenVal{static_cast<int>(len.ToInt64())};
              if (posVal < 0) {
                context.messages().Say(
                    "bit position for IBITS(POS=%d,LEN=%d) is negative"_err_en_US,
                    posVal, lenVal);
              } else if (lenVal < 0) {
                context.messages().Say(
                    "bit length for IBITS(POS=%d,LEN=%d) is negative"_err_en_US,
                    posVal, lenVal);
              } else if (posVal + lenVal > i.bits) {
                context.messages().Say(
                    "IBITS(POS=%d,LEN=%d) must have POS+LEN no greater than %d"_err_en_US,
                    posVal, lenVal, i.bits);
              }
              return i.IBITS(posVal, lenVal);
            }));
  } else if (name == "index" || name == "scan" || name == "verify") {
    if (auto *charExpr{UnwrapExpr<Expr<SomeCharacter>>(args[0])}) {
      return common::visit(
          [&](const auto &kch) -> Expr<T> {
            using TC = typename std::decay_t<decltype(kch)>::Result;
            if (UnwrapExpr<Expr<SomeLogical>>(args[2])) { // BACK=
              return FoldElementalIntrinsic<T, TC, TC, LogicalResult>(context,
                  std::move(funcRef),
                  ScalarFunc<T, TC, TC, LogicalResult>{
                      [&name](const Scalar<TC> &str, const Scalar<TC> &other,
                          const Scalar<LogicalResult> &back) -> Scalar<T> {
                        return name == "index"
                            ? CharacterUtils<TC::kind>::INDEX(
                                  str, other, back.IsTrue())
                            : name == "scan"
                            ? CharacterUtils<TC::kind>::SCAN(
                                  str, other, back.IsTrue())
                            : CharacterUtils<TC::kind>::VERIFY(
                                  str, other, back.IsTrue());
                      }});
            } else {
              return FoldElementalIntrinsic<T, TC, TC>(context,
                  std::move(funcRef),
                  ScalarFunc<T, TC, TC>{
                      [&name](const Scalar<TC> &str,
                          const Scalar<TC> &other) -> Scalar<T> {
                        return name == "index"
                            ? CharacterUtils<TC::kind>::INDEX(str, other)
                            : name == "scan"
                            ? CharacterUtils<TC::kind>::SCAN(str, other)
                            : CharacterUtils<TC::kind>::VERIFY(str, other);
                      }});
            }
          },
          charExpr->u);
    } else {
      DIE("first argument must be CHARACTER");
    }
  } else if (name == "int") {
    if (auto *expr{UnwrapExpr<Expr<SomeType>>(args[0])}) {
      return common::visit(
          [&](auto &&x) -> Expr<T> {
            using From = std::decay_t<decltype(x)>;
            if constexpr (std::is_same_v<From, BOZLiteralConstant> ||
                IsNumericCategoryExpr<From>()) {
              return Fold(context, ConvertToType<T>(std::move(x)));
            }
            DIE("int() argument type not valid");
          },
          std::move(expr->u));
    }
  } else if (name == "int_ptr_kind") {
    return Expr<T>{8};
  } else if (name == "kind") {
    if constexpr (common::HasMember<T, IntegerTypes>) {
      return Expr<T>{args[0].value().GetType()->kind()};
    } else {
      DIE("kind() result not integral");
    }
  } else if (name == "iparity") {
    return FoldBitReduction(
        context, std::move(funcRef), &Scalar<T>::IEOR, Scalar<T>{});
  } else if (name == "ishft") {
    return FoldElementalIntrinsic<T, T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, Int4>([&](const Scalar<T> &i,
                                   const Scalar<Int4> &pos) -> Scalar<T> {
          auto posVal{static_cast<int>(pos.ToInt64())};
          if (posVal < -i.bits) {
            context.messages().Say(
                "SHIFT=%d count for ishft is less than %d"_err_en_US, posVal,
                -i.bits);
          } else if (posVal > i.bits) {
            context.messages().Say(
                "SHIFT=%d count for ishft is greater than %d"_err_en_US, posVal,
                i.bits);
          }
          return i.ISHFT(posVal);
        }));
  } else if (name == "ishftc") {
    if (args.at(2)) { // SIZE= is present
      return FoldElementalIntrinsic<T, T, Int4, Int4>(context,
          std::move(funcRef),
          ScalarFunc<T, T, Int4, Int4>(
              [&](const Scalar<T> &i, const Scalar<Int4> &shift,
                  const Scalar<Int4> &size) -> Scalar<T> {
                // Errors are caught in intrinsics.cpp
                auto shiftVal{static_cast<int>(shift.ToInt64())};
                auto sizeVal{static_cast<int>(size.ToInt64())};
                return i.ISHFTC(shiftVal, sizeVal);
              }));
    } else { // no SIZE=
      return FoldElementalIntrinsic<T, T, Int4>(context, std::move(funcRef),
          ScalarFunc<T, T, Int4>(
              [&](const Scalar<T> &i, const Scalar<Int4> &count) -> Scalar<T> {
                auto countVal{static_cast<int>(count.ToInt64())};
                return i.ISHFTC(countVal);
              }));
    }
  } else if (name == "izext" || name == "jzext") {
    if (args.size() == 1) {
      if (auto *expr{UnwrapExpr<Expr<SomeInteger>>(args[0])}) {
        // Rewrite to IAND(INT(n,k),255_k) for k=KIND(T)
        intrinsic->name = "iand";
        auto converted{ConvertToType<T>(std::move(*expr))};
        *expr = Fold(context, Expr<SomeInteger>{std::move(converted)});
        args.emplace_back(AsGenericExpr(Expr<T>{Scalar<T>{255}}));
        return FoldIntrinsicFunction(context, std::move(funcRef));
      }
    }
  } else if (name == "lbound") {
    return LBOUND(context, std::move(funcRef));
  } else if (name == "leadz" || name == "trailz" || name == "poppar" ||
      name == "popcnt") {
    if (auto *sn{UnwrapExpr<Expr<SomeInteger>>(args[0])}) {
      return common::visit(
          [&funcRef, &context, &name](const auto &n) -> Expr<T> {
            using TI = typename std::decay_t<decltype(n)>::Result;
            if (name == "poppar") {
              return FoldElementalIntrinsic<T, TI>(context, std::move(funcRef),
                  ScalarFunc<T, TI>([](const Scalar<TI> &i) -> Scalar<T> {
                    return Scalar<T>{i.POPPAR() ? 1 : 0};
                  }));
            }
            auto fptr{&Scalar<TI>::LEADZ};
            if (name == "leadz") { // done in fptr definition
            } else if (name == "trailz") {
              fptr = &Scalar<TI>::TRAILZ;
            } else if (name == "popcnt") {
              fptr = &Scalar<TI>::POPCNT;
            } else {
              common::die(
                  "missing case to fold intrinsic function %s", name.c_str());
            }
            return FoldElementalIntrinsic<T, TI>(context, std::move(funcRef),
                // `i` should be declared as `const Scalar<TI> &`.
                // We declare it as `auto` to work around an MSVC bug:
                // https://developercommunity.visualstudio.com/t/Regression:-nested-closure-assumes-wrong/10130223
                ScalarFunc<T, TI>([&fptr](const auto &i) -> Scalar<T> {
                  return Scalar<T>{std::invoke(fptr, i)};
                }));
          },
          sn->u);
    } else {
      DIE("leadz argument must be integer");
    }
  } else if (name == "len") {
    if (auto *charExpr{UnwrapExpr<Expr<SomeCharacter>>(args[0])}) {
      return common::visit(
          [&](auto &kx) {
            if (auto len{kx.LEN()}) {
              if (IsScopeInvariantExpr(*len)) {
                return Fold(context, ConvertToType<T>(*std::move(len)));
              } else {
                return Expr<T>{std::move(funcRef)};
              }
            } else {
              return Expr<T>{std::move(funcRef)};
            }
          },
          charExpr->u);
    } else {
      DIE("len() argument must be of character type");
    }
  } else if (name == "len_trim") {
    if (auto *charExpr{UnwrapExpr<Expr<SomeCharacter>>(args[0])}) {
      return common::visit(
          [&](const auto &kch) -> Expr<T> {
            using TC = typename std::decay_t<decltype(kch)>::Result;
            return FoldElementalIntrinsic<T, TC>(context, std::move(funcRef),
                ScalarFunc<T, TC>{[](const Scalar<TC> &str) -> Scalar<T> {
                  return CharacterUtils<TC::kind>::LEN_TRIM(str);
                }});
          },
          charExpr->u);
    } else {
      DIE("len_trim() argument must be of character type");
    }
  } else if (name == "maskl" || name == "maskr") {
    // Argument can be of any kind but value has to be smaller than BIT_SIZE.
    // It can be safely converted to Int4 to simplify.
    const auto fptr{name == "maskl" ? &Scalar<T>::MASKL : &Scalar<T>::MASKR};
    return FoldElementalIntrinsic<T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, Int4>([&fptr](const Scalar<Int4> &places) -> Scalar<T> {
          return fptr(static_cast<int>(places.ToInt64()));
        }));
  } else if (name == "max") {
    return FoldMINorMAX(context, std::move(funcRef), Ordering::Greater);
  } else if (name == "max0" || name == "max1") {
    return RewriteSpecificMINorMAX(context, std::move(funcRef));
  } else if (name == "maxexponent") {
    if (auto *sx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return common::visit(
          [](const auto &x) {
            using TR = typename std::decay_t<decltype(x)>::Result;
            return Expr<T>{Scalar<TR>::MAXEXPONENT};
          },
          sx->u);
    }
  } else if (name == "maxloc") {
    return FoldLocation<WhichLocation::Maxloc, T>(context, std::move(funcRef));
  } else if (name == "maxval") {
    return FoldMaxvalMinval<T>(context, std::move(funcRef),
        RelationalOperator::GT, T::Scalar::Least());
  } else if (name == "merge") {
    return FoldMerge<T>(context, std::move(funcRef));
  } else if (name == "merge_bits") {
    return FoldElementalIntrinsic<T, T, T, T>(
        context, std::move(funcRef), &Scalar<T>::MERGE_BITS);
  } else if (name == "min") {
    return FoldMINorMAX(context, std::move(funcRef), Ordering::Less);
  } else if (name == "min0" || name == "min1") {
    return RewriteSpecificMINorMAX(context, std::move(funcRef));
  } else if (name == "minexponent") {
    if (auto *sx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return common::visit(
          [](const auto &x) {
            using TR = typename std::decay_t<decltype(x)>::Result;
            return Expr<T>{Scalar<TR>::MINEXPONENT};
          },
          sx->u);
    }
  } else if (name == "minloc") {
    return FoldLocation<WhichLocation::Minloc, T>(context, std::move(funcRef));
  } else if (name == "minval") {
    return FoldMaxvalMinval<T>(
        context, std::move(funcRef), RelationalOperator::LT, T::Scalar::HUGE());
  } else if (name == "mod") {
    return FoldElementalIntrinsic<T, T, T>(context, std::move(funcRef),
        ScalarFuncWithContext<T, T, T>(
            [](FoldingContext &context, const Scalar<T> &x,
                const Scalar<T> &y) -> Scalar<T> {
              auto quotRem{x.DivideSigned(y)};
              if (quotRem.divisionByZero) {
                context.messages().Say("mod() by zero"_warn_en_US);
              } else if (quotRem.overflow) {
                context.messages().Say("mod() folding overflowed"_warn_en_US);
              }
              return quotRem.remainder;
            }));
  } else if (name == "modulo") {
    return FoldElementalIntrinsic<T, T, T>(context, std::move(funcRef),
        ScalarFuncWithContext<T, T, T>(
            [](FoldingContext &context, const Scalar<T> &x,
                const Scalar<T> &y) -> Scalar<T> {
              auto result{x.MODULO(y)};
              if (result.overflow) {
                context.messages().Say(
                    "modulo() folding overflowed"_warn_en_US);
              }
              return result.value;
            }));
  } else if (name == "not") {
    return FoldElementalIntrinsic<T, T>(
        context, std::move(funcRef), &Scalar<T>::NOT);
  } else if (name == "precision") {
    if (const auto *cx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::PRECISION;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeComplex>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<typename ResultType<decltype(kx)>::Part>::PRECISION;
          },
          cx->u)};
    }
  } else if (name == "product") {
    return FoldProduct<T>(context, std::move(funcRef), Scalar<T>{1});
  } else if (name == "radix") {
    return Expr<T>{2};
  } else if (name == "range") {
    if (const auto *cx{UnwrapExpr<Expr<SomeInteger>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::RANGE;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::RANGE;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeComplex>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<typename ResultType<decltype(kx)>::Part>::RANGE;
          },
          cx->u)};
    }
  } else if (name == "rank") {
    if (const auto *array{UnwrapExpr<Expr<SomeType>>(args[0])}) {
      if (auto named{ExtractNamedEntity(*array)}) {
        const Symbol &symbol{named->GetLastSymbol()};
        if (IsAssumedRank(symbol)) {
          // DescriptorInquiry can only be placed in expression of kind
          // DescriptorInquiry::Result::kind.
          return ConvertToType<T>(Expr<
              Type<TypeCategory::Integer, DescriptorInquiry::Result::kind>>{
              DescriptorInquiry{*named, DescriptorInquiry::Field::Rank}});
        }
      }
      return Expr<T>{args[0].value().Rank()};
    }
    return Expr<T>{args[0].value().Rank()};
  } else if (name == "selected_char_kind") {
    if (const auto *chCon{UnwrapExpr<Constant<TypeOf<std::string>>>(args[0])}) {
      if (std::optional<std::string> value{chCon->GetScalarValue()}) {
        int defaultKind{
            context.defaults().GetDefaultKind(TypeCategory::Character)};
        return Expr<T>{SelectedCharKind(*value, defaultKind)};
      }
    }
  } else if (name == "selected_int_kind") {
    if (auto p{GetInt64Arg(args[0])}) {
      return Expr<T>{context.targetCharacteristics().SelectedIntKind(*p)};
    }
  } else if (name == "selected_real_kind" ||
      name == "__builtin_ieee_selected_real_kind") {
    if (auto p{GetInt64ArgOr(args[0], 0)}) {
      if (auto r{GetInt64ArgOr(args[1], 0)}) {
        if (auto radix{GetInt64ArgOr(args[2], 2)}) {
          return Expr<T>{
              context.targetCharacteristics().SelectedRealKind(*p, *r, *radix)};
        }
      }
    }
  } else if (name == "shape") {
    if (auto shape{GetContextFreeShape(context, args[0])}) {
      if (auto shapeExpr{AsExtentArrayExpr(*shape)}) {
        return Fold(context, ConvertToType<T>(std::move(*shapeExpr)));
      }
    }
  } else if (name == "shifta" || name == "shiftr" || name == "shiftl") {
    // Second argument can be of any kind.  However, it must not be greater
    // than BIT_SIZE.  It can be converted to Int4 to simplify.
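    // SHIFTL and SHIFTR are logical shifts that fill with zero bits, while
    // SHIFTA is an arithmetic right shift that replicates the sign bit.
    // For example, SHIFTL(1, 3) folds to 8 and SHIFTA(-8, 2) folds to -2.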
    auto fptr{&Scalar<T>::SHIFTA};
    if (name == "shifta") { // done in fptr definition
    } else if (name == "shiftr") {
      fptr = &Scalar<T>::SHIFTR;
    } else if (name == "shiftl") {
      fptr = &Scalar<T>::SHIFTL;
    } else {
      common::die("missing case to fold intrinsic function %s", name.c_str());
    }
    return FoldElementalIntrinsic<T, T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, Int4>([&](const Scalar<T> &i,
                                   const Scalar<Int4> &pos) -> Scalar<T> {
          auto posVal{static_cast<int>(pos.ToInt64())};
          if (posVal < 0) {
            context.messages().Say(
                "SHIFT=%d count for %s is negative"_err_en_US, posVal, name);
          } else if (posVal > i.bits) {
            context.messages().Say(
                "SHIFT=%d count for %s is greater than %d"_err_en_US, posVal,
                name, i.bits);
          }
          return std::invoke(fptr, i, posVal);
        }));
  } else if (name == "sign") {
    return FoldElementalIntrinsic<T, T, T>(context, std::move(funcRef),
        ScalarFunc<T, T, T>(
            [&context](const Scalar<T> &j, const Scalar<T> &k) -> Scalar<T> {
              typename Scalar<T>::ValueWithOverflow result{j.SIGN(k)};
              if (result.overflow) {
                context.messages().Say(
                    "sign(integer(kind=%d)) folding overflowed"_warn_en_US,
                    KIND);
              }
              return result.value;
            }));
  } else if (name == "size") {
    if (auto shape{GetContextFreeShape(context, args[0])}) {
      if (auto &dimArg{args[1]}) { // DIM= is present, get one extent
        if (auto dim{GetInt64Arg(args[1])}) {
          int rank{GetRank(*shape)};
          if (*dim >= 1 && *dim <= rank) {
            const Symbol *symbol{UnwrapWholeSymbolDataRef(args[0])};
            if (symbol && IsAssumedSizeArray(*symbol) && *dim == rank) {
              context.messages().Say(
                  "size(array,dim=%jd) of last dimension is not available for rank-%d assumed-size array dummy argument"_err_en_US,
                  *dim, rank);
              return MakeInvalidIntrinsic<T>(std::move(funcRef));
            } else if (auto &extent{shape->at(*dim - 1)}) {
              return Fold(context, ConvertToType<T>(std::move(*extent)));
            }
          } else {
            context.messages().Say(
                "size(array,dim=%jd) dimension is out of range for rank-%d array"_warn_en_US,
                *dim, rank);
          }
        }
      } else if (auto extents{common::AllElementsPresent(std::move(*shape))}) {
        // DIM= is absent; compute PRODUCT(SHAPE())
        ExtentExpr product{1};
        for (auto &&extent : std::move(*extents)) {
          product = std::move(product) * std::move(extent);
        }
        return Expr<T>{ConvertToType<T>(Fold(context, std::move(product)))};
      }
    }
  } else if (name == "sizeof") { // in bytes; extension
    if (auto info{
            characteristics::TypeAndShape::Characterize(args[0], context)}) {
      if (auto bytes{info->MeasureSizeInBytes(context)}) {
        return Expr<T>{Fold(context, ConvertToType<T>(std::move(*bytes)))};
      }
    }
  } else if (name == "storage_size") { // in bits
    if (auto info{
            characteristics::TypeAndShape::Characterize(args[0], context)}) {
      if (auto bytes{info->MeasureElementSizeInBytes(context, true)}) {
        return Expr<T>{
            Fold(context, Expr<T>{8} * ConvertToType<T>(std::move(*bytes)))};
      }
    }
  } else if (name == "sum") {
    return FoldSum<T>(context, std::move(funcRef));
  } else if (name == "ubound") {
    return UBOUND(context, std::move(funcRef));
  }
  // TODO: dot_product, matmul, sign
  return Expr<T>{std::move(funcRef)};
}

// Substitutes a bare type parameter reference with its value if it has one now
// in an instantiation.  Bare LEN type parameters are substituted only when
// the known value is constant.
Expr<TypeParamInquiry::Result> FoldOperation(
    FoldingContext &context, TypeParamInquiry &&inquiry) {
  std::optional<NamedEntity> base{inquiry.base()};
  parser::CharBlock parameterName{inquiry.parameter().name()};
  if (base) {
    // Handling "designator%typeParam".
    // Get the value of the type parameter from the instantiation of the base.
    if (const semantics::DeclTypeSpec *
        declType{base->GetLastSymbol().GetType()}) {
      if (const semantics::ParamValue *
          paramValue{
              declType->derivedTypeSpec().FindParameter(parameterName)}) {
        const semantics::MaybeIntExpr &paramExpr{paramValue->GetExplicit()};
        if (paramExpr && IsConstantExpr(*paramExpr)) {
          Expr<SomeInteger> intExpr{*paramExpr};
          return Fold(context,
              ConvertToType<TypeParamInquiry::Result>(std::move(intExpr)));
        }
      }
    }
  } else {
    // A "bare" type parameter: replace with its value, if that's now known
    // in a current derived type instantiation, for KIND type parameters.
    if (const auto *pdt{context.pdtInstance()}) {
      bool isLen{false};
      if (const semantics::Scope * scope{context.pdtInstance()->scope()}) {
        auto iter{scope->find(parameterName)};
        if (iter != scope->end()) {
          const Symbol &symbol{*iter->second};
          const auto *details{symbol.detailsIf<semantics::TypeParamDetails>()};
          if (details) {
            isLen = details->attr() == common::TypeParamAttr::Len;
            const semantics::MaybeIntExpr &initExpr{details->init()};
            if (initExpr && IsConstantExpr(*initExpr) &&
                (!isLen || ToInt64(*initExpr))) {
              Expr<SomeInteger> expr{*initExpr};
              return Fold(context,
                  ConvertToType<TypeParamInquiry::Result>(std::move(expr)));
            }
          }
        }
      }
      if (const auto *value{pdt->FindParameter(parameterName)}) {
        if (value->isExplicit()) {
          auto folded{Fold(context,
              AsExpr(ConvertToType<TypeParamInquiry::Result>(
                  Expr<SomeInteger>{value->GetExplicit().value()})))};
          if (!isLen || ToInt64(folded)) {
            return folded;
          }
        }
      }
    }
  }
  return AsExpr(std::move(inquiry));
}

std::optional<std::int64_t> ToInt64(const Expr<SomeInteger> &expr) {
  return common::visit(
      [](const auto &kindExpr) { return ToInt64(kindExpr); }, expr.u);
}

std::optional<std::int64_t> ToInt64(const Expr<SomeType> &expr) {
  if (const auto *intExpr{UnwrapExpr<Expr<SomeInteger>>(expr)}) {
    return ToInt64(*intExpr);
  } else {
    return std::nullopt;
  }
}

#ifdef _MSC_VER // disable bogus warning about missing definitions
#pragma warning(disable : 4661)
#endif
FOR_EACH_INTEGER_KIND(template class ExpressionBase, )
template class ExpressionBase<SomeInteger>;
} // namespace Fortran::evaluate