MCAsmBackend::applyFixup: Change Data to indicate the relocated location
`Data` now references the first byte of the fixup offset within the current fragment. MCAssembler::layout asserts that the fixup offset lies within either the fixed-size content or the optional variable-size tail, which is the most the generic code can validate without knowing the target-specific fixup size. Many backends' applyFixup implementations assert:

```
assert(Offset + Size <= F.getSize() && "Invalid fixup offset!");
```

This refactoring allows a subsequent change to move the fixed-size content outside of MCSection::ContentStorage, fixing the -fsanitize=pointer-overflow issue of #150846.

Pull Request: https://github.com/llvm/llvm-project/pull/151724
parent 300e41d72f · commit d3589edafc
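For illustration, a minimal standalone sketch of the convention this change adopts (not the LLVM sources themselves; the helper name is hypothetical): the caller resolves the fixup offset and passes a pointer to the relocated location, so a backend indexes from `Data[0]` instead of `Data[Offset + i]`:

```cpp
// Standalone sketch of the new convention: `Data` already points at the
// first byte of the fixup, so the backend masks bytes in from index 0.
#include <cstdint>

// Hypothetical helper mirroring the common little-endian backend loop.
static void applyLittleEndianFixup(uint8_t *Data, uint64_t Value,
                                   unsigned NumBytes) {
  for (unsigned I = 0; I != NumBytes; ++I)
    Data[I] |= uint8_t((Value >> (I * 8)) & 0xff);
}

int main() {
  uint8_t Contents[8] = {};
  const unsigned FixupOffset = 4; // offset of the fixup within the fragment
  // The caller (e.g. MCAssembler::layout) resolves the offset up front:
  applyLittleEndianFixup(Contents + FixupOffset, 0x1234, 2);
  return (Contents[4] == 0x34 && Contents[5] == 0x12) ? 0 : 1;
}
```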
@@ -117,14 +117,13 @@ public:
void maybeAddReloc(const MCFragment &, const MCFixup &, const MCValue &,
uint64_t &Value, bool IsResolved);

/// Determine if a relocation is required. In addition,
/// Apply the \p Value for given \p Fixup into the provided data fragment, at
/// the offset specified by the fixup and following the fixup kind as
/// appropriate. Errors (such as an out of range fixup value) should be
/// reported via \p Ctx.
// Determine if a relocation is required. In addition, apply `Value` to the
// `Data` fragment at the specified fixup offset if applicable. `Data` points
// to the first byte of the fixup offset, which may be at the content's end if
// the fixup is zero-sized.
virtual void applyFixup(const MCFragment &, const MCFixup &,
const MCValue &Target, MutableArrayRef<char> Data,
uint64_t Value, bool IsResolved) = 0;
const MCValue &Target, uint8_t *Data, uint64_t Value,
bool IsResolved) = 0;

/// @}
@@ -99,8 +99,7 @@ private:
/// \param RecordReloc Record relocation if needed.
/// relocation.
bool evaluateFixup(const MCFragment &F, MCFixup &Fixup, MCValue &Target,
uint64_t &Value, bool RecordReloc,
MutableArrayRef<char> Contents) const;
uint64_t &Value, bool RecordReloc, uint8_t *Data) const;

/// Check whether a fixup can be satisfied, or whether it needs to be relaxed
/// (increased in size, in order to hold its value correctly).
@@ -141,8 +141,7 @@ bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {

bool MCAssembler::evaluateFixup(const MCFragment &F, MCFixup &Fixup,
MCValue &Target, uint64_t &Value,
bool RecordReloc,
MutableArrayRef<char> Contents) const {
bool RecordReloc, uint8_t *Data) const {
if (RecordReloc)
++stats::Fixups;

@@ -187,7 +186,7 @@ bool MCAssembler::evaluateFixup(const MCFragment &F, MCFixup &Fixup,

if (IsResolved && mc::isRelocRelocation(Fixup.getKind()))
IsResolved = false;
getBackend().applyFixup(F, Fixup, Target, Contents, Value, IsResolved);
getBackend().applyFixup(F, Fixup, Target, Data, Value, IsResolved);
return true;
}
@@ -705,21 +704,25 @@ void MCAssembler::layout() {
for (MCFixup &Fixup : F.getFixups()) {
uint64_t FixedValue;
MCValue Target;
assert(mc::isRelocRelocation(Fixup.getKind()) ||
Fixup.getOffset() <= F.getFixedSize());
auto *Data =
reinterpret_cast<uint8_t *>(Contents.data() + Fixup.getOffset());
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Contents);
/*RecordReloc=*/true, Data);
}
if (F.getVarFixups().size()) {
// In the variable part, fixup offsets are relative to the fixed part's
// start. Extend the variable contents to the left to account for the
// fixed part size.
Contents = MutableArrayRef(F.getParent()->ContentStorage)
.slice(F.VarContentStart - Contents.size(), F.getSize());
for (MCFixup &Fixup : F.getVarFixups()) {
uint64_t FixedValue;
MCValue Target;
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Contents);
}
// In the variable part, fixup offsets are relative to the fixed part's
// start.
for (MCFixup &Fixup : F.getVarFixups()) {
uint64_t FixedValue;
MCValue Target;
assert(mc::isRelocRelocation(Fixup.getKind()) ||
(Fixup.getOffset() >= F.getFixedSize() &&
Fixup.getOffset() <= F.getSize()));
auto *Data = reinterpret_cast<uint8_t *>(
F.getVarContents().data() + (Fixup.getOffset() - F.getFixedSize()));
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Data);
}
}
}
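A condensed view of the caller-side change above, isolating just the new code path (a sketch reusing the hunk's names, not a verbatim excerpt):

```cpp
// Sketch: layout() now resolves the fixup offset and passes a raw pointer.
for (MCFixup &Fixup : F.getFixups()) {
  // Fixed-size part: the offset is relative to the fragment start.
  auto *Data =
      reinterpret_cast<uint8_t *>(Contents.data() + Fixup.getOffset());
  evaluateFixup(F, Fixup, Target, FixedValue, /*RecordReloc=*/true, Data);
}
// Variable-size tail: offsets stay relative to the fixed part's start, so
// subtract the fixed size before indexing the variable contents.
for (MCFixup &Fixup : F.getVarFixups()) {
  auto *Data = reinterpret_cast<uint8_t *>(
      F.getVarContents().data() + (Fixup.getOffset() - F.getFixedSize()));
  evaluateFixup(F, Fixup, Target, FixedValue, /*RecordReloc=*/true, Data);
}
```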
@@ -79,8 +79,7 @@ public:
}

void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
bool IsResolved) override;
uint8_t *Data, uint64_t Value, bool IsResolved) override;

bool fixupNeedsRelaxation(const MCFixup &Fixup,
uint64_t Value) const override;
@@ -421,9 +420,8 @@ static bool shouldForceRelocation(const MCFixup &Fixup) {
}

void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
bool IsResolved) {
const MCValue &Target, uint8_t *Data,
uint64_t Value, bool IsResolved) {
if (shouldForceRelocation(Fixup))
IsResolved = false;
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
@@ -460,8 +458,8 @@ void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
// Shift the value into position.
Value <<= Info.TargetOffset;

unsigned Offset = Fixup.getOffset();
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
"Invalid fixup offset!");

// Used to point to big endian bytes.
unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());
@@ -471,15 +469,16 @@ void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
if (FulleSizeInBytes == 0) {
// Handle as little-endian
for (unsigned i = 0; i != NumBytes; ++i) {
Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
Data[i] |= uint8_t((Value >> (i * 8)) & 0xff);
}
} else {
// Handle as big-endian
assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
assert(Fixup.getOffset() + FulleSizeInBytes <= F.getSize() &&
"Invalid fixup size!");
assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
for (unsigned i = 0; i != NumBytes; ++i) {
unsigned Idx = FulleSizeInBytes - 1 - i;
Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
}
}

@@ -492,9 +491,9 @@ void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
// If the immediate is negative, generate MOVN else MOVZ.
// (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
if (SignedValue < 0)
Data[Offset + 3] &= ~(1 << 6);
Data[3] &= ~(1 << 6);
else
Data[Offset + 3] |= (1 << 6);
Data[3] |= (1 << 6);
}
}
@ -33,8 +33,7 @@ public:
|
||||
AMDGPUAsmBackend(const Target &T) : MCAsmBackend(llvm::endianness::little) {}
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
bool fixupNeedsRelaxation(const MCFixup &Fixup,
|
||||
uint64_t Value) const override;
|
||||
|
||||
@ -129,9 +128,8 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
|
||||
}
|
||||
|
||||
void AMDGPUAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (Target.getSpecifier())
|
||||
IsResolved = false;
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
@ -148,13 +146,13 @@ void AMDGPUAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
Value <<= Info.TargetOffset;
|
||||
|
||||
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
|
||||
uint32_t Offset = Fixup.getOffset();
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
|
||||
// For each byte of the fragment that the fixup touches, mask in the bits from
|
||||
// the fixup value.
|
||||
for (unsigned i = 0; i != NumBytes; ++i)
|
||||
Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
|
||||
Data[i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
|
||||
}
|
||||
|
||||
std::optional<MCFixupKind>
|
||||
|
@ -1108,9 +1108,8 @@ std::optional<bool> ARMAsmBackend::evaluateFixup(const MCFragment &F,
|
||||
}
|
||||
|
||||
void ARMAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (IsResolved && shouldForceRelocation(Fixup, Target))
|
||||
IsResolved = false;
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
@ -1124,14 +1123,15 @@ void ARMAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
return; // Doesn't change encoding.
|
||||
const unsigned NumBytes = getFixupKindNumBytes(Kind);
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
|
||||
// Used to point to big endian bytes.
|
||||
unsigned FullSizeBytes;
|
||||
if (Endian == llvm::endianness::big) {
|
||||
FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
|
||||
assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
|
||||
assert(Fixup.getOffset() + FullSizeBytes <= F.getSize() &&
|
||||
"Invalid fixup size!");
|
||||
assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
|
||||
}
|
||||
|
||||
@ -1141,7 +1141,7 @@ void ARMAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
for (unsigned i = 0; i != NumBytes; ++i) {
|
||||
unsigned Idx =
|
||||
Endian == llvm::endianness::little ? i : (FullSizeBytes - 1 - i);
|
||||
Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -40,8 +40,7 @@ public:
|
||||
std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
|
||||
uint64_t &) override;
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
unsigned getRelaxedOpcode(unsigned Op, const MCSubtargetInfo &STI) const;
|
||||
|
||||
|
@ -368,9 +368,8 @@ AVRAsmBackend::createObjectTargetWriter() const {
|
||||
}
|
||||
|
||||
void AVRAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
// AVR sets the fixup value to bypass the assembly time overflow with a
|
||||
// relocation.
|
||||
if (IsResolved) {
|
||||
@ -397,14 +396,14 @@ void AVRAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
// Shift the value into position.
|
||||
Value <<= Info.TargetOffset;
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
// bits from the fixup value.
|
||||
for (unsigned i = 0; i < NumBytes; ++i) {
|
||||
uint8_t mask = (((Value >> (i * 8)) & 0xff));
|
||||
Data[Offset + i] |= mask;
|
||||
Data[i] |= mask;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -38,8 +38,7 @@ public:
|
||||
createObjectTargetWriter() const override;
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
|
||||
MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
|
||||
|
@ -27,8 +27,7 @@ public:
|
||||
~BPFAsmBackend() override = default;
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
createObjectTargetWriter() const override;
|
||||
@ -66,35 +65,32 @@ bool BPFAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
|
||||
}
|
||||
|
||||
void BPFAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
if (Fixup.getKind() == FK_SecRel_8) {
|
||||
// The Value is 0 for global variables, and the in-section offset
|
||||
// for static variables. Write to the immediate field of the inst.
|
||||
assert(Value <= UINT32_MAX);
|
||||
support::endian::write<uint32_t>(&Data[Fixup.getOffset() + 4],
|
||||
static_cast<uint32_t>(Value),
|
||||
support::endian::write<uint32_t>(Data + 4, static_cast<uint32_t>(Value),
|
||||
Endian);
|
||||
} else if (Fixup.getKind() == FK_Data_4 && !Fixup.isPCRel()) {
|
||||
support::endian::write<uint32_t>(&Data[Fixup.getOffset()], Value, Endian);
|
||||
support::endian::write<uint32_t>(Data, Value, Endian);
|
||||
} else if (Fixup.getKind() == FK_Data_8) {
|
||||
support::endian::write<uint64_t>(&Data[Fixup.getOffset()], Value, Endian);
|
||||
support::endian::write<uint64_t>(Data, Value, Endian);
|
||||
} else if (Fixup.getKind() == FK_Data_4 && Fixup.isPCRel()) {
|
||||
Value = (uint32_t)((Value - 8) / 8);
|
||||
if (Endian == llvm::endianness::little) {
|
||||
Data[Fixup.getOffset() + 1] = 0x10;
|
||||
support::endian::write32le(&Data[Fixup.getOffset() + 4], Value);
|
||||
Data[1] = 0x10;
|
||||
support::endian::write32le(Data + 4, Value);
|
||||
} else {
|
||||
Data[Fixup.getOffset() + 1] = 0x1;
|
||||
support::endian::write32be(&Data[Fixup.getOffset() + 4], Value);
|
||||
Data[1] = 0x1;
|
||||
support::endian::write32be(Data + 4, Value);
|
||||
}
|
||||
} else if (Fixup.getKind() == BPF::FK_BPF_PCRel_4) {
|
||||
// The input Value represents the number of bytes.
|
||||
Value = (uint32_t)((Value - 8) / 8);
|
||||
support::endian::write<uint32_t>(&Data[Fixup.getOffset() + 4], Value,
|
||||
Endian);
|
||||
support::endian::write<uint32_t>(Data + 4, Value, Endian);
|
||||
} else {
|
||||
assert(Fixup.getKind() == FK_Data_2 && Fixup.isPCRel());
|
||||
|
||||
@ -103,8 +99,7 @@ void BPFAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
report_fatal_error("Branch target out of insn range");
|
||||
|
||||
Value = (uint16_t)((Value - 8) / 8);
|
||||
support::endian::write<uint16_t>(&Data[Fixup.getOffset() + 2], Value,
|
||||
Endian);
|
||||
support::endian::write<uint16_t>(Data + 2, Value, Endian);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -197,9 +197,8 @@ std::optional<bool> CSKYAsmBackend::evaluateFixup(const MCFragment &F,
|
||||
}
|
||||
|
||||
void CSKYAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (IsResolved && shouldForceRelocation(Fixup, Target))
|
||||
IsResolved = false;
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
@ -217,10 +216,10 @@ void CSKYAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
// Shift the value into position.
|
||||
Value <<= Info.TargetOffset;
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
|
||||
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
// bits from the fixup value.
|
||||
@ -228,14 +227,14 @@ void CSKYAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
bool IsInstFixup = (Kind >= FirstTargetFixupKind);
|
||||
|
||||
if (IsLittleEndian && IsInstFixup && (NumBytes == 4)) {
|
||||
Data[Offset + 0] |= uint8_t((Value >> 16) & 0xff);
|
||||
Data[Offset + 1] |= uint8_t((Value >> 24) & 0xff);
|
||||
Data[Offset + 2] |= uint8_t(Value & 0xff);
|
||||
Data[Offset + 3] |= uint8_t((Value >> 8) & 0xff);
|
||||
Data[0] |= uint8_t((Value >> 16) & 0xff);
|
||||
Data[1] |= uint8_t((Value >> 24) & 0xff);
|
||||
Data[2] |= uint8_t(Value & 0xff);
|
||||
Data[3] |= uint8_t((Value >> 8) & 0xff);
|
||||
} else {
|
||||
for (unsigned I = 0; I != NumBytes; I++) {
|
||||
unsigned Idx = IsLittleEndian ? I : (NumBytes - 1 - I);
|
||||
Data[Offset + Idx] |= uint8_t((Value >> (I * 8)) & 0xff);
|
||||
Data[Idx] |= uint8_t((Value >> (I * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -25,8 +25,7 @@ public:
|
||||
std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
|
||||
uint64_t &) override;
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
|
||||
|
||||
|
@ -78,8 +78,7 @@ public:
|
||||
~DXILAsmBackend() override = default;
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override {}
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override {}
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
createObjectTargetWriter() const override {
|
||||
|
@ -402,8 +402,7 @@ public:
|
||||
}
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &,
|
||||
MutableArrayRef<char> Data, uint64_t FixupValue,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t FixupValue, bool IsResolved) override;
|
||||
|
||||
bool isInstRelaxable(MCInst const &HMI) const {
|
||||
const MCInstrDesc &MCID = HexagonMCInstrInfo::getDesc(*MCII, HMI);
|
||||
@ -649,8 +648,7 @@ public:
|
||||
} // namespace
|
||||
|
||||
void HexagonAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data,
|
||||
const MCValue &Target, uint8_t *InstAddr,
|
||||
uint64_t FixupValue, bool IsResolved) {
|
||||
if (IsResolved && shouldForceRelocation(Fixup))
|
||||
IsResolved = false;
|
||||
@ -667,10 +665,9 @@ void HexagonAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
|
||||
// LLVM gives us an encoded value, we have to convert it back
|
||||
// to a real offset before we can use it.
|
||||
uint32_t Offset = Fixup.getOffset();
|
||||
unsigned NumBytes = getFixupKindNumBytes(Kind);
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
char *InstAddr = Data.data() + Offset;
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
|
||||
Value = adjustFixupValue(Kind, FixupValue);
|
||||
if (!Value)
|
||||
@ -757,8 +754,8 @@ void HexagonAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
uint32_t OldData = 0; for (unsigned i = 0; i < NumBytes; i++) OldData |=
|
||||
(InstAddr[i] << (i * 8)) & (0xff << (i * 8));
|
||||
dbgs() << "\tBValue=0x"; dbgs().write_hex(Value) << ": AValue=0x";
|
||||
dbgs().write_hex(FixupValue)
|
||||
<< ": Offset=" << Offset << ": Size=" << Data.size() << ": OInst=0x";
|
||||
dbgs().write_hex(FixupValue) << ": Offset=" << Fixup.getOffset()
|
||||
<< ": Size=" << F.getSize() << ": OInst=0x";
|
||||
dbgs().write_hex(OldData) << ": Reloc=0x"; dbgs().write_hex(Reloc););
|
||||
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
|
@ -48,8 +48,7 @@ public:
|
||||
: MCAsmBackend(llvm::endianness::big), OSType(OST) {}
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
createObjectTargetWriter() const override;
|
||||
@ -72,9 +71,8 @@ bool LanaiAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
|
||||
}
|
||||
|
||||
void LanaiAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (!IsResolved)
|
||||
Asm->getWriter().recordRelocation(F, Fixup, Target, Value);
|
||||
|
||||
@ -85,7 +83,6 @@ void LanaiAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
|
||||
// Where in the object and where the number of bytes that need
|
||||
// fixing up
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
unsigned NumBytes = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
|
||||
unsigned FullSize = 4;
|
||||
|
||||
@ -95,8 +92,7 @@ void LanaiAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
// Load instruction and apply value
|
||||
for (unsigned i = 0; i != NumBytes; ++i) {
|
||||
unsigned Idx = (FullSize - 1 - i);
|
||||
CurVal |= static_cast<uint64_t>(static_cast<uint8_t>(Data[Offset + Idx]))
|
||||
<< (i * 8);
|
||||
CurVal |= static_cast<uint64_t>(static_cast<uint8_t>(Data[Idx])) << (i * 8);
|
||||
}
|
||||
|
||||
uint64_t Mask =
|
||||
@ -106,7 +102,7 @@ void LanaiAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
// Write out the fixed up bytes back to the code/data bits.
|
||||
for (unsigned i = 0; i != NumBytes; ++i) {
|
||||
unsigned Idx = (FullSize - 1 - i);
|
||||
Data[Offset + Idx] = static_cast<uint8_t>((CurVal >> (i * 8)) & 0xff);
|
||||
Data[Idx] = static_cast<uint8_t>((CurVal >> (i * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -131,19 +131,18 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
|
||||
}
|
||||
}
|
||||
|
||||
static void fixupLeb128(MCContext &Ctx, const MCFixup &Fixup,
|
||||
MutableArrayRef<char> Data, uint64_t Value) {
|
||||
static void fixupLeb128(MCContext &Ctx, const MCFixup &Fixup, uint8_t *Data,
|
||||
uint64_t Value) {
|
||||
unsigned I;
|
||||
for (I = 0; I != Data.size() && Value; ++I, Value >>= 7)
|
||||
for (I = 0; Value; ++I, Value >>= 7)
|
||||
Data[I] |= uint8_t(Value & 0x7f);
|
||||
if (Value)
|
||||
Ctx.reportError(Fixup.getLoc(), "Invalid uleb128 value!");
|
||||
}
|
||||
|
||||
void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (IsResolved && shouldForceRelocation(Fixup, Target))
|
||||
IsResolved = false;
|
||||
IsResolved = addReloc(F, Fixup, Target, Value, IsResolved);
|
||||
@ -166,14 +165,14 @@ void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
// Shift the value into position.
|
||||
Value <<= Info.TargetOffset;
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
|
||||
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
// bits from the fixup value.
|
||||
for (unsigned I = 0; I != NumBytes; ++I) {
|
||||
Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
|
||||
Data[I] |= uint8_t((Value >> (I * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -42,8 +42,7 @@ public:
|
||||
uint64_t &FixedValue, bool IsResolved);
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
bool shouldForceRelocation(const MCFixup &Fixup, const MCValue &Target);
|
||||
|
||||
|
@ -53,8 +53,7 @@ public:
|
||||
.Default(false)) {}
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
bool mayNeedRelaxation(unsigned Opcode, ArrayRef<MCOperand> Operands,
|
||||
const MCSubtargetInfo &STI) const override;
|
||||
@ -78,9 +77,8 @@ public:
|
||||
} // end anonymous namespace
|
||||
|
||||
void M68kAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (!IsResolved)
|
||||
Asm->getWriter().recordRelocation(F, Fixup, Target, Value);
|
||||
|
||||
@ -95,8 +93,7 @@ void M68kAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
|
||||
// Write in Big Endian
|
||||
for (unsigned i = 0; i != Size; ++i)
|
||||
Data[Fixup.getOffset() + i] =
|
||||
uint8_t(static_cast<int64_t>(Value) >> ((Size - i - 1) * 8));
|
||||
Data[i] = uint8_t(static_cast<int64_t>(Value) >> ((Size - i - 1) * 8));
|
||||
}
|
||||
|
||||
/// cc—Carry clear GE—Greater than or equal
|
||||
|
@ -36,8 +36,7 @@ public:
|
||||
~MSP430AsmBackend() override = default;
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
createObjectTargetWriter() const override {
|
||||
@ -105,9 +104,8 @@ uint64_t MSP430AsmBackend::adjustFixupValue(const MCFixup &Fixup,
|
||||
}
|
||||
|
||||
void MSP430AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
Value = adjustFixupValue(Fixup, Value, getContext());
|
||||
MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
|
||||
@ -117,15 +115,14 @@ void MSP430AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
// Shift the value into position.
|
||||
Value <<= Info.TargetOffset;
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
|
||||
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
// bits from the fixup value.
|
||||
for (unsigned i = 0; i != NumBytes; ++i) {
|
||||
Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
Data[i] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -283,9 +283,8 @@ static bool shouldForceRelocation(const MCFixup &Fixup) {
|
||||
/// data fragment, at the offset specified by the fixup and following the
|
||||
/// fixup kind as appropriate.
|
||||
void MipsAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (shouldForceRelocation(Fixup))
|
||||
IsResolved = false;
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
@ -297,7 +296,6 @@ void MipsAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
return; // Doesn't change encoding.
|
||||
|
||||
// Where do we start in the object
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
// Number of bytes we need to fixup
|
||||
unsigned NumBytes = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
|
||||
// Used to point to big endian bytes
|
||||
@ -328,7 +326,7 @@ void MipsAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
unsigned Idx = Endian == llvm::endianness::little
|
||||
? (microMipsLEByteOrder ? calculateMMLEIndex(i) : i)
|
||||
: (FullSize - 1 - i);
|
||||
CurVal |= (uint64_t)((uint8_t)Data[Offset + Idx]) << (i*8);
|
||||
CurVal |= (uint64_t)((uint8_t)Data[Idx]) << (i * 8);
|
||||
}
|
||||
|
||||
uint64_t Mask = ((uint64_t)(-1) >>
|
||||
@ -340,7 +338,7 @@ void MipsAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
unsigned Idx = Endian == llvm::endianness::little
|
||||
? (microMipsLEByteOrder ? calculateMMLEIndex(i) : i)
|
||||
: (FullSize - 1 - i);
|
||||
Data[Offset + Idx] = (uint8_t)((CurVal >> (i*8)) & 0xff);
|
||||
Data[Idx] = (uint8_t)((CurVal >> (i * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -40,8 +40,7 @@ public:
|
||||
createObjectTargetWriter() const override;
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
|
||||
MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
|
||||
|
@ -93,8 +93,8 @@ public:
|
||||
MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &Fixup,
|
||||
const MCValue &Target, MutableArrayRef<char> Data,
|
||||
uint64_t Value, bool IsResolved) override;
|
||||
const MCValue &Target, uint8_t *Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
|
||||
bool shouldForceRelocation(const MCFixup &Fixup, const MCValue &Target) {
|
||||
// If there is a @ specifier, unless it is optimized out (e.g. constant @l),
|
||||
@ -185,9 +185,8 @@ MCFixupKindInfo PPCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
|
||||
}
|
||||
|
||||
void PPCAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &TargetVal,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &TargetVal, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
// In PPC64 ELFv1, .quad .TOC.@tocbase in the .opd section is expected to
|
||||
// reference the null symbol.
|
||||
auto Target = TargetVal;
|
||||
@ -205,7 +204,6 @@ void PPCAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
if (!Value)
|
||||
return; // Doesn't change encoding.
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
unsigned NumBytes = getFixupKindNumBytes(Kind);
|
||||
|
||||
// For each byte of the fragment that the fixup touches, mask in the bits
|
||||
@ -213,7 +211,7 @@ void PPCAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
// bitfields above.
|
||||
for (unsigned i = 0; i != NumBytes; ++i) {
|
||||
unsigned Idx = Endian == llvm::endianness::little ? i : (NumBytes - 1 - i);
|
||||
Data[Offset + i] |= uint8_t((Value >> (Idx * 8)) & 0xff);
|
||||
Data[i] |= uint8_t((Value >> (Idx * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -881,9 +881,8 @@ bool RISCVAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup,
|
||||
}
|
||||
|
||||
void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
IsResolved = addReloc(F, Fixup, Target, Value, IsResolved);
|
||||
MCFixupKind Kind = Fixup.getKind();
|
||||
if (mc::isRelocation(Kind))
|
||||
@ -898,15 +897,14 @@ void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
// Shift the value into position.
|
||||
Value <<= Info.TargetOffset;
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
|
||||
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
// bits from the fixup value.
|
||||
for (unsigned i = 0; i != NumBytes; ++i) {
|
||||
Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
Data[i] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -46,8 +46,7 @@ public:
|
||||
void maybeAddVendorReloc(const MCFragment &, const MCFixup &);
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
createObjectTargetWriter() const override;
|
||||
|
@ -21,8 +21,7 @@ public:
|
||||
SPIRVAsmBackend(llvm::endianness Endian) : MCAsmBackend(Endian) {}
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override {}
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override {}
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
createObjectTargetWriter() const override {
|
||||
|
@ -127,8 +127,7 @@ public:
|
||||
std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
|
||||
MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
bool writeNopData(raw_ostream &OS, uint64_t Count,
|
||||
const MCSubtargetInfo *STI) const override {
|
||||
@ -253,21 +252,19 @@ MCFixupKindInfo SparcAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
|
||||
}
|
||||
|
||||
void SparcAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
if (!IsResolved)
|
||||
return;
|
||||
Value = adjustFixupValue(Fixup.getKind(), Value);
|
||||
|
||||
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
// bits from the fixup value.
|
||||
for (unsigned i = 0; i != NumBytes; ++i) {
|
||||
unsigned Idx = Endian == llvm::endianness::little ? i : (NumBytes - 1) - i;
|
||||
Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -113,8 +113,7 @@ public:
|
||||
std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
|
||||
MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
bool writeNopData(raw_ostream &OS, uint64_t Count,
|
||||
const MCSubtargetInfo *STI) const override;
|
||||
};
|
||||
@ -152,20 +151,18 @@ MCFixupKindInfo SystemZMCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
|
||||
}
|
||||
|
||||
void SystemZMCAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (Target.getSpecifier())
|
||||
IsResolved = false;
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
MCFixupKind Kind = Fixup.getKind();
|
||||
if (mc::isRelocation(Kind))
|
||||
return;
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
unsigned BitSize = getFixupKindInfo(Kind).TargetSize;
|
||||
unsigned Size = (BitSize + 7) / 8;
|
||||
|
||||
assert(Offset + Size <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + Size <= F.getSize() && "Invalid fixup offset!");
|
||||
|
||||
// Big-endian insertion of Size bytes.
|
||||
Value = extractBitsForFixup(Kind, Value, Fixup, getContext());
|
||||
@ -173,7 +170,7 @@ void SystemZMCAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
Value &= ((uint64_t)1 << BitSize) - 1;
|
||||
unsigned ShiftValue = (Size * 8) - 8;
|
||||
for (unsigned I = 0; I != Size; ++I) {
|
||||
Data[Offset + I] |= uint8_t(Value >> ShiftValue);
|
||||
Data[I] |= uint8_t(Value >> ShiftValue);
|
||||
ShiftValue -= 8;
|
||||
}
|
||||
}
|
||||
|
@ -112,8 +112,7 @@ public:
|
||||
}
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &,
|
||||
MutableArrayRef<char>, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
bool mayNeedRelaxation(unsigned Opcode, ArrayRef<MCOperand> Operands,
|
||||
const MCSubtargetInfo &STI) const override {
|
||||
@ -152,7 +151,7 @@ public:
|
||||
} // end anonymous namespace
|
||||
|
||||
void VEAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target, MutableArrayRef<char> Data,
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
switch (Fixup.getKind()) {
|
||||
case VE::fixup_ve_tls_gd_hi32:
|
||||
@ -173,14 +172,14 @@ void VEAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
Value <<= Info.TargetOffset;
|
||||
|
||||
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
// For each byte of the fragment that the fixup touches, mask in the bits
|
||||
// from the fixup value. The Value has been "split up" into the
|
||||
// appropriate bitfields above.
|
||||
for (unsigned i = 0; i != NumBytes; ++i) {
|
||||
unsigned Idx = Endian == llvm::endianness::little ? i : (NumBytes - 1) - i;
|
||||
Data[Offset + Idx] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
|
||||
Data[Idx] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -39,7 +39,7 @@ public:
|
||||
MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
|
||||
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value, bool) override;
|
||||
uint8_t *Data, uint64_t Value, bool) override;
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
createObjectTargetWriter() const override;
|
||||
@ -80,8 +80,7 @@ bool WebAssemblyAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
|
||||
|
||||
void WebAssemblyAsmBackend::applyFixup(const MCFragment &F,
|
||||
const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data,
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
if (!IsResolved)
|
||||
Asm->getWriter().recordRelocation(F, Fixup, Target, Value);
|
||||
@ -96,13 +95,13 @@ void WebAssemblyAsmBackend::applyFixup(const MCFragment &F,
|
||||
// Shift the value into position.
|
||||
Value <<= Info.TargetOffset;
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
|
||||
assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
|
||||
"Invalid fixup offset!");
|
||||
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
// bits from the fixup value.
|
||||
for (unsigned I = 0; I != NumBytes; ++I)
|
||||
Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
|
||||
Data[I] |= uint8_t((Value >> (I * 8)) & 0xff);
|
||||
}
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
|
@ -174,8 +174,7 @@ public:
|
||||
std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
|
||||
uint64_t &) override;
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
|
||||
bool mayNeedRelaxation(unsigned Opcode, ArrayRef<MCOperand> Operands,
|
||||
const MCSubtargetInfo &STI) const override;
|
||||
@ -676,9 +675,8 @@ std::optional<bool> X86AsmBackend::evaluateFixup(const MCFragment &,
|
||||
}
|
||||
|
||||
void X86AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
// Force relocation when there is a specifier. This might be too conservative
|
||||
// - GAS doesn't emit a relocation for call local@plt; local:.
|
||||
if (Target.getSpecifier())
|
||||
@ -710,7 +708,7 @@ void X86AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i != Size; ++i)
|
||||
Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
|
||||
Data[i] = uint8_t(Value >> (i * 8));
|
||||
}
|
||||
|
||||
bool X86AsmBackend::mayNeedRelaxation(unsigned Opcode,
|
||||
|
@ -37,8 +37,7 @@ public:
|
||||
std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
|
||||
uint64_t &) override;
|
||||
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) override;
|
||||
uint8_t *Data, uint64_t Value, bool IsResolved) override;
|
||||
bool writeNopData(raw_ostream &OS, uint64_t Count,
|
||||
const MCSubtargetInfo *STI) const override;
|
||||
|
||||
@ -153,9 +152,8 @@ std::optional<bool> XtensaAsmBackend::evaluateFixup(const MCFragment &F,
|
||||
}
|
||||
|
||||
void XtensaAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
const MCValue &Target,
|
||||
MutableArrayRef<char> Data, uint64_t Value,
|
||||
bool IsResolved) {
|
||||
const MCValue &Target, uint8_t *Data,
|
||||
uint64_t Value, bool IsResolved) {
|
||||
maybeAddReloc(F, Fixup, Target, Value, IsResolved);
|
||||
MCContext &Ctx = getContext();
|
||||
MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
|
||||
@ -168,11 +166,10 @@ void XtensaAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
|
||||
if (!Value)
|
||||
return; // Doesn't change encoding.
|
||||
|
||||
unsigned Offset = Fixup.getOffset();
|
||||
unsigned FullSize = getSize(Fixup.getKind());
|
||||
|
||||
for (unsigned i = 0; i != FullSize; ++i) {
|
||||
Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
Data[i] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
}
|
||||
}
|
||||
|
||||
|