[NFC][X86] Reorder the registers to reduce unnecessary iterations (#70222)

* Introduce a `PositionOrder` field for the `Register` and `RegisterTuples`
classes
* If register A's `PositionOrder` < register B's `PositionOrder`, then A
is placed before B in the enum in X86GenRegisterInfo.inc
* The new order of registers in the enum for X86 will be
      1. Registers before AVX512
      2. AVX512 registers (X/YMM16-31, ZMM0-31, K registers)
      3. AMX registers (TMM)
      4. APX registers (R16-R31)
* Add a new target hook `getNumSupportedRegs()` to return the number of
registers for the function (may overestimate).
* Replace `getNumRegs()` with `getNumSupportedRegs()` in LiveVariables
to avoid iterating over unsupported registers (see the sketch below)
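
A minimal sketch of the clamping idea, using made-up register counts rather
than the actual LLVM classes: the liveness scan is bounded by the
per-function register count instead of the size of the whole enum.

#include <cstdio>
#include <vector>

// Hypothetical numbering after the reorder: registers a target may lack
// (AVX512/AMX/APX) sit at the end of the enum.
constexpr unsigned NumTargetRegs = 32;

// Stand-in for TargetRegisterInfo::getNumSupportedRegs(): a function whose
// target lacks the late features gets a smaller bound.
unsigned getNumSupportedRegs(bool HasLateRegs) {
  return HasLateRegs ? NumTargetRegs : 16;
}

int main() {
  std::vector<const char *> PhysRegDef(NumTargetRegs, nullptr);
  unsigned Visited = 0;
  // LiveVariables-style loop: stop at the supported bound so the trailing
  // (unsupported) registers are never touched.
  for (unsigned Reg = 1, E = getNumSupportedRegs(false); Reg != E; ++Reg) {
    if (!PhysRegDef[Reg])
      continue; // dead register, as in HandleRegMask
    ++Visited;
  }
  std::printf("visited %u candidates out of %u registers\n", Visited,
              NumTargetRegs - 1);
}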

This patch can reduce the 0.3% instruction count regression for sqlite3
at the compile stage (O3) for #67702 by not iterating over the APX
registers.
Shengchen Kan 2023-11-02 00:12:05 +08:00 committed by GitHub
parent e2550b7aa0
commit 860f9e5170
13 changed files with 166 additions and 60 deletions

llvm/include/llvm/CodeGen/LiveVariables.h

@@ -147,7 +147,7 @@ private: // Intermediate data structures
bool HandlePhysRegKill(Register Reg, MachineInstr *MI);
/// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
void HandleRegMask(const MachineOperand&);
void HandleRegMask(const MachineOperand &, unsigned);
void HandlePhysRegUse(Register Reg, MachineInstr &MI);
void HandlePhysRegDef(Register Reg, MachineInstr *MI,
@@ -170,7 +170,8 @@ private: // Intermediate data structures
/// is coming from.
void analyzePHINodes(const MachineFunction& Fn);
void runOnInstr(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);
void runOnInstr(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs,
unsigned NumRegs);
void runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs);
public:

llvm/include/llvm/CodeGen/TargetRegisterInfo.h

@@ -266,6 +266,11 @@ protected:
virtual ~TargetRegisterInfo();
public:
/// Return the number of registers for the function. (may overestimate)
virtual unsigned getNumSupportedRegs(const MachineFunction &) const {
return getNumRegs();
}
// Register numbers can represent physical registers, virtual registers, and
// sometimes stack slots. The unsigned values are divided into these ranges:
//

llvm/include/llvm/TableGen/Record.h

@@ -2154,6 +2154,11 @@ struct LessRecordRegister {
};
bool operator()(const Record *Rec1, const Record *Rec2) const {
int64_t LHSPositionOrder = Rec1->getValueAsInt("PositionOrder");
int64_t RHSPositionOrder = Rec2->getValueAsInt("PositionOrder");
if (LHSPositionOrder != RHSPositionOrder)
return LHSPositionOrder < RHSPositionOrder;
RecordParts LHSParts(StringRef(Rec1->getName()));
RecordParts RHSParts(StringRef(Rec2->getName()));
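
Sketched standalone, the comparator change makes PositionOrder the primary
sort key, with the old name-based comparison only breaking ties (toy records;
the real tie-break parses name parts numerically):

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Minimal stand-in for a register record; the real code reads the
// PositionOrder field from the TableGen record.
struct Rec {
  std::string Name;
  long PositionOrder;
};

int main() {
  // PositionOrder values mirror the X86 scheme: 0 = pre-AVX512, 1 = YMM0-15,
  // 2 = AVX512, 3 = AMX.
  std::vector<Rec> Regs = {{"ZMM0", 2}, {"XMM0", 0}, {"TMM0", 3}, {"YMM0", 1}};
  std::sort(Regs.begin(), Regs.end(), [](const Rec &A, const Rec &B) {
    if (A.PositionOrder != B.PositionOrder)
      return A.PositionOrder < B.PositionOrder; // primary key, as above
    return A.Name < B.Name; // simplified tie-break for this sketch
  });
  for (const Rec &R : Regs)
    std::printf("%s ", R.Name.c_str()); // XMM0 YMM0 ZMM0 TMM0
}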

llvm/include/llvm/Target/Target.td

@@ -205,6 +205,10 @@ class Register<string n, list<string> altNames = []> {
// isConstant - This register always holds a constant value (e.g. the zero
// register in architectures such as MIPS)
bit isConstant = false;
/// PositionOrder - Indicate that tablegen should place the newly added
/// register at a later position, so targets that do not support it avoid
/// iterating over it.
int PositionOrder = 0;
}
// RegisterWithSubRegs - This can be used to define instances of Register which
@@ -417,6 +421,10 @@ class RegisterTuples<list<SubRegIndex> Indices, list<dag> Regs,
// List of asm names for the generated tuple registers.
list<string> RegAsmNames = RegNames;
// PositionOrder - Indicate that tablegen should place the newly added
// register at a later position, so targets that do not support it avoid
// iterating over it.
int PositionOrder = 0;
}
// RegisterCategory - This class is a list of RegisterClasses that belong to a

llvm/lib/CodeGen/LiveVariables.cpp

@@ -406,11 +406,11 @@ bool LiveVariables::HandlePhysRegKill(Register Reg, MachineInstr *MI) {
return true;
}
void LiveVariables::HandleRegMask(const MachineOperand &MO) {
void LiveVariables::HandleRegMask(const MachineOperand &MO, unsigned NumRegs) {
// Call HandlePhysRegKill() for all live registers clobbered by Mask.
// Clobbered registers are always dead, so there is no need to use
// HandlePhysRegDef().
for (unsigned Reg = 1, NumRegs = TRI->getNumRegs(); Reg != NumRegs; ++Reg) {
for (unsigned Reg = 1; Reg != NumRegs; ++Reg) {
// Skip dead regs.
if (!PhysRegDef[Reg] && !PhysRegUse[Reg])
continue;
@@ -421,7 +421,8 @@ void LiveVariables::HandleRegMask(const MachineOperand &MO) {
// This avoids needless implicit operands.
unsigned Super = Reg;
for (MCPhysReg SR : TRI->superregs(Reg))
if ((PhysRegDef[SR] || PhysRegUse[SR]) && MO.clobbersPhysReg(SR))
if (SR < NumRegs && (PhysRegDef[SR] || PhysRegUse[SR]) &&
MO.clobbersPhysReg(SR))
Super = SR;
HandlePhysRegKill(Super, nullptr);
}
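
The new `SR < NumRegs` guard is needed because a super-register of an
in-bounds register can be numbered past the clamped bound (e.g. ZMM8 as a
super-register of XMM8 once the AVX512 block is ordered last), while
PhysRegDef/PhysRegUse only have NumRegs entries. A toy bounds check:

#include <cstdio>
#include <vector>

int main() {
  const unsigned NumRegs = 16; // clamped bound from getNumSupportedRegs()
  std::vector<const char *> PhysRegDef(NumRegs, nullptr);
  // Made-up super-register numbers for one register: the second one lies
  // past the bound, so indexing PhysRegDef with it would be out of bounds.
  const unsigned SuperRegs[] = {8, 20};
  for (unsigned SR : SuperRegs)
    if (SR < NumRegs && PhysRegDef[SR]) // guard avoids the OOB read
      std::printf("super-register %u is live\n", SR);
  return 0;
}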
@@ -478,7 +479,8 @@ void LiveVariables::UpdatePhysRegDefs(MachineInstr &MI,
}
void LiveVariables::runOnInstr(MachineInstr &MI,
SmallVectorImpl<unsigned> &Defs) {
SmallVectorImpl<unsigned> &Defs,
unsigned NumRegs) {
assert(!MI.isDebugOrPseudoInstr());
// Process all of the operands of the instruction...
unsigned NumOperandsToProcess = MI.getNumOperands();
@@ -527,7 +529,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
// Process all masked registers. (Call clobbers).
for (unsigned Mask : RegMasks)
HandleRegMask(MI.getOperand(Mask));
HandleRegMask(MI.getOperand(Mask), NumRegs);
// Process all defs.
for (unsigned MOReg : DefRegs) {
@@ -539,7 +541,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
UpdatePhysRegDefs(MI, Defs);
}
void LiveVariables::runOnBlock(MachineBasicBlock *MBB, const unsigned NumRegs) {
void LiveVariables::runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs) {
// Mark live-in registers as live-in.
SmallVector<unsigned, 4> Defs;
for (const auto &LI : MBB->liveins()) {
@@ -556,7 +558,7 @@ void LiveVariables::runOnBlock(MachineBasicBlock *MBB, const unsigned NumRegs) {
continue;
DistanceMap.insert(std::make_pair(&MI, Dist++));
runOnInstr(MI, Defs);
runOnInstr(MI, Defs, NumRegs);
}
// Handle any virtual assignments from PHI nodes which might be at the
@@ -597,7 +599,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
MRI = &mf.getRegInfo();
TRI = MF->getSubtarget().getRegisterInfo();
const unsigned NumRegs = TRI->getNumRegs();
const unsigned NumRegs = TRI->getNumSupportedRegs(mf);
PhysRegDef.assign(NumRegs, nullptr);
PhysRegUse.assign(NumRegs, nullptr);
PHIVarInfo.resize(MF->getNumBlockIDs());

llvm/lib/Target/X86/AsmParser/X86Operand.h

@@ -357,28 +357,28 @@ struct X86Operand final : public MCParsedAsmOperand {
}
bool isMem64_RC128X() const {
return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
return isMem64() && X86II::isXMMReg(Mem.IndexReg);
}
bool isMem128_RC128X() const {
return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
return isMem128() && X86II::isXMMReg(Mem.IndexReg);
}
bool isMem128_RC256X() const {
return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
return isMem128() && X86II::isYMMReg(Mem.IndexReg);
}
bool isMem256_RC128X() const {
return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
return isMem256() && X86II::isXMMReg(Mem.IndexReg);
}
bool isMem256_RC256X() const {
return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
return isMem256() && X86II::isYMMReg(Mem.IndexReg);
}
bool isMem256_RC512() const {
return isMem256() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
return isMem256() && X86II::isZMMReg(Mem.IndexReg);
}
bool isMem512_RC256X() const {
return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
return isMem512() && X86II::isYMMReg(Mem.IndexReg);
}
bool isMem512_RC512() const {
return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
return isMem512() && X86II::isZMMReg(Mem.IndexReg);
}
bool isMem512_GR16() const {
if (!isMem512())

llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h

@@ -1182,11 +1182,39 @@ namespace X86II {
}
}
/// \returns true if the register is a XMM.
inline bool isXMMReg(unsigned RegNo) {
assert(X86::XMM15 - X86::XMM0 == 15 &&
"XMM0-15 registers are not continuous");
assert(X86::XMM31 - X86::XMM16 == 15 &&
"XMM16-31 registers are not continuous");
return (RegNo >= X86::XMM0 && RegNo <= X86::XMM15) ||
(RegNo >= X86::XMM16 && RegNo <= X86::XMM31);
}
/// \returns true if the register is a YMM.
inline bool isYMMReg(unsigned RegNo) {
assert(X86::YMM15 - X86::YMM0 == 15 &&
"YMM0-15 registers are not continuous");
assert(X86::YMM31 - X86::YMM16 == 15 &&
"YMM16-31 registers are not continuous");
return (RegNo >= X86::YMM0 && RegNo <= X86::YMM15) ||
(RegNo >= X86::YMM16 && RegNo <= X86::YMM31);
}
/// \returns true if the register is a ZMM.
inline bool isZMMReg(unsigned RegNo) {
assert(X86::ZMM31 - X86::ZMM0 == 31 && "ZMM registers are not continuous");
return RegNo >= X86::ZMM0 && RegNo <= X86::ZMM31;
}
/// \returns true if the MachineOperand is a x86-64 extended (r8 or
/// higher) register, e.g. r8, xmm8, xmm13, etc.
inline bool isX86_64ExtendedReg(unsigned RegNo) {
if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM31) ||
(RegNo >= X86::YMM8 && RegNo <= X86::YMM31) ||
if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM15) ||
(RegNo >= X86::XMM16 && RegNo <= X86::XMM31) ||
(RegNo >= X86::YMM8 && RegNo <= X86::YMM15) ||
(RegNo >= X86::YMM16 && RegNo <= X86::YMM31) ||
(RegNo >= X86::ZMM8 && RegNo <= X86::ZMM31))
return true;
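
Why these helpers check two ranges: after the reorder, XMM16 no longer
directly follows XMM15 in the enum (the asserts above verify each half is
still contiguous internally). A self-contained sketch with made-up enum
values:

#include <cassert>

// Made-up values mimicking the reordered numbering: the AVX512 half of the
// XMM file is placed far after XMM15.
enum : unsigned { XMM0 = 10, XMM15 = 25, XMM16 = 100, XMM31 = 115 };

// Same shape as X86II::isXMMReg: a single [XMM0, XMM31] range would wrongly
// accept every register numbered between the two XMM blocks.
bool isXMMReg(unsigned RegNo) {
  return (RegNo >= XMM0 && RegNo <= XMM15) ||
         (RegNo >= XMM16 && RegNo <= XMM31);
}

int main() {
  assert(isXMMReg(XMM15) && isXMMReg(XMM16));
  assert(!isXMMReg(50)); // e.g. a K or GPR register between the blocks
}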

llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp

@@ -234,11 +234,11 @@ using namespace llvm;
CASE_AVX_INS_COMMON(Inst##SS4, , mr_Int)
static unsigned getVectorRegSize(unsigned RegNo) {
if (X86::ZMM0 <= RegNo && RegNo <= X86::ZMM31)
if (X86II::isZMMReg(RegNo))
return 512;
if (X86::YMM0 <= RegNo && RegNo <= X86::YMM31)
if (X86II::isYMMReg(RegNo))
return 256;
if (X86::XMM0 <= RegNo && RegNo <= X86::XMM31)
if (X86II::isXMMReg(RegNo))
return 128;
if (X86::MM0 <= RegNo && RegNo <= X86::MM7)
return 64;

llvm/lib/Target/X86/X86RegisterInfo.cpp

@@ -604,8 +604,9 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
}
}
if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
for (unsigned n = 16; n != 32; ++n) {
for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
for (unsigned n = 0; n != 16; ++n) {
for (MCRegAliasIterator AI(X86::XMM16 + n, this, true); AI.isValid();
++AI)
Reserved.set(*AI);
}
}
@@ -616,6 +617,26 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
unsigned X86RegisterInfo::getNumSupportedRegs(const MachineFunction &MF) const {
// All existing Intel CPUs that support AMX support AVX512 and all existing
// Intel CPUs that support APX support AMX. AVX512 implies AVX.
//
// We enumerate the registers in X86GenRegisterInfo.inc in this order:
//
// Registers before AVX512,
// AVX512 registers (X/YMM16-31, ZMM0-31, K registers)
// AMX registers (TMM)
// APX registers (R16-R31)
//
// and try to return the minimum number of registers supported by the target.
assert((X86::R15WH + 1 == X86::YMM0) && (X86::YMM15 + 1 == X86::K0) &&
(X86::K6_K7 + 1 == X86::TMMCFG) &&
(X86::TMM7 + 1 == X86::NUM_TARGET_REGS) &&
"Register number may be incorrect");
return X86::NUM_TARGET_REGS;
}
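
As committed, the hook returns X86::NUM_TARGET_REGS, since TMM7 is the last
register at this point; the comment's goal of "the minimum number of
registers supported by the target" suggests a per-feature bound once later
register blocks land. A hedged sketch of that idea (hypothetical names and
enum values, not the committed code):

#include <cstdio>

// Hypothetical end-of-block markers in the reordered enum.
enum Bound : unsigned {
  PreAVX512End = 164,
  AVX512End = 261,
  AMXEnd = 270,
  APXEnd = 310
};

// On existing CPUs APX implies AMX, and AMX implies AVX512, so checking the
// strongest feature first yields the tightest usable prefix of the enum.
unsigned numSupportedRegs(bool HasAVX512, bool HasAMX, bool HasAPX) {
  if (HasAPX)
    return APXEnd;
  if (HasAMX)
    return AMXEnd;
  if (HasAVX512)
    return AVX512End;
  return PreAVX512End;
}

int main() {
  std::printf("%u\n", numSupportedRegs(/*HasAVX512=*/true, false, false));
}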
bool X86RegisterInfo::isArgumentRegister(const MachineFunction &MF,
MCRegister Reg) const {
const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();

llvm/lib/Target/X86/X86RegisterInfo.h

@@ -51,6 +51,9 @@ private:
public:
explicit X86RegisterInfo(const Triple &TT);
/// Return the number of registers for the function.
unsigned getNumSupportedRegs(const MachineFunction &MF) const override;
// FIXME: This should be tablegen'd like getDwarfRegNum is
int getSEHRegNum(unsigned i) const;

llvm/lib/Target/X86/X86RegisterInfo.td

@@ -223,6 +223,8 @@ def XMM13: X86Reg<"xmm13", 13>, DwarfRegNum<[30, -2, -2]>;
def XMM14: X86Reg<"xmm14", 14>, DwarfRegNum<[31, -2, -2]>;
def XMM15: X86Reg<"xmm15", 15>, DwarfRegNum<[32, -2, -2]>;
let PositionOrder = 2 in {
// XMM16-31 registers, used by AVX-512 instructions.
def XMM16: X86Reg<"xmm16", 16>, DwarfRegNum<[67, -2, -2]>;
def XMM17: X86Reg<"xmm17", 17>, DwarfRegNum<[68, -2, -2]>;
def XMM18: X86Reg<"xmm18", 18>, DwarfRegNum<[69, -2, -2]>;
@@ -239,37 +241,33 @@ def XMM28: X86Reg<"xmm28", 28>, DwarfRegNum<[79, -2, -2]>;
def XMM29: X86Reg<"xmm29", 29>, DwarfRegNum<[80, -2, -2]>;
def XMM30: X86Reg<"xmm30", 30>, DwarfRegNum<[81, -2, -2]>;
def XMM31: X86Reg<"xmm31", 31>, DwarfRegNum<[82, -2, -2]>;
}
// YMM0-15 registers, used by AVX instructions and
// YMM16-31 registers, used by AVX-512 instructions.
let SubRegIndices = [sub_xmm] in {
foreach Index = 0-31 in {
let SubRegIndices = [sub_xmm], PositionOrder = 1 in {
foreach Index = 0-15 in {
def YMM#Index : X86Reg<"ymm"#Index, Index, [!cast<X86Reg>("XMM"#Index)]>,
DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
}
}
let SubRegIndices = [sub_xmm], PositionOrder = 2 in {
foreach Index = 16-31 in {
def YMM#Index : X86Reg<"ymm"#Index, Index, [!cast<X86Reg>("XMM"#Index)]>,
DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
}
}
// ZMM Registers, used by AVX-512 instructions.
let SubRegIndices = [sub_ymm] in {
let SubRegIndices = [sub_ymm], PositionOrder = 2 in {
foreach Index = 0-31 in {
def ZMM#Index : X86Reg<"zmm"#Index, Index, [!cast<X86Reg>("YMM"#Index)]>,
DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
}
}
// Tile config registers.
def TMMCFG: X86Reg<"tmmcfg", 0>;
// Tile "registers".
def TMM0: X86Reg<"tmm0", 0>;
def TMM1: X86Reg<"tmm1", 1>;
def TMM2: X86Reg<"tmm2", 2>;
def TMM3: X86Reg<"tmm3", 3>;
def TMM4: X86Reg<"tmm4", 4>;
def TMM5: X86Reg<"tmm5", 5>;
def TMM6: X86Reg<"tmm6", 6>;
def TMM7: X86Reg<"tmm7", 7>;
let PositionOrder = 2 in {
// Mask Registers, used by AVX-512 instructions.
def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, 93, 93]>;
def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, 94, 94]>;
@@ -279,6 +277,25 @@ def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, 97, 97]>;
def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, 98, 98]>;
def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, 99, 99]>;
def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, 100, 100]>;
// Mask register pairs
def KPAIRS : RegisterTuples<[sub_mask_0, sub_mask_1],
[(add K0, K2, K4, K6), (add K1, K3, K5, K7)]>;
}
// TMM registers, used by AMX instructions.
let PositionOrder = 3 in {
// Tile config registers.
def TMMCFG: X86Reg<"tmmcfg", 0>;
// Tile "registers".
def TMM0: X86Reg<"tmm0", 0>;
def TMM1: X86Reg<"tmm1", 1>;
def TMM2: X86Reg<"tmm2", 2>;
def TMM3: X86Reg<"tmm3", 3>;
def TMM4: X86Reg<"tmm4", 4>;
def TMM5: X86Reg<"tmm5", 5>;
def TMM6: X86Reg<"tmm6", 6>;
def TMM7: X86Reg<"tmm7", 7>;
}
// Floating point stack registers. These don't map one-to-one to the FP
// pseudo registers, but we still mark them as aliasing FP registers. That
@@ -627,10 +644,6 @@ def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)> {let Size = 16;}
def VK32 : RegisterClass<"X86", [v32i1], 32, (add VK16)> {let Size = 32;}
def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
// Mask register pairs
def KPAIRS : RegisterTuples<[sub_mask_0, sub_mask_1],
[(add K0, K2, K4, K6), (add K1, K3, K5, K7)]>;
def VK1PAIR : RegisterClass<"X86", [untyped], 16, (add KPAIRS)> {let Size = 32;}
def VK2PAIR : RegisterClass<"X86", [untyped], 16, (add KPAIRS)> {let Size = 32;}
def VK4PAIR : RegisterClass<"X86", [untyped], 16, (add KPAIRS)> {let Size = 32;}

llvm/test/CodeGen/X86/ipra-reg-usage.ll

@@ -3,7 +3,7 @@
target triple = "x86_64-unknown-unknown"
declare void @bar1()
define preserve_allcc void @foo()#0 {
; CHECK: foo Clobbered Registers: $cs $df $ds $eflags $eip $eiz $es $esp $fpcw $fpsw $fs $fs_base $gs $gs_base $hip $hsp $ip $mxcsr $rflags $rip $riz $rsp $sp $sph $spl $ss $ssp $tmmcfg $_eflags $cr0 $cr1 $cr2 $cr3 $cr4 $cr5 $cr6 $cr7 $cr8 $cr9 $cr10 $cr11 $cr12 $cr13 $cr14 $cr15 $dr0 $dr1 $dr2 $dr3 $dr4 $dr5 $dr6 $dr7 $dr8 $dr9 $dr10 $dr11 $dr12 $dr13 $dr14 $dr15 $fp0 $fp1 $fp2 $fp3 $fp4 $fp5 $fp6 $fp7 $k0 $k1 $k2 $k3 $k4 $k5 $k6 $k7 $mm0 $mm1 $mm2 $mm3 $mm4 $mm5 $mm6 $mm7 $r11 $st0 $st1 $st2 $st3 $st4 $st5 $st6 $st7 $tmm0 $tmm1 $tmm2 $tmm3 $tmm4 $tmm5 $tmm6 $tmm7 $xmm16 $xmm17 $xmm18 $xmm19 $xmm20 $xmm21 $xmm22 $xmm23 $xmm24 $xmm25 $xmm26 $xmm27 $xmm28 $xmm29 $xmm30 $xmm31 $ymm0 $ymm1 $ymm2 $ymm3 $ymm4 $ymm5 $ymm6 $ymm7 $ymm8 $ymm9 $ymm10 $ymm11 $ymm12 $ymm13 $ymm14 $ymm15 $ymm16 $ymm17 $ymm18 $ymm19 $ymm20 $ymm21 $ymm22 $ymm23 $ymm24 $ymm25 $ymm26 $ymm27 $ymm28 $ymm29 $ymm30 $ymm31 $zmm0 $zmm1 $zmm2 $zmm3 $zmm4 $zmm5 $zmm6 $zmm7 $zmm8 $zmm9 $zmm10 $zmm11 $zmm12 $zmm13 $zmm14 $zmm15 $zmm16 $zmm17 $zmm18 $zmm19 $zmm20 $zmm21 $zmm22 $zmm23 $zmm24 $zmm25 $zmm26 $zmm27 $zmm28 $zmm29 $zmm30 $zmm31 $r11b $r11bh $r11d $r11w $r11wh $k0_k1 $k2_k3 $k4_k5 $k6_k7
; CHECK: foo Clobbered Registers: $cs $df $ds $eflags $eip $eiz $es $esp $fpcw $fpsw $fs $fs_base $gs $gs_base $hip $hsp $ip $mxcsr $rflags $rip $riz $rsp $sp $sph $spl $ss $ssp $_eflags $cr0 $cr1 $cr2 $cr3 $cr4 $cr5 $cr6 $cr7 $cr8 $cr9 $cr10 $cr11 $cr12 $cr13 $cr14 $cr15 $dr0 $dr1 $dr2 $dr3 $dr4 $dr5 $dr6 $dr7 $dr8 $dr9 $dr10 $dr11 $dr12 $dr13 $dr14 $dr15 $fp0 $fp1 $fp2 $fp3 $fp4 $fp5 $fp6 $fp7 $mm0 $mm1 $mm2 $mm3 $mm4 $mm5 $mm6 $mm7 $r11 $st0 $st1 $st2 $st3 $st4 $st5 $st6 $st7 $r11b $r11bh $r11d $r11w $r11wh $ymm0 $ymm1 $ymm2 $ymm3 $ymm4 $ymm5 $ymm6 $ymm7 $ymm8 $ymm9 $ymm10 $ymm11 $ymm12 $ymm13 $ymm14 $ymm15 $k0 $k1 $k2 $k3 $k4 $k5 $k6 $k7 $xmm16 $xmm17 $xmm18 $xmm19 $xmm20 $xmm21 $xmm22 $xmm23 $xmm24 $xmm25 $xmm26 $xmm27 $xmm28 $xmm29 $xmm30 $xmm31 $ymm16 $ymm17 $ymm18 $ymm19 $ymm20 $ymm21 $ymm22 $ymm23 $ymm24 $ymm25 $ymm26 $ymm27 $ymm28 $ymm29 $ymm30 $ymm31 $zmm0 $zmm1 $zmm2 $zmm3 $zmm4 $zmm5 $zmm6 $zmm7 $zmm8 $zmm9 $zmm10 $zmm11 $zmm12 $zmm13 $zmm14 $zmm15 $zmm16 $zmm17 $zmm18 $zmm19 $zmm20 $zmm21 $zmm22 $zmm23 $zmm24 $zmm25 $zmm26 $zmm27 $zmm28 $zmm29 $zmm30 $zmm31 $k0_k1 $k2_k3 $k4_k5 $k6_k7 $tmmcfg $tmm0 $tmm1 $tmm2 $tmm3 $tmm4 $tmm5 $tmm6 $tmm7
call void @bar1()
call void @bar2()
ret void

llvm/utils/TableGen/CodeGenRegisters.cpp

@@ -1175,22 +1175,42 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
for (auto &Idx : SubRegIndices)
Idx.updateComponents(*this);
// Read in the register definitions.
std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
llvm::sort(Regs, LessRecordRegister());
// Assign the enumeration values.
for (unsigned i = 0, e = Regs.size(); i != e; ++i)
getReg(Regs[i]);
// Expand tuples and number the new registers.
std::vector<Record*> Tups =
Records.getAllDerivedDefinitions("RegisterTuples");
for (Record *R : Tups) {
std::vector<Record *> TupRegs = *Sets.expand(R);
llvm::sort(TupRegs, LessRecordRegister());
for (Record *RC : TupRegs)
getReg(RC);
}
// Read in the register and register tuple definitions.
std::vector<Record *> Regs = Records.getAllDerivedDefinitions("Register");
if (!Regs.empty() && Regs[0]->isSubClassOf("X86Reg")) {
// For X86, we need to sort Registers and RegisterTuples together to list
// new registers and register tuples at a later position. So that we can
// reduce unnecessary iterations on unsupported registers in LiveVariables.
// TODO: Remove this logic when migrate from LiveVariables to LiveIntervals
// completely.
std::vector<Record *> Tups =
Records.getAllDerivedDefinitions("RegisterTuples");
for (Record *R : Tups) {
// Expand tuples and merge the vectors
std::vector<Record *> TupRegs = *Sets.expand(R);
Regs.insert(Regs.end(), TupRegs.begin(), TupRegs.end());
}
llvm::sort(Regs, LessRecordRegister());
// Assign the enumeration values.
for (unsigned i = 0, e = Regs.size(); i != e; ++i)
getReg(Regs[i]);
} else {
llvm::sort(Regs, LessRecordRegister());
// Assign the enumeration values.
for (unsigned i = 0, e = Regs.size(); i != e; ++i)
getReg(Regs[i]);
// Expand tuples and number the new registers.
std::vector<Record *> Tups =
Records.getAllDerivedDefinitions("RegisterTuples");
for (Record *R : Tups) {
std::vector<Record *> TupRegs = *Sets.expand(R);
llvm::sort(TupRegs, LessRecordRegister());
for (Record *RC : TupRegs)
getReg(RC);
}
}
// Now all the registers are known. Build the object graph of explicit
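
As a toy contrast of the two branches above (hypothetical record type, not
the TableGen API): on the X86 path, tuples are appended to the register list
before a single global sort, so a tuple such as KPAIRS is placed by its
PositionOrder instead of always landing after all plain registers.

#include <algorithm>
#include <string>
#include <vector>

struct Rec {
  std::string Name;
  int PositionOrder;
};

// Stand-in for Sets.expand(R): a K-register pair tuple with PositionOrder 2.
std::vector<Rec> expandTuples() { return {{"K0_K1", 2}, {"K2_K3", 2}}; }

int main() {
  std::vector<Rec> Regs = {{"RAX", 0}, {"TMM0", 3}, {"K0", 2}};
  std::vector<Rec> Tups = expandTuples();
  // X86 path: merge first, then one global sort, so the tuples interleave
  // with the other AVX512 registers instead of trailing the whole enum.
  Regs.insert(Regs.end(), Tups.begin(), Tups.end());
  std::sort(Regs.begin(), Regs.end(), [](const Rec &A, const Rec &B) {
    return A.PositionOrder != B.PositionOrder
               ? A.PositionOrder < B.PositionOrder
               : A.Name < B.Name;
  });
  // Resulting order: RAX, K0, K0_K1, K2_K3, TMM0.
  return 0;
}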