repos/DirectXShaderCompiler/lib/CodeGen/TargetSchedule.cpp:

//===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
cl::desc("Use TargetSchedModel for latency lookup"));
static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
cl::desc("Use InstrItineraryData for latency lookup"));
bool TargetSchedModel::hasInstrSchedModel() const {
return EnableSchedModel && SchedModel.hasInstrSchedModel();
}
bool TargetSchedModel::hasInstrItineraries() const {
return EnableSchedItins && !InstrItins.isEmpty();
}
static unsigned gcd(unsigned Dividend, unsigned Divisor) {
// Dividend and Divisor will be naturally swapped as needed.
while (Divisor) {
unsigned Rem = Dividend % Divisor;
Dividend = Divisor;
Divisor = Rem;
}
return Dividend;
}
static unsigned lcm(unsigned A, unsigned B) {
unsigned LCM = (uint64_t(A) * B) / gcd(A, B);
assert((LCM >= A && LCM >= B) && "LCM overflow");
return LCM;
}
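// Example: gcd(4, 6) == 2, so lcm(4, 6) == (4 * 6) / 2 == 12. init() below
// uses lcm() to find a common denominator for all resource unit counts.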
void TargetSchedModel::init(const MCSchedModel &sm,
const TargetSubtargetInfo *sti,
const TargetInstrInfo *tii) {
SchedModel = sm;
STI = sti;
TII = tii;
STI->initInstrItins(InstrItins);
unsigned NumRes = SchedModel.getNumProcResourceKinds();
ResourceFactors.resize(NumRes);
ResourceLCM = SchedModel.IssueWidth;
for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
if (NumUnits > 0)
ResourceLCM = lcm(ResourceLCM, NumUnits);
}
MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
}
}
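// Example: with IssueWidth = 4 and resources of 2 and 3 units, ResourceLCM =
// lcm(4, 2, 3) = 12, MicroOpFactor = 12 / 4 = 3, and the per-resource factors
// are 12 / 2 = 6 and 12 / 3 = 4, putting all resource counts on one scale.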
unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
const MCSchedClassDesc *SC) const {
if (hasInstrItineraries()) {
int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI);
}
if (hasInstrSchedModel()) {
if (!SC)
SC = resolveSchedClass(MI);
if (SC->isValid())
return SC->NumMicroOps;
}
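// Transient instructions (e.g. COPY or IMPLICIT_DEF) expand to no real
// code, so they consume no issue slots.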
return MI->isTransient() ? 0 : 1;
}
// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned capLatency(int Cycles) {
return Cycles >= 0 ? Cycles : 1000;
}
/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
// Get the definition's scheduling class descriptor from this machine model.
unsigned SchedClass = MI->getDesc().getSchedClass();
const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
if (!SCDesc->isValid())
return SCDesc;
#ifndef NDEBUG
unsigned NIter = 0;
#endif
while (SCDesc->isVariant()) {
assert(++NIter < 6 && "Variants are nested deeper than the magic number");
SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
SCDesc = SchedModel.getSchedClassDesc(SchedClass);
}
return SCDesc;
}
/// Find the def index of this operand. This index maps to the machine model and
/// is independent of use operands. Def operands may be reordered with uses or
/// merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
unsigned DefIdx = 0;
for (unsigned i = 0; i != DefOperIdx; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef())
++DefIdx;
}
return DefIdx;
}
/// Find the use index of this operand. This is independent of the instruction's
/// def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
unsigned UseIdx = 0;
for (unsigned i = 0; i != UseOperIdx; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.readsReg())
++UseIdx;
}
return UseIdx;
}
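// Example: in "%r0, %r1 = inst %r2, %r3", operand 1 (%r1) maps to DefIdx 1
// and operand 3 (%r3) maps to UseIdx 1, independent of how defs and uses are
// interleaved in the operand list.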
// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
const MachineInstr *DefMI, unsigned DefOperIdx,
const MachineInstr *UseMI, unsigned UseOperIdx) const {
if (!hasInstrSchedModel() && !hasInstrItineraries())
return TII->defaultDefLatency(SchedModel, DefMI);
if (hasInstrItineraries()) {
int OperLatency = 0;
if (UseMI) {
OperLatency = TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx,
UseMI, UseOperIdx);
}
else {
unsigned DefClass = DefMI->getDesc().getSchedClass();
OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
}
if (OperLatency >= 0)
return OperLatency;
// No operand latency was found.
unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);
// Expected latency is the max of the stage latency and itinerary props.
// Rather than directly querying InstrItins stage latency, we call a TII
// hook to allow subtargets to specialize latency. This hook is only
// applicable to the InstrItins model. InstrSchedModel should model all
// special cases without TII hooks.
InstrLatency = std::max(InstrLatency,
TII->defaultDefLatency(SchedModel, DefMI));
return InstrLatency;
}
// hasInstrSchedModel()
const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
if (DefIdx < SCDesc->NumWriteLatencyEntries) {
// Lookup the definition's write latency in SubtargetInfo.
const MCWriteLatencyEntry *WLEntry =
STI->getWriteLatencyEntry(SCDesc, DefIdx);
unsigned WriteID = WLEntry->WriteResourceID;
unsigned Latency = capLatency(WLEntry->Cycles);
if (!UseMI)
return Latency;
// Lookup the use's latency adjustment in SubtargetInfo.
const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
if (UseDesc->NumReadAdvanceEntries == 0)
return Latency;
unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
if (Advance > 0 && (unsigned)Advance > Latency) // unsigned wrap
return 0;
return Latency - Advance;
}
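// Example: a write with Cycles = 4 feeding a use with ReadAdvance = 1 gives
// an effective operand latency of 3; an advance larger than the write
// latency clamps to 0 instead of wrapping the unsigned result.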
// If DefIdx does not exist in the model (e.g. implicit defs), then return
// unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
&& !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()
&& SchedModel.isComplete()) {
std::string Err;
raw_string_ostream ss(Err);
ss << "DefIdx " << DefIdx << " exceeds machine model writes for "
<< *DefMI;
report_fatal_error(ss.str());
}
#endif
// FIXME: Automatically giving all implicit defs defaultDefLatency is
// undesirable. We should only do it for defs that are known to the MC
// desc like flags. Truly implicit defs should get 1 cycle latency.
return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, DefMI);
}
unsigned
TargetSchedModel::computeInstrLatency(const MCSchedClassDesc &SCDesc) const {
unsigned Latency = 0;
for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
DefIdx != DefEnd; ++DefIdx) {
// Lookup the definition's write latency in SubtargetInfo.
const MCWriteLatencyEntry *WLEntry =
STI->getWriteLatencyEntry(&SCDesc, DefIdx);
Latency = std::max(Latency, capLatency(WLEntry->Cycles));
}
return Latency;
}
unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
assert(hasInstrSchedModel() && "Only call this function with a SchedModel");
unsigned SCIdx = TII->get(Opcode).getSchedClass();
const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SCIdx);
if (SCDesc->isValid() && !SCDesc->isVariant())
return computeInstrLatency(*SCDesc);
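// Variant sched classes resolve through predicates on a concrete
// MachineInstr, so an opcode alone cannot determine a latency here.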
llvm_unreachable("No MI sched latency");
}
unsigned
TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
bool UseDefaultDefLatency) const {
// For the itinerary model, fall back to the old subtarget hook.
// Allow subtargets to compute Bundle latencies outside the machine model.
if (hasInstrItineraries() || MI->isBundle() ||
(!hasInstrSchedModel() && !UseDefaultDefLatency))
return TII->getInstrLatency(&InstrItins, MI);
if (hasInstrSchedModel()) {
const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
if (SCDesc->isValid())
return computeInstrLatency(*SCDesc);
}
return TII->defaultDefLatency(SchedModel, MI);
}
unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
const MachineInstr *DepMI) const {
if (SchedModel.MicroOpBufferSize <= 1)
return 1;
// MicroOpBufferSize > 1 indicates an out-of-order processor that can dispatch
// WAW dependencies in the same cycle.
// Treat predication as a data dependency for out-of-order cpus. In-order
// cpus do not need to treat predicated writes specially.
//
// TODO: The following hack exists because predication passes do not
// correctly append imp-use operands, and readsReg() strangely returns false
// for predicated defs.
unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
const MachineFunction &MF = *DefMI->getParent()->getParent();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(DepMI))
return computeInstrLatency(DefMI);
// If we have a per operand scheduling model, check if this def is writing
// an unbuffered resource. If so, it is treated like an in-order cpu.
if (hasInstrSchedModel()) {
const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
if (SCDesc->isValid()) {
for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
*PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
return 1;
}
}
}
return 0;
}

repos/DirectXShaderCompiler/lib/CodeGen/RegisterCoalescer.cpp:

//===- RegisterCoalescer.cpp - Generic Register Coalescing Interface -------==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic RegisterCoalescer interface which
// is used as the common interface used by all clients and
// implementations of register coalescing.
//
//===----------------------------------------------------------------------===//
#include "RegisterCoalescer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(numJoins , "Number of interval joins performed");
STATISTIC(numCrossRCs , "Number of cross class joins performed");
STATISTIC(numCommutes , "Number of instructions commuted");
STATISTIC(numExtends , "Number of copies extended");
STATISTIC(NumReMats , "Number of instructions re-materialized");
STATISTIC(NumInflated , "Number of register classes inflated");
STATISTIC(NumLaneConflicts, "Number of dead lane conflicts tested");
STATISTIC(NumLaneResolves, "Number of dead lane conflicts resolved");
static cl::opt<bool>
EnableJoining("join-liveintervals",
cl::desc("Coalesce copies (default=true)"),
cl::init(true));
static cl::opt<bool> UseTerminalRule("terminal-rule",
cl::desc("Apply the terminal rule"),
cl::init(false), cl::Hidden);
/// Temporary flag to test critical edge unsplitting.
static cl::opt<bool>
EnableJoinSplits("join-splitedges",
cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden);
/// Temporary flag to test global copy optimization.
static cl::opt<cl::boolOrDefault>
EnableGlobalCopies("join-globalcopies",
cl::desc("Coalesce copies that span blocks (default=subtarget)"),
cl::init(cl::BOU_UNSET), cl::Hidden);
static cl::opt<bool>
VerifyCoalescing("verify-coalescing",
cl::desc("Verify machine instrs before and after register coalescing"),
cl::Hidden);
namespace {
class RegisterCoalescer : public MachineFunctionPass,
private LiveRangeEdit::Delegate {
MachineFunction* MF;
MachineRegisterInfo* MRI;
const TargetMachine* TM;
const TargetRegisterInfo* TRI;
const TargetInstrInfo* TII;
LiveIntervals *LIS;
const MachineLoopInfo* Loops;
AliasAnalysis *AA;
RegisterClassInfo RegClassInfo;
/// A LaneMask to remember on which subregister live ranges we need to call
/// shrinkToUses() later.
unsigned ShrinkMask;
/// True if the main range of the currently coalesced intervals should be
/// checked for smaller live intervals.
bool ShrinkMainRange;
/// \brief True if the coalescer should aggressively coalesce global copies
/// in favor of keeping local copies.
bool JoinGlobalCopies;
/// \brief True if the coalescer should aggressively coalesce fall-thru
/// blocks exclusively containing copies.
bool JoinSplitEdges;
/// Copy instructions yet to be coalesced.
SmallVector<MachineInstr*, 8> WorkList;
SmallVector<MachineInstr*, 8> LocalWorkList;
/// Set of instruction pointers that have been erased, and
/// that may be present in WorkList.
SmallPtrSet<MachineInstr*, 8> ErasedInstrs;
/// Dead instructions that are about to be deleted.
SmallVector<MachineInstr*, 8> DeadDefs;
/// Virtual registers to be considered for register class inflation.
SmallVector<unsigned, 8> InflateRegs;
/// Recursively eliminate dead defs in DeadDefs.
void eliminateDeadDefs();
/// LiveRangeEdit callback for eliminateDeadDefs().
void LRE_WillEraseInstruction(MachineInstr *MI) override;
/// Coalesce the LocalWorkList.
void coalesceLocals();
/// Join compatible live intervals.
void joinAllIntervals();
/// Coalesce copies in the specified MBB, putting
/// copies that cannot yet be coalesced into WorkList.
void copyCoalesceInMBB(MachineBasicBlock *MBB);
/// Tries to coalesce all copies in CurrList. Returns true if any progress
/// was made.
bool copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList);
/// Attempt to join intervals corresponding to SrcReg/DstReg, which are the
/// src/dst of the copy instruction CopyMI. This returns true if the copy
/// was successfully coalesced away. If it is not currently possible to
/// coalesce this interval, but it may be possible if other things get
/// coalesced, then it returns true by reference in 'Again'.
bool joinCopy(MachineInstr *TheCopy, bool &Again);
/// Attempt to join these two intervals. On failure, this
/// returns false. The output "SrcInt" will not have been modified, so we
/// can use this information below to update aliases.
bool joinIntervals(CoalescerPair &CP);
/// Attempt joining two virtual registers. Return true on success.
bool joinVirtRegs(CoalescerPair &CP);
/// Attempt joining with a reserved physreg.
bool joinReservedPhysReg(CoalescerPair &CP);
/// Add the LiveRange @p ToMerge as a subregister liverange of @p LI.
/// Subranges in @p LI which only partially interfere with the desired
/// LaneMask are split as necessary. @p LaneMask are the lanes that
/// @p ToMerge will occupy in the coalesced register. @p LI has its subrange
/// lanemasks already adjusted to the coalesced register.
/// @returns false if live range conflicts couldn't get resolved.
bool mergeSubRangeInto(LiveInterval &LI, const LiveRange &ToMerge,
unsigned LaneMask, CoalescerPair &CP);
/// Join the liveranges of two subregisters. Joins @p RRange into
/// @p LRange; @p RRange may be invalid afterwards.
/// @returns false if live range conflicts couldn't get resolved.
bool joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
unsigned LaneMask, const CoalescerPair &CP);
/// We found a non-trivially-coalescable copy. If the source value number is
/// defined by a copy from the destination reg, see if we can merge the two
/// destination reg value numbers into a single value number, eliminating a copy.
/// This returns true if an interval was modified.
bool adjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
/// Return true if there are definitions of IntB
/// other than BValNo val# that can reach uses of AValNo val# of IntA.
bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
VNInfo *AValNo, VNInfo *BValNo);
/// We found a non-trivially-coalescable copy.
/// If the source value number is defined by a commutable instruction and
/// its other operand is coalesced to the copy dest register, see if we
/// can transform the copy into a noop by commuting the definition.
/// This returns true if an interval was modified.
bool removeCopyByCommutingDef(const CoalescerPair &CP,MachineInstr *CopyMI);
/// If the source of a copy is defined by a
/// trivial computation, replace the copy by rematerializing the definition.
bool reMaterializeTrivialDef(const CoalescerPair &CP, MachineInstr *CopyMI,
bool &IsDefCopy);
/// Return true if a copy involving a physreg should be joined.
bool canJoinPhys(const CoalescerPair &CP);
/// Replace all defs and uses of SrcReg to DstReg and update the subregister
/// number if it is not zero. If DstReg is a physical register and the
/// existing subregister number of the def / use being updated is not zero,
/// make sure to set it to the correct physical subregister.
void updateRegDefsUses(unsigned SrcReg, unsigned DstReg, unsigned SubIdx);
/// Handle copies of undef values.
/// Returns true if @p CopyMI was a copy of an undef value and eliminated.
bool eliminateUndefCopy(MachineInstr *CopyMI);
/// Check whether or not we should apply the terminal rule on the
/// destination (Dst) of \p Copy.
/// When the terminal rule applies, Copy is not profitable to
/// coalesce.
/// Dst is terminal if it has exactly one affinity (Dst, Src) and
/// at least one interference (Dst, Dst2). If Dst is terminal, the
/// terminal rule consists of checking that at least one
/// interfering node, say Dst2, has an affinity of equal or greater
/// weight with Src.
/// In that case, Dst2 and Dst will not be able to be both coalesced
/// with Src. Since Dst2 exposes more coalescing opportunities than
/// Dst, we can drop \p Copy.
bool applyTerminalRule(const MachineInstr &Copy) const;
/// Check whether or not \p LI is composed of multiple connected
/// components and if that is the case, fix that.
void splitNewRanges(LiveInterval *LI) {
ConnectedVNInfoEqClasses ConEQ(*LIS);
unsigned NumComps = ConEQ.Classify(LI);
if (NumComps <= 1)
return;
SmallVector<LiveInterval*, 8> NewComps(1, LI);
for (unsigned i = 1; i != NumComps; ++i) {
unsigned VReg = MRI->createVirtualRegister(MRI->getRegClass(LI->reg));
NewComps.push_back(&LIS->createEmptyInterval(VReg));
}
ConEQ.Distribute(&NewComps[0], *MRI);
}
/// Wrapper method for \see LiveIntervals::shrinkToUses.
/// This method does the proper fixing of the live-ranges when the
/// aforementioned method returns true.
void shrinkToUses(LiveInterval *LI,
SmallVectorImpl<MachineInstr * > *Dead = nullptr) {
if (LIS->shrinkToUses(LI, Dead))
// We may have created multiple connected components, split them.
splitNewRanges(LI);
}
public:
static char ID; ///< Class identification, replacement for typeinfo
RegisterCoalescer() : MachineFunctionPass(ID) {
initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
void releaseMemory() override;
/// This is the pass entry point.
bool runOnMachineFunction(MachineFunction&) override;
/// Implement the dump method.
void print(raw_ostream &O, const Module* = nullptr) const override;
};
} // end anonymous namespace
char &llvm::RegisterCoalescerID = RegisterCoalescer::ID;
INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing",
"Simple Register Coalescing", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing",
"Simple Register Coalescing", false, false)
char RegisterCoalescer::ID = 0;
static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
unsigned &Src, unsigned &Dst,
unsigned &SrcSub, unsigned &DstSub) {
if (MI->isCopy()) {
Dst = MI->getOperand(0).getReg();
DstSub = MI->getOperand(0).getSubReg();
Src = MI->getOperand(1).getReg();
SrcSub = MI->getOperand(1).getSubReg();
} else if (MI->isSubregToReg()) {
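// SUBREG_TO_REG operands: (0) dst reg, (1) imm, (2) src reg, (3) subreg
// index, so the effective destination subreg composes (0)'s subreg with (3).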
Dst = MI->getOperand(0).getReg();
DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
MI->getOperand(3).getImm());
Src = MI->getOperand(2).getReg();
SrcSub = MI->getOperand(2).getSubReg();
} else
return false;
return true;
}
/// Return true if this block should be vacated by the coalescer to eliminate
/// branches. The important cases to handle in the coalescer are critical edges
/// split during phi elimination which contain only copies. Simple blocks that
/// contain non-branches should also be vacated, but this can be handled by an
/// earlier pass similar to early if-conversion.
static bool isSplitEdge(const MachineBasicBlock *MBB) {
if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
return false;
for (const auto &MI : *MBB) {
if (!MI.isCopyLike() && !MI.isUnconditionalBranch())
return false;
}
return true;
}
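// Example: a block created by splitting a critical edge during phi
// elimination typically contains just "%vregA = COPY %vregB" plus an
// unconditional branch, and so qualifies as a split edge here.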
bool CoalescerPair::setRegisters(const MachineInstr *MI) {
SrcReg = DstReg = 0;
SrcIdx = DstIdx = 0;
NewRC = nullptr;
Flipped = CrossClass = false;
unsigned Src, Dst, SrcSub, DstSub;
if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub))
return false;
Partial = SrcSub || DstSub;
// If one register is a physreg, it must be Dst.
if (TargetRegisterInfo::isPhysicalRegister(Src)) {
if (TargetRegisterInfo::isPhysicalRegister(Dst))
return false;
std::swap(Src, Dst);
std::swap(SrcSub, DstSub);
Flipped = true;
}
const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
// Eliminate DstSub on a physreg.
if (DstSub) {
Dst = TRI.getSubReg(Dst, DstSub);
if (!Dst) return false;
DstSub = 0;
}
// Eliminate SrcSub by picking a corresponding Dst superregister.
if (SrcSub) {
Dst = TRI.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
if (!Dst) return false;
} else if (!MRI.getRegClass(Src)->contains(Dst)) {
return false;
}
} else {
// Both registers are virtual.
const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
// Both registers have subreg indices.
if (SrcSub && DstSub) {
// Copies between different sub-registers are never coalescable.
if (Src == Dst && SrcSub != DstSub)
return false;
NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub,
SrcIdx, DstIdx);
if (!NewRC)
return false;
} else if (DstSub) {
// SrcReg will be merged with a sub-register of DstReg.
SrcIdx = DstSub;
NewRC = TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
} else if (SrcSub) {
// DstReg will be merged with a sub-register of SrcReg.
DstIdx = SrcSub;
NewRC = TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub);
} else {
// This is a straight copy without sub-registers.
NewRC = TRI.getCommonSubClass(DstRC, SrcRC);
}
// The combined constraint may be impossible to satisfy.
if (!NewRC)
return false;
// Prefer SrcReg to be a sub-register of DstReg.
// FIXME: Coalescer should support subregs symmetrically.
if (DstIdx && !SrcIdx) {
std::swap(Src, Dst);
std::swap(SrcIdx, DstIdx);
Flipped = !Flipped;
}
CrossClass = NewRC != DstRC || NewRC != SrcRC;
}
// Check our invariants
assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");
assert(!(TargetRegisterInfo::isPhysicalRegister(Dst) && DstSub) &&
"Cannot have a physical SubIdx");
SrcReg = Src;
DstReg = Dst;
return true;
}
bool CoalescerPair::flip() {
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
return false;
std::swap(SrcReg, DstReg);
std::swap(SrcIdx, DstIdx);
Flipped = !Flipped;
return true;
}
bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
if (!MI)
return false;
unsigned Src, Dst, SrcSub, DstSub;
if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub))
return false;
// Find the virtual register that is SrcReg.
if (Dst == SrcReg) {
std::swap(Src, Dst);
std::swap(SrcSub, DstSub);
} else if (Src != SrcReg) {
return false;
}
// Now check that Dst matches DstReg.
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
if (!TargetRegisterInfo::isPhysicalRegister(Dst))
return false;
assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state.");
// DstSub could be set for a physreg from INSERT_SUBREG.
if (DstSub)
Dst = TRI.getSubReg(Dst, DstSub);
// Full copy of Src.
if (!SrcSub)
return DstReg == Dst;
// This is a partial register copy. Check that the parts match.
return TRI.getSubReg(DstReg, SrcSub) == Dst;
} else {
// DstReg is virtual.
if (DstReg != Dst)
return false;
// Registers match, do the subregisters line up?
return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
TRI.composeSubRegIndices(DstIdx, DstSub);
}
}
void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addPreservedID(MachineDominatorsID);
MachineFunctionPass::getAnalysisUsage(AU);
}
void RegisterCoalescer::eliminateDeadDefs() {
SmallVector<unsigned, 8> NewRegs;
LiveRangeEdit(nullptr, NewRegs, *MF, *LIS,
nullptr, this).eliminateDeadDefs(DeadDefs);
}
void RegisterCoalescer::LRE_WillEraseInstruction(MachineInstr *MI) {
// MI may be in WorkList. Make sure we don't visit it.
ErasedInstrs.insert(MI);
}
bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
MachineInstr *CopyMI) {
assert(!CP.isPartial() && "This doesn't work for partial copies.");
assert(!CP.isPhys() && "This doesn't work for physreg copies.");
LiveInterval &IntA =
LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
LiveInterval &IntB =
LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot();
// We have a non-trivially-coalescable copy with IntA being the source and
// IntB being the dest, thus this defines a value number in IntB. If the
// source value number (in IntA) is defined by a copy from B, see if we can
// merge these two pieces of B into a single value number, eliminating a copy.
// For example:
//
// A3 = B0
// ...
// B1 = A3 <- this copy
//
// In this case, B0 can be extended to where the B1 copy lives, allowing the
// B1 value number to be replaced with B0 (which simplifies the B
// liveinterval).
// BValNo is a value number in B that is defined by a copy from A. 'B1' in
// the example above.
LiveInterval::iterator BS = IntB.FindSegmentContaining(CopyIdx);
if (BS == IntB.end()) return false;
VNInfo *BValNo = BS->valno;
// Get the location that B is defined at. Two options: either this value has
// an unknown definition point or it is defined at CopyIdx. If unknown, we
// can't process it.
if (BValNo->def != CopyIdx) return false;
// AValNo is the value number in A that defines the copy, A3 in the example.
SlotIndex CopyUseIdx = CopyIdx.getRegSlot(true);
LiveInterval::iterator AS = IntA.FindSegmentContaining(CopyUseIdx);
// The live segment might not exist after fun with physreg coalescing.
if (AS == IntA.end()) return false;
VNInfo *AValNo = AS->valno;
// If AValNo is defined as a copy from IntB, we can potentially process this.
// Get the instruction that defines this value number.
MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def);
// Don't allow any partial copies, even if isCoalescable() allows them.
if (!CP.isCoalescable(ACopyMI) || !ACopyMI->isFullCopy())
return false;
// Get the Segment in IntB that this value number starts with.
LiveInterval::iterator ValS =
IntB.FindSegmentContaining(AValNo->def.getPrevSlot());
if (ValS == IntB.end())
return false;
// Make sure that the end of the live segment is inside the same block as
// CopyMI.
MachineInstr *ValSEndInst =
LIS->getInstructionFromIndex(ValS->end.getPrevSlot());
if (!ValSEndInst || ValSEndInst->getParent() != CopyMI->getParent())
return false;
// Okay, we now know that ValS ends in the same block that the CopyMI
// live-range starts. If there are no intervening live segments between them
// in IntB, we can merge them.
if (ValS+1 != BS) return false;
DEBUG(dbgs() << "Extending: " << PrintReg(IntB.reg, TRI));
SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
// We are about to delete CopyMI, so need to remove it as the 'instruction
// that defines this value #'. Update the valnum with the new defining
// instruction #.
BValNo->def = FillerStart;
// Okay, we can merge them. We need to insert a new liverange:
// [ValS.end, BS.begin) of either value number, then we merge the
// two value numbers.
IntB.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, BValNo));
// Okay, merge "B1" into the same value number as "B0".
if (BValNo != ValS->valno)
IntB.MergeValueNumberInto(BValNo, ValS->valno);
// Do the same for the subregister segments.
for (LiveInterval::SubRange &S : IntB.subranges()) {
VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
S.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, SubBValNo));
VNInfo *SubValSNo = S.getVNInfoAt(AValNo->def.getPrevSlot());
if (SubBValNo != SubValSNo)
S.MergeValueNumberInto(SubBValNo, SubValSNo);
}
DEBUG(dbgs() << " result = " << IntB << '\n');
// If the source instruction was killing the source register before the
// merge, unset the isKill marker given the live range has been extended.
int UIdx = ValSEndInst->findRegisterUseOperandIdx(IntB.reg, true);
if (UIdx != -1) {
ValSEndInst->getOperand(UIdx).setIsKill(false);
}
// Rewrite the copy. If the copy instruction was killing the destination
// register before the merge, find the last use and trim the live range. That
// will also add the isKill marker.
CopyMI->substituteRegister(IntA.reg, IntB.reg, 0, *TRI);
if (AS->end == CopyIdx)
shrinkToUses(&IntA);
++numExtends;
return true;
}
bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
LiveInterval &IntB,
VNInfo *AValNo,
VNInfo *BValNo) {
// If AValNo has PHI kills, conservatively assume that IntB defs can reach
// the PHI values.
if (LIS->hasPHIKill(IntA, AValNo))
return true;
for (LiveRange::Segment &ASeg : IntA.segments) {
if (ASeg.valno != AValNo) continue;
LiveInterval::iterator BI =
std::upper_bound(IntB.begin(), IntB.end(), ASeg.start);
if (BI != IntB.begin())
--BI;
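// BI now points at the last IntB segment starting at or before ASeg.start,
// so the scan below visits every segment that could overlap ASeg.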
for (; BI != IntB.end() && ASeg.end >= BI->start; ++BI) {
if (BI->valno == BValNo)
continue;
if (BI->start <= ASeg.start && BI->end > ASeg.start)
return true;
if (BI->start > ASeg.start && BI->start < ASeg.end)
return true;
}
}
return false;
}
/// Copy segments with value number @p SrcValNo from live range @p Src to live
/// range @p Dst and use value number @p DstValNo there.
static void addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo,
const LiveRange &Src, const VNInfo *SrcValNo)
{
for (const LiveRange::Segment &S : Src.segments) {
if (S.valno != SrcValNo)
continue;
Dst.addSegment(LiveRange::Segment(S.start, S.end, DstValNo));
}
}
bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
MachineInstr *CopyMI) {
assert(!CP.isPhys());
LiveInterval &IntA =
LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
LiveInterval &IntB =
LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
// We found a non-trivially-coalescable copy with IntA being the source and
// IntB being the dest, thus this defines a value number in IntB. If the
// source value number (in IntA) is defined by a commutable instruction and
// its other operand is coalesced to the copy dest register, see if we can
// transform the copy into a noop by commuting the definition. For example,
//
// A3 = op A2 B0<kill>
// ...
// B1 = A3 <- this copy
// ...
// = op A3 <- more uses
//
// ==>
//
// B2 = op B0 A2<kill>
// ...
// B1 = B2 <- now an identity copy
// ...
// = op B2 <- more uses
// BValNo is a value number in B that is defined by a copy from A. 'B1' in
// the example above.
SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot();
VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
assert(BValNo != nullptr && BValNo->def == CopyIdx);
// AValNo is the value number in A that defines the copy, A3 in the example.
VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getRegSlot(true));
assert(AValNo && !AValNo->isUnused() && "COPY source not live");
if (AValNo->isPHIDef())
return false;
MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def);
if (!DefMI)
return false;
if (!DefMI->isCommutable())
return false;
// If DefMI is a two-address instruction then commuting it will change the
// destination register.
int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg);
assert(DefIdx != -1);
unsigned UseOpIdx;
if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
return false;
unsigned Op1, Op2, NewDstIdx;
if (!TII->findCommutedOpIndices(DefMI, Op1, Op2))
return false;
if (Op1 == UseOpIdx)
NewDstIdx = Op2;
else if (Op2 == UseOpIdx)
NewDstIdx = Op1;
else
return false;
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
unsigned NewReg = NewDstMO.getReg();
if (NewReg != IntB.reg || !IntB.Query(AValNo->def).isKill())
return false;
// Make sure there are no other definitions of IntB that would reach the
// uses which the new definition can reach.
if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
return false;
// If some of the uses of IntA.reg are already coalesced away, return false.
// It's not possible to determine whether it's safe to perform the coalescing.
for (MachineOperand &MO : MRI->use_nodbg_operands(IntA.reg)) {
MachineInstr *UseMI = MO.getParent();
unsigned OpNo = &MO - &UseMI->getOperand(0);
SlotIndex UseIdx = LIS->getInstructionIndex(UseMI);
LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx);
if (US == IntA.end() || US->valno != AValNo)
continue;
// If this use is tied to a def, we can't rewrite the register.
if (UseMI->isRegTiedToDefOperand(OpNo))
return false;
}
DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'
<< *DefMI);
// At this point we have decided that it is legal to do this
// transformation. Start by commuting the instruction.
MachineBasicBlock *MBB = DefMI->getParent();
MachineInstr *NewMI = TII->commuteInstruction(DefMI);
if (!NewMI)
return false;
if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
TargetRegisterInfo::isVirtualRegister(IntB.reg) &&
!MRI->constrainRegClass(IntB.reg, MRI->getRegClass(IntA.reg)))
return false;
if (NewMI != DefMI) {
LIS->ReplaceMachineInstrInMaps(DefMI, NewMI);
MachineBasicBlock::iterator Pos = DefMI;
MBB->insert(Pos, NewMI);
MBB->erase(DefMI);
}
// If ALR and BLR overlap and the end of BLR extends beyond the end of ALR, e.g.
// A = or A, B
// ...
// B = A
// ...
// C = A<kill>
// ...
// = B
// Update uses of IntA of the specific Val# with IntB.
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(IntA.reg),
UE = MRI->use_end();
UI != UE; /* ++UI is below because of possible MI removal */) {
MachineOperand &UseMO = *UI;
++UI;
if (UseMO.isUndef())
continue;
MachineInstr *UseMI = UseMO.getParent();
if (UseMI->isDebugValue()) {
// FIXME These don't have an instruction index. Not clear we have enough
// info to decide whether to do this replacement or not. For now do it.
UseMO.setReg(NewReg);
continue;
}
SlotIndex UseIdx = LIS->getInstructionIndex(UseMI).getRegSlot(true);
LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx);
assert(US != IntA.end() && "Use must be live");
if (US->valno != AValNo)
continue;
// Kill flags are no longer accurate. They are recomputed after RA.
UseMO.setIsKill(false);
if (TargetRegisterInfo::isPhysicalRegister(NewReg))
UseMO.substPhysReg(NewReg, *TRI);
else
UseMO.setReg(NewReg);
if (UseMI == CopyMI)
continue;
if (!UseMI->isCopy())
continue;
if (UseMI->getOperand(0).getReg() != IntB.reg ||
UseMI->getOperand(0).getSubReg())
continue;
// This copy will become a noop. If it's defining a new val#, merge it into
// BValNo.
SlotIndex DefIdx = UseIdx.getRegSlot();
VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
if (!DVNI)
continue;
DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
assert(DVNI->def == DefIdx);
BValNo = IntB.MergeValueNumberInto(DVNI, BValNo);
for (LiveInterval::SubRange &S : IntB.subranges()) {
VNInfo *SubDVNI = S.getVNInfoAt(DefIdx);
if (!SubDVNI)
continue;
VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
assert(SubBValNo->def == CopyIdx);
S.MergeValueNumberInto(SubDVNI, SubBValNo);
}
ErasedInstrs.insert(UseMI);
LIS->RemoveMachineInstrFromMaps(UseMI);
UseMI->eraseFromParent();
}
// Extend BValNo by merging in IntA live segments of AValNo. Val# definition
// is updated.
BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
if (IntB.hasSubRanges()) {
if (!IntA.hasSubRanges()) {
unsigned Mask = MRI->getMaxLaneMaskForVReg(IntA.reg);
IntA.createSubRangeFrom(Allocator, Mask, IntA);
}
SlotIndex AIdx = CopyIdx.getRegSlot(true);
for (LiveInterval::SubRange &SA : IntA.subranges()) {
VNInfo *ASubValNo = SA.getVNInfoAt(AIdx);
assert(ASubValNo != nullptr);
unsigned AMask = SA.LaneMask;
for (LiveInterval::SubRange &SB : IntB.subranges()) {
unsigned BMask = SB.LaneMask;
unsigned Common = BMask & AMask;
if (Common == 0)
continue;
DEBUG(
dbgs() << format("\t\tCopy+Merge %04X into %04X\n", BMask, Common));
unsigned BRest = BMask & ~AMask;
LiveInterval::SubRange *CommonRange;
if (BRest != 0) {
SB.LaneMask = BRest;
DEBUG(dbgs() << format("\t\tReduce Lane to %04X\n", BRest));
// Duplicate SubRange for newly merged common stuff.
CommonRange = IntB.createSubRangeFrom(Allocator, Common, SB);
} else {
// We can reuse the SB SubRange.
SB.LaneMask = Common;
CommonRange = &SB;
}
LiveRange RangeCopy(SB, Allocator);
VNInfo *BSubValNo = CommonRange->getVNInfoAt(CopyIdx);
assert(BSubValNo->def == CopyIdx);
BSubValNo->def = ASubValNo->def;
addSegmentsWithValNo(*CommonRange, BSubValNo, SA, ASubValNo);
AMask &= ~BMask;
}
if (AMask != 0) {
DEBUG(dbgs() << format("\t\tNew Lane %04X\n", AMask));
LiveRange *NewRange = IntB.createSubRange(Allocator, AMask);
VNInfo *BSubValNo = NewRange->getNextValue(CopyIdx, Allocator);
addSegmentsWithValNo(*NewRange, BSubValNo, SA, ASubValNo);
}
}
}
BValNo->def = AValNo->def;
addSegmentsWithValNo(IntB, BValNo, IntA, AValNo);
DEBUG(dbgs() << "\t\textended: " << IntB << '\n');
LIS->removeVRegDefAt(IntA, AValNo->def);
DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n');
++numCommutes;
return true;
}
/// Returns true if @p MI defines the full vreg @p Reg, as opposed to just
/// defining a subregister.
static bool definesFullReg(const MachineInstr &MI, unsigned Reg) {
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) &&
"This code cannot handle physreg aliasing");
for (const MachineOperand &Op : MI.operands()) {
if (!Op.isReg() || !Op.isDef() || Op.getReg() != Reg)
continue;
// Return true if we define the full register or don't care about the value
// inside other subregisters.
if (Op.getSubReg() == 0 || Op.isUndef())
return true;
}
return false;
}
bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
MachineInstr *CopyMI,
bool &IsDefCopy) {
IsDefCopy = false;
unsigned SrcReg = CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg();
unsigned SrcIdx = CP.isFlipped() ? CP.getDstIdx() : CP.getSrcIdx();
unsigned DstReg = CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg();
unsigned DstIdx = CP.isFlipped() ? CP.getSrcIdx() : CP.getDstIdx();
if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
return false;
LiveInterval &SrcInt = LIS->getInterval(SrcReg);
SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI);
VNInfo *ValNo = SrcInt.Query(CopyIdx).valueIn();
assert(ValNo && "CopyMI input register not live");
if (ValNo->isPHIDef() || ValNo->isUnused())
return false;
MachineInstr *DefMI = LIS->getInstructionFromIndex(ValNo->def);
if (!DefMI)
return false;
if (DefMI->isCopyLike()) {
IsDefCopy = true;
return false;
}
if (!TII->isAsCheapAsAMove(DefMI))
return false;
if (!TII->isTriviallyReMaterializable(DefMI, AA))
return false;
if (!definesFullReg(*DefMI, SrcReg))
return false;
bool SawStore = false;
if (!DefMI->isSafeToMove(AA, SawStore))
return false;
const MCInstrDesc &MCID = DefMI->getDesc();
if (MCID.getNumDefs() != 1)
return false;
// Only support subregister destinations when the def is read-undef.
MachineOperand &DstOperand = CopyMI->getOperand(0);
unsigned CopyDstReg = DstOperand.getReg();
if (DstOperand.getSubReg() && !DstOperand.isUndef())
return false;
// If both SrcIdx and DstIdx are set, correct rematerialization would widen
// the register substantially (beyond both source and dest size). This is bad
// for performance since it can cascade through a function, introducing many
// extra spills and fills (e.g. ARM can easily end up copying QQQQPR registers
// around after a few subreg copies).
if (SrcIdx && DstIdx)
return false;
const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0, TRI, *MF);
if (!DefMI->isImplicitDef()) {
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
unsigned NewDstReg = DstReg;
unsigned NewDstIdx = TRI->composeSubRegIndices(CP.getSrcIdx(),
DefMI->getOperand(0).getSubReg());
if (NewDstIdx)
NewDstReg = TRI->getSubReg(DstReg, NewDstIdx);
// Finally, make sure that the physical subregister that will be
// constructed later is permitted for the instruction.
if (!DefRC->contains(NewDstReg))
return false;
} else {
// Theoretically, some stack frame reference could exist. Just make sure
// it hasn't actually happened.
assert(TargetRegisterInfo::isVirtualRegister(DstReg) &&
"Only expect to deal with virtual or physical registers");
}
}
MachineBasicBlock *MBB = CopyMI->getParent();
MachineBasicBlock::iterator MII =
std::next(MachineBasicBlock::iterator(CopyMI));
TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, DefMI, *TRI);
MachineInstr *NewMI = std::prev(MII);
// In a situation like the following:
// %vreg0:subreg = instr ; DefMI, subreg = DstIdx
// %vreg1 = copy %vreg0:subreg ; CopyMI, SrcIdx = 0
// instead of widening %vreg1 to the register class of %vreg0 simply do:
// %vreg1 = instr
const TargetRegisterClass *NewRC = CP.getNewRC();
if (DstIdx != 0) {
MachineOperand &DefMO = NewMI->getOperand(0);
if (DefMO.getSubReg() == DstIdx) {
assert(SrcIdx == 0 && CP.isFlipped()
&& "Shouldn't have SrcIdx+DstIdx at this point");
const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
const TargetRegisterClass *CommonRC =
TRI->getCommonSubClass(DefRC, DstRC);
if (CommonRC != nullptr) {
NewRC = CommonRC;
DstIdx = 0;
DefMO.setSubReg(0);
}
}
}
LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI);
CopyMI->eraseFromParent();
ErasedInstrs.insert(CopyMI);
// NewMI may have dead implicit defs (E.g. EFLAGS for MOV<bits>r0 on X86).
// We need to remember these so we can add intervals once we insert
// NewMI into SlotIndexes.
SmallVector<unsigned, 4> NewMIImplDefs;
for (unsigned i = NewMI->getDesc().getNumOperands(),
e = NewMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = NewMI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
assert(MO.isImplicit() && MO.isDead() &&
TargetRegisterInfo::isPhysicalRegister(MO.getReg()));
NewMIImplDefs.push_back(MO.getReg());
}
}
if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
unsigned NewIdx = NewMI->getOperand(0).getSubReg();
if (DefRC != nullptr) {
if (NewIdx)
NewRC = TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx);
else
NewRC = TRI->getCommonSubClass(NewRC, DefRC);
assert(NewRC && "subreg chosen for remat incompatible with instruction");
}
MRI->setRegClass(DstReg, NewRC);
updateRegDefsUses(DstReg, DstReg, DstIdx);
NewMI->getOperand(0).setSubReg(NewIdx);
} else if (NewMI->getOperand(0).getReg() != CopyDstReg) {
// The new instruction may be defining a sub-register of what was actually
// asked for. If so, it must implicitly define the whole thing.
assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
"Only expect virtual or physical registers in remat");
NewMI->getOperand(0).setIsDead(true);
NewMI->addOperand(MachineOperand::CreateReg(CopyDstReg,
true /*IsDef*/,
true /*IsImp*/,
false /*IsKill*/));
// Record small dead def live-ranges for all the subregisters
// of the destination register.
// Otherwise, variables that live through may miss some
// interferences, thus creating invalid allocation.
// E.g., i386 code:
// vreg1 = somedef ; vreg1 GR8
// vreg2 = remat ; vreg2 GR32
// CL = COPY vreg2.sub_8bit
// = somedef vreg1 ; vreg1 GR8
// =>
// vreg1 = somedef ; vreg1 GR8
// ECX<def, dead> = remat ; CL<imp-def>
// = somedef vreg1 ; vreg1 GR8
// vreg1 will see the interferences with CL but not with CH since
// no live-ranges would have been created for ECX.
// Fix that!
SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
for (MCRegUnitIterator Units(NewMI->getOperand(0).getReg(), TRI);
Units.isValid(); ++Units)
if (LiveRange *LR = LIS->getCachedRegUnit(*Units))
LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
}
if (NewMI->getOperand(0).getSubReg())
NewMI->getOperand(0).setIsUndef();
// CopyMI may have implicit operands, transfer them over to the newly
// rematerialized instruction. And update implicit def interval valnos.
for (unsigned i = CopyMI->getDesc().getNumOperands(),
e = CopyMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = CopyMI->getOperand(i);
if (MO.isReg()) {
assert(MO.isImplicit() && "No explicit operands after implicit operands.");
// Discard VReg implicit defs.
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
NewMI->addOperand(MO);
}
}
}
SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
for (unsigned i = 0, e = NewMIImplDefs.size(); i != e; ++i) {
unsigned Reg = NewMIImplDefs[i];
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
if (LiveRange *LR = LIS->getCachedRegUnit(*Units))
LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
}
DEBUG(dbgs() << "Remat: " << *NewMI);
++NumReMats;
// The source interval can become smaller because we removed a use.
shrinkToUses(&SrcInt, &DeadDefs);
if (!DeadDefs.empty()) {
// If the virtual SrcReg is completely eliminated, update all DBG_VALUEs
// to describe DstReg instead.
for (MachineOperand &UseMO : MRI->use_operands(SrcReg)) {
MachineInstr *UseMI = UseMO.getParent();
if (UseMI->isDebugValue()) {
UseMO.setReg(DstReg);
DEBUG(dbgs() << "\t\tupdated: " << *UseMI);
}
}
eliminateDeadDefs();
}
return true;
}
bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
// ProcessImplicitDefs may leave some copies of <undef> values; it only removes
// local variables. When we have a copy like:
//
// %vreg1 = COPY %vreg2<undef>
//
// We delete the copy and remove the corresponding value number from %vreg1.
// Any uses of that value number are marked as <undef>.
// Note that we do not query CoalescerPair here but redo isMoveInstr as the
// CoalescerPair may have a new register class with adjusted subreg indices
// at this point.
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
isMoveInstr(*TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
SlotIndex Idx = LIS->getInstructionIndex(CopyMI);
const LiveInterval &SrcLI = LIS->getInterval(SrcReg);
// CopyMI is undef iff SrcReg is not live before the instruction.
if (SrcSubIdx != 0 && SrcLI.hasSubRanges()) {
unsigned SrcMask = TRI->getSubRegIndexLaneMask(SrcSubIdx);
for (const LiveInterval::SubRange &SR : SrcLI.subranges()) {
if ((SR.LaneMask & SrcMask) == 0)
continue;
if (SR.liveAt(Idx))
return false;
}
} else if (SrcLI.liveAt(Idx))
return false;
DEBUG(dbgs() << "\tEliminating copy of <undef> value\n");
// Remove any DstReg segments starting at the instruction.
LiveInterval &DstLI = LIS->getInterval(DstReg);
SlotIndex RegIndex = Idx.getRegSlot();
// Remove value or merge with previous one in case of a subregister def.
if (VNInfo *PrevVNI = DstLI.getVNInfoAt(Idx)) {
VNInfo *VNI = DstLI.getVNInfoAt(RegIndex);
DstLI.MergeValueNumberInto(VNI, PrevVNI);
// The affected subregister segments can be removed.
unsigned DstMask = TRI->getSubRegIndexLaneMask(DstSubIdx);
for (LiveInterval::SubRange &SR : DstLI.subranges()) {
if ((SR.LaneMask & DstMask) == 0)
continue;
VNInfo *SVNI = SR.getVNInfoAt(RegIndex);
assert(SVNI != nullptr && SlotIndex::isSameInstr(SVNI->def, RegIndex));
SR.removeValNo(SVNI);
}
DstLI.removeEmptySubRanges();
} else
LIS->removeVRegDefAt(DstLI, RegIndex);
// Mark uses as undef.
for (MachineOperand &MO : MRI->reg_nodbg_operands(DstReg)) {
if (MO.isDef() /*|| MO.isUndef()*/)
continue;
const MachineInstr &MI = *MO.getParent();
SlotIndex UseIdx = LIS->getInstructionIndex(&MI);
unsigned UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
bool isLive;
if (UseMask != ~0u && DstLI.hasSubRanges()) {
isLive = false;
for (const LiveInterval::SubRange &SR : DstLI.subranges()) {
if ((SR.LaneMask & UseMask) == 0)
continue;
if (SR.liveAt(UseIdx)) {
isLive = true;
break;
}
}
} else
isLive = DstLI.liveAt(UseIdx);
if (isLive)
continue;
MO.setIsUndef(true);
DEBUG(dbgs() << "\tnew undef: " << UseIdx << '\t' << MI);
}
return true;
}
void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
unsigned DstReg,
unsigned SubIdx) {
bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
LiveInterval *DstInt = DstIsPhys ? nullptr : &LIS->getInterval(DstReg);
SmallPtrSet<MachineInstr*, 8> Visited;
for (MachineRegisterInfo::reg_instr_iterator
I = MRI->reg_instr_begin(SrcReg), E = MRI->reg_instr_end();
I != E; ) {
MachineInstr *UseMI = &*(I++);
// Each instruction can only be rewritten once because sub-register
// composition is not always idempotent. When SrcReg != DstReg, rewriting
// the UseMI operands removes them from the SrcReg use-def chain, but when
// SrcReg is DstReg we could encounter UseMI twice if it has multiple
// operands mentioning the virtual register.
if (SrcReg == DstReg && !Visited.insert(UseMI).second)
continue;
SmallVector<unsigned,8> Ops;
bool Reads, Writes;
std::tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
// If SrcReg wasn't read, it may still be the case that DstReg is live-in
// because SrcReg is a sub-register.
if (DstInt && !Reads && SubIdx)
Reads = DstInt->liveAt(LIS->getInstructionIndex(UseMI));
// Replace SrcReg with DstReg in all UseMI operands.
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = UseMI->getOperand(Ops[i]);
// Adjust <undef> flags in case of sub-register joins. We don't want to
// turn a full def into a read-modify-write sub-register def and vice
// versa.
if (SubIdx && MO.isDef())
MO.setIsUndef(!Reads);
// A subreg use of a partially undef (super) register may be a complete
// undef use now and then has to be marked that way.
if (SubIdx != 0 && MO.isUse() && MRI->shouldTrackSubRegLiveness(DstReg)) {
if (!DstInt->hasSubRanges()) {
BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
unsigned Mask = MRI->getMaxLaneMaskForVReg(DstInt->reg);
DstInt->createSubRangeFrom(Allocator, Mask, *DstInt);
}
unsigned Mask = TRI->getSubRegIndexLaneMask(SubIdx);
bool IsUndef = true;
SlotIndex MIIdx = UseMI->isDebugValue()
? LIS->getSlotIndexes()->getIndexBefore(UseMI)
: LIS->getInstructionIndex(UseMI);
SlotIndex UseIdx = MIIdx.getRegSlot(true);
for (LiveInterval::SubRange &S : DstInt->subranges()) {
if ((S.LaneMask & Mask) == 0)
continue;
if (S.liveAt(UseIdx)) {
IsUndef = false;
break;
}
}
if (IsUndef) {
MO.setIsUndef(true);
// We found out some subregister use is actually reading an undefined
// value. In some cases the whole vreg has become undefined at this
// point so we have to potentially shrink the main range if the
// use was ending a live segment there.
LiveQueryResult Q = DstInt->Query(MIIdx);
if (Q.valueOut() == nullptr)
ShrinkMainRange = true;
}
}
if (DstIsPhys)
MO.substPhysReg(DstReg, *TRI);
else
MO.substVirtReg(DstReg, SubIdx, *TRI);
}
DEBUG({
dbgs() << "\t\tupdated: ";
if (!UseMI->isDebugValue())
dbgs() << LIS->getInstructionIndex(UseMI) << "\t";
dbgs() << *UseMI;
});
}
}
bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) {
// Always join simple intervals that are defined by a single copy from a
// reserved register. This doesn't increase register pressure, so it is
// always beneficial.
if (!MRI->isReserved(CP.getDstReg())) {
DEBUG(dbgs() << "\tCan only merge into reserved registers.\n");
return false;
}
LiveInterval &JoinVInt = LIS->getInterval(CP.getSrcReg());
if (JoinVInt.containsOneValue())
return true;
DEBUG(dbgs() << "\tCannot join complex intervals into reserved register.\n");
return false;
}
bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
Again = false;
DEBUG(dbgs() << LIS->getInstructionIndex(CopyMI) << '\t' << *CopyMI);
CoalescerPair CP(*TRI);
if (!CP.setRegisters(CopyMI)) {
DEBUG(dbgs() << "\tNot coalescable.\n");
return false;
}
if (CP.getNewRC()) {
auto SrcRC = MRI->getRegClass(CP.getSrcReg());
auto DstRC = MRI->getRegClass(CP.getDstReg());
unsigned SrcIdx = CP.getSrcIdx();
unsigned DstIdx = CP.getDstIdx();
if (CP.isFlipped()) {
std::swap(SrcIdx, DstIdx);
std::swap(SrcRC, DstRC);
}
if (!TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx,
CP.getNewRC())) {
DEBUG(dbgs() << "\tSubtarget bailed on coalescing.\n");
return false;
}
}
// Dead code elimination. This really should be handled by MachineDCE, but
// sometimes dead copies slip through, and we can't generate invalid live
// ranges.
if (!CP.isPhys() && CopyMI->allDefsAreDead()) {
DEBUG(dbgs() << "\tCopy is dead.\n");
DeadDefs.push_back(CopyMI);
eliminateDeadDefs();
return true;
}
// Eliminate undefs.
if (!CP.isPhys() && eliminateUndefCopy(CopyMI)) {
LIS->RemoveMachineInstrFromMaps(CopyMI);
CopyMI->eraseFromParent();
return false; // Not coalescable.
}
// Coalesced copies are normally removed immediately, but transformations
// like removeCopyByCommutingDef() can inadvertently create identity copies.
// When that happens, just join the values and remove the copy.
if (CP.getSrcReg() == CP.getDstReg()) {
LiveInterval &LI = LIS->getInterval(CP.getSrcReg());
DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n');
const SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI);
LiveQueryResult LRQ = LI.Query(CopyIdx);
if (VNInfo *DefVNI = LRQ.valueDefined()) {
VNInfo *ReadVNI = LRQ.valueIn();
assert(ReadVNI && "No value before copy and no <undef> flag.");
assert(ReadVNI != DefVNI && "Cannot read and define the same value.");
LI.MergeValueNumberInto(DefVNI, ReadVNI);
// Process subregister liveranges.
for (LiveInterval::SubRange &S : LI.subranges()) {
LiveQueryResult SLRQ = S.Query(CopyIdx);
if (VNInfo *SDefVNI = SLRQ.valueDefined()) {
VNInfo *SReadVNI = SLRQ.valueIn();
S.MergeValueNumberInto(SDefVNI, SReadVNI);
}
}
DEBUG(dbgs() << "\tMerged values: " << LI << '\n');
}
LIS->RemoveMachineInstrFromMaps(CopyMI);
CopyMI->eraseFromParent();
return true;
}
// Enforce policies.
if (CP.isPhys()) {
DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), TRI)
<< " with " << PrintReg(CP.getDstReg(), TRI, CP.getSrcIdx())
<< '\n');
if (!canJoinPhys(CP)) {
// Before giving up coalescing, if definition of source is defined by
// trivial computation, try rematerializing it.
bool IsDefCopy;
if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
return true;
if (IsDefCopy)
Again = true; // May be possible to coalesce later.
return false;
}
} else {
// When possible, let DstReg be the larger interval.
if (!CP.isPartial() && LIS->getInterval(CP.getSrcReg()).size() >
LIS->getInterval(CP.getDstReg()).size())
CP.flip();
DEBUG({
dbgs() << "\tConsidering merging to "
<< TRI->getRegClassName(CP.getNewRC()) << " with ";
if (CP.getDstIdx() && CP.getSrcIdx())
dbgs() << PrintReg(CP.getDstReg()) << " in "
<< TRI->getSubRegIndexName(CP.getDstIdx()) << " and "
<< PrintReg(CP.getSrcReg()) << " in "
<< TRI->getSubRegIndexName(CP.getSrcIdx()) << '\n';
else
dbgs() << PrintReg(CP.getSrcReg(), TRI) << " in "
<< PrintReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n';
});
}
ShrinkMask = 0;
ShrinkMainRange = false;
// Okay, attempt to join these two intervals. On failure, this returns false.
// Otherwise, if one of the intervals being joined is a physreg, this method
// always canonicalizes DstInt to be it. The output "SrcInt" will not have
// been modified, so we can use this information below to update aliases.
if (!joinIntervals(CP)) {
// Coalescing failed.
    // If the definition of the source is a trivial computation, try
    // rematerializing it.
bool IsDefCopy;
if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
return true;
// If we can eliminate the copy without merging the live segments, do so
// now.
if (!CP.isPartial() && !CP.isPhys()) {
if (adjustCopiesBackFrom(CP, CopyMI) ||
removeCopyByCommutingDef(CP, CopyMI)) {
LIS->RemoveMachineInstrFromMaps(CopyMI);
CopyMI->eraseFromParent();
DEBUG(dbgs() << "\tTrivial!\n");
return true;
}
}
// Otherwise, we are unable to join the intervals.
DEBUG(dbgs() << "\tInterference!\n");
Again = true; // May be possible to coalesce later.
return false;
}
  // When coalescing to a virtual register whose class is a sub-register class
  // of the other's, make sure the resulting register is set to the right
  // register class.
if (CP.isCrossClass()) {
++numCrossRCs;
MRI->setRegClass(CP.getDstReg(), CP.getNewRC());
}
// Removing sub-register copies can ease the register class constraints.
// Make sure we attempt to inflate the register class of DstReg.
if (!CP.isPhys() && RegClassInfo.isProperSubClass(CP.getNewRC()))
InflateRegs.push_back(CP.getDstReg());
// CopyMI has been erased by joinIntervals at this point. Remove it from
// ErasedInstrs since copyCoalesceWorkList() won't add a successful join back
// to the work list. This keeps ErasedInstrs from growing needlessly.
ErasedInstrs.erase(CopyMI);
// Rewrite all SrcReg operands to DstReg.
// Also update DstReg operands to include DstIdx if it is set.
if (CP.getDstIdx())
updateRegDefsUses(CP.getDstReg(), CP.getDstReg(), CP.getDstIdx());
updateRegDefsUses(CP.getSrcReg(), CP.getDstReg(), CP.getSrcIdx());
// Shrink subregister ranges if necessary.
if (ShrinkMask != 0) {
LiveInterval &LI = LIS->getInterval(CP.getDstReg());
for (LiveInterval::SubRange &S : LI.subranges()) {
if ((S.LaneMask & ShrinkMask) == 0)
continue;
DEBUG(dbgs() << "Shrink LaneUses (Lane "
<< format("%04X", S.LaneMask) << ")\n");
LIS->shrinkToUses(S, LI.reg);
}
LI.removeEmptySubRanges();
}
if (ShrinkMainRange) {
LiveInterval &LI = LIS->getInterval(CP.getDstReg());
shrinkToUses(&LI);
}
  // SrcReg is guaranteed to be the register whose live interval is being
  // merged.
LIS->removeInterval(CP.getSrcReg());
// Update regalloc hint.
TRI->updateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *MF);
DEBUG({
dbgs() << "\tSuccess: " << PrintReg(CP.getSrcReg(), TRI, CP.getSrcIdx())
<< " -> " << PrintReg(CP.getDstReg(), TRI, CP.getDstIdx()) << '\n';
dbgs() << "\tResult = ";
if (CP.isPhys())
dbgs() << PrintReg(CP.getDstReg(), TRI);
else
dbgs() << LIS->getInterval(CP.getDstReg());
dbgs() << '\n';
});
++numJoins;
return true;
}
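/// Attempt joining CP when the destination is a reserved physical register.
/// Return true on success.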
bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
unsigned DstReg = CP.getDstReg();
assert(CP.isPhys() && "Must be a physreg copy");
assert(MRI->isReserved(DstReg) && "Not a reserved register");
LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n');
assert(RHS.containsOneValue() && "Invalid join with reserved register");
// Optimization for reserved registers like ESP. We can only merge with a
// reserved physreg if RHS has a single value that is a copy of DstReg.
// The live range of the reserved register will look like a set of dead defs
// - we don't properly track the live range of reserved registers.
  // Deny any overlapping intervals. This depends on all the reserved
  // register live ranges looking like dead defs.
for (MCRegUnitIterator UI(DstReg, TRI); UI.isValid(); ++UI)
if (RHS.overlaps(LIS->getRegUnit(*UI))) {
DEBUG(dbgs() << "\t\tInterference: " << PrintRegUnit(*UI, TRI) << '\n');
return false;
}
// Skip any value computations, we are not adding new values to the
// reserved register. Also skip merging the live ranges, the reserved
// register live range doesn't need to be accurate as long as all the
// defs are there.
// Delete the identity copy.
MachineInstr *CopyMI;
if (CP.isFlipped()) {
CopyMI = MRI->getVRegDef(RHS.reg);
} else {
if (!MRI->hasOneNonDBGUse(RHS.reg)) {
DEBUG(dbgs() << "\t\tMultiple vreg uses!\n");
return false;
}
MachineInstr *DestMI = MRI->getVRegDef(RHS.reg);
CopyMI = &*MRI->use_instr_nodbg_begin(RHS.reg);
const SlotIndex CopyRegIdx = LIS->getInstructionIndex(CopyMI).getRegSlot();
const SlotIndex DestRegIdx = LIS->getInstructionIndex(DestMI).getRegSlot();
    // We checked above that there are no interfering defs of the physical
    // register. However, for this case, where we intend to move up the def of
    // the physical register, we also need to check for interfering uses.
SlotIndexes *Indexes = LIS->getSlotIndexes();
for (SlotIndex SI = Indexes->getNextNonNullIndex(DestRegIdx);
SI != CopyRegIdx; SI = Indexes->getNextNonNullIndex(SI)) {
MachineInstr *MI = LIS->getInstructionFromIndex(SI);
if (MI->readsRegister(DstReg, TRI)) {
DEBUG(dbgs() << "\t\tInterference (read): " << *MI);
return false;
}
// We must also check for clobbers caused by regmasks.
for (const auto &MO : MI->operands()) {
if (MO.isRegMask() && MO.clobbersPhysReg(DstReg)) {
DEBUG(dbgs() << "\t\tInterference (regmask clobber): " << *MI);
return false;
}
}
}
// We're going to remove the copy which defines a physical reserved
// register, so remove its valno, etc.
DEBUG(dbgs() << "\t\tRemoving phys reg def of " << DstReg << " at "
<< CopyRegIdx << "\n");
LIS->removePhysRegDefAt(DstReg, CopyRegIdx);
// Create a new dead def at the new def location.
for (MCRegUnitIterator UI(DstReg, TRI); UI.isValid(); ++UI) {
LiveRange &LR = LIS->getRegUnit(*UI);
LR.createDeadDef(DestRegIdx, LIS->getVNInfoAllocator());
}
}
LIS->RemoveMachineInstrFromMaps(CopyMI);
CopyMI->eraseFromParent();
// We don't track kills for reserved registers.
MRI->clearKillFlags(CP.getSrcReg());
return true;
}
//===----------------------------------------------------------------------===//
// Interference checking and interval joining
//===----------------------------------------------------------------------===//
//
// In the easiest case, the two live ranges being joined are disjoint, and
// there is no interference to consider. It is quite common, though, to have
// overlapping live ranges, and we need to check if the interference can be
// resolved.
//
// The live range of a single SSA value forms a sub-tree of the dominator tree.
// This means that two SSA values overlap if and only if the def of one value
// is contained in the live range of the other value. As a special case, the
// overlapping values can be defined at the same index.
//
// The interference from an overlapping def can be resolved in these cases:
//
// 1. Coalescable copies. The value is defined by a copy that would become an
// identity copy after joining SrcReg and DstReg. The copy instruction will
// be removed, and the value will be merged with the source value.
//
// There can be several copies back and forth, causing many values to be
// merged into one. We compute a list of ultimate values in the joined live
//    range as well as mappings from the old value numbers.
//
// 2. IMPLICIT_DEF. This instruction is only inserted to ensure all PHI
// predecessors have a live out value. It doesn't cause real interference,
// and can be merged into the value it overlaps. Like a coalescable copy, it
// can be erased after joining.
//
// 3. Copy of external value. The overlapping def may be a copy of a value that
// is already in the other register. This is like a coalescable copy, but
// the live range of the source register must be trimmed after erasing the
// copy instruction:
//
// %src = COPY %ext
// %dst = COPY %ext <-- Remove this COPY, trim the live range of %ext.
//
// 4. Clobbering undefined lanes. Vector registers are sometimes built by
// defining one lane at a time:
//
// %dst:ssub0<def,read-undef> = FOO
// %src = BAR
// %dst:ssub1<def> = COPY %src
//
// The live range of %src overlaps the %dst value defined by FOO, but
// merging %src into %dst:ssub1 is only going to clobber the ssub1 lane
// which was undef anyway.
//
// The value mapping is more complicated in this case. The final live range
// will have different value numbers for both FOO and BAR, but there is no
// simple mapping from old to new values. It may even be necessary to add
// new PHI values.
//
// 5. Clobbering dead lanes. A def may clobber a lane of a vector register that
// is live, but never read. This can happen because we don't compute
// individual live ranges per lane.
//
// %dst<def> = FOO
// %src = BAR
// %dst:ssub1<def> = COPY %src
//
// This kind of interference is only resolved locally. If the clobbered
// lane value escapes the block, the join is aborted.
namespace {
/// Track information about values in a single virtual register about to be
/// joined. Objects of this class are always created in pairs - one for each
/// side of the CoalescerPair (or one for each lane of a side of the coalescer
/// pair).
class JoinVals {
/// Live range we work on.
LiveRange &LR;
/// (Main) register we work on.
const unsigned Reg;
/// Reg (and therefore the values in this liverange) will end up as
/// subregister SubIdx in the coalesced register. Either CP.DstIdx or
/// CP.SrcIdx.
const unsigned SubIdx;
  /// The lane mask that this live range will occupy in the coalesced
  /// register. May be smaller than the lane mask produced by SubIdx when
  /// merging subranges.
const unsigned LaneMask;
/// This is true when joining sub register ranges, false when joining main
/// ranges.
const bool SubRangeJoin;
/// Whether the current LiveInterval tracks subregister liveness.
const bool TrackSubRegLiveness;
/// Values that will be present in the final live range.
SmallVectorImpl<VNInfo*> &NewVNInfo;
const CoalescerPair &CP;
LiveIntervals *LIS;
SlotIndexes *Indexes;
const TargetRegisterInfo *TRI;
/// Value number assignments. Maps value numbers in LI to entries in
/// NewVNInfo. This is suitable for passing to LiveInterval::join().
SmallVector<int, 8> Assignments;
/// Conflict resolution for overlapping values.
enum ConflictResolution {
/// No overlap, simply keep this value.
CR_Keep,
/// Merge this value into OtherVNI and erase the defining instruction.
/// Used for IMPLICIT_DEF, coalescable copies, and copies from external
/// values.
CR_Erase,
/// Merge this value into OtherVNI but keep the defining instruction.
/// This is for the special case where OtherVNI is defined by the same
/// instruction.
CR_Merge,
/// Keep this value, and have it replace OtherVNI where possible. This
/// complicates value mapping since OtherVNI maps to two different values
/// before and after this def.
/// Used when clobbering undefined or dead lanes.
CR_Replace,
/// Unresolved conflict. Visit later when all values have been mapped.
CR_Unresolved,
/// Unresolvable conflict. Abort the join.
CR_Impossible
};
/// Per-value info for LI. The lane bit masks are all relative to the final
/// joined register, so they can be compared directly between SrcReg and
/// DstReg.
struct Val {
ConflictResolution Resolution;
/// Lanes written by this def, 0 for unanalyzed values.
unsigned WriteLanes;
/// Lanes with defined values in this register. Other lanes are undef and
/// safe to clobber.
unsigned ValidLanes;
/// Value in LI being redefined by this def.
VNInfo *RedefVNI;
/// Value in the other live range that overlaps this def, if any.
VNInfo *OtherVNI;
/// Is this value an IMPLICIT_DEF that can be erased?
///
/// IMPLICIT_DEF values should only exist at the end of a basic block that
/// is a predecessor to a phi-value. These IMPLICIT_DEF instructions can be
/// safely erased if they are overlapping a live value in the other live
/// interval.
///
/// Weird control flow graphs and incomplete PHI handling in
/// ProcessImplicitDefs can very rarely create IMPLICIT_DEF values with
/// longer live ranges. Such IMPLICIT_DEF values should be treated like
/// normal values.
bool ErasableImplicitDef;
/// True when the live range of this value will be pruned because of an
/// overlapping CR_Replace value in the other live range.
bool Pruned;
/// True once Pruned above has been computed.
bool PrunedComputed;
Val() : Resolution(CR_Keep), WriteLanes(0), ValidLanes(0),
RedefVNI(nullptr), OtherVNI(nullptr), ErasableImplicitDef(false),
Pruned(false), PrunedComputed(false) {}
bool isAnalyzed() const { return WriteLanes != 0; }
};
/// One entry per value number in LI.
SmallVector<Val, 8> Vals;
/// Compute the bitmask of lanes actually written by DefMI.
/// Set Redef if there are any partial register definitions that depend on the
/// previous value of the register.
unsigned computeWriteLanes(const MachineInstr *DefMI, bool &Redef) const;
/// Find the ultimate value that VNI was copied from.
std::pair<const VNInfo*,unsigned> followCopyChain(const VNInfo *VNI) const;
bool valuesIdentical(VNInfo *Val0, VNInfo *Val1, const JoinVals &Other) const;
/// Analyze ValNo in this live range, and set all fields of Vals[ValNo].
/// Return a conflict resolution when possible, but leave the hard cases as
/// CR_Unresolved.
/// Recursively calls computeAssignment() on this and Other, guaranteeing that
/// both OtherVNI and RedefVNI have been analyzed and mapped before returning.
/// The recursion always goes upwards in the dominator tree, making loops
/// impossible.
ConflictResolution analyzeValue(unsigned ValNo, JoinVals &Other);
  /// Compute the value assignment for ValNo in LR.
/// This may be called recursively by analyzeValue(), but never for a ValNo on
/// the stack.
void computeAssignment(unsigned ValNo, JoinVals &Other);
/// Assuming ValNo is going to clobber some valid lanes in Other.LR, compute
/// the extent of the tainted lanes in the block.
///
/// Multiple values in Other.LR can be affected since partial redefinitions
/// can preserve previously tainted lanes.
///
/// 1 %dst = VLOAD <-- Define all lanes in %dst
/// 2 %src = FOO <-- ValNo to be joined with %dst:ssub0
/// 3 %dst:ssub1 = BAR <-- Partial redef doesn't clear taint in ssub0
/// 4 %dst:ssub0 = COPY %src <-- Conflict resolved, ssub0 wasn't read
///
/// For each ValNo in Other that is affected, add an (EndIndex, TaintedLanes)
/// entry to TaintedVals.
///
/// Returns false if the tainted lanes extend beyond the basic block.
bool taintExtent(unsigned, unsigned, JoinVals&,
SmallVectorImpl<std::pair<SlotIndex, unsigned> >&);
/// Return true if MI uses any of the given Lanes from Reg.
/// This does not include partial redefinitions of Reg.
bool usesLanes(const MachineInstr *MI, unsigned, unsigned, unsigned) const;
/// Determine if ValNo is a copy of a value number in LR or Other.LR that will
/// be pruned:
///
/// %dst = COPY %src
/// %src = COPY %dst <-- This value to be pruned.
/// %dst = COPY %src <-- This value is a copy of a pruned value.
bool isPrunedValue(unsigned ValNo, JoinVals &Other);
public:
JoinVals(LiveRange &LR, unsigned Reg, unsigned SubIdx, unsigned LaneMask,
SmallVectorImpl<VNInfo*> &newVNInfo, const CoalescerPair &cp,
LiveIntervals *lis, const TargetRegisterInfo *TRI, bool SubRangeJoin,
bool TrackSubRegLiveness)
: LR(LR), Reg(Reg), SubIdx(SubIdx), LaneMask(LaneMask),
SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
NewVNInfo(newVNInfo), CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
TRI(TRI), Assignments(LR.getNumValNums(), -1), Vals(LR.getNumValNums())
{}
/// Analyze defs in LR and compute a value mapping in NewVNInfo.
/// Returns false if any conflicts were impossible to resolve.
bool mapValues(JoinVals &Other);
/// Try to resolve conflicts that require all values to be mapped.
/// Returns false if any conflicts were impossible to resolve.
bool resolveConflicts(JoinVals &Other);
/// Prune the live range of values in Other.LR where they would conflict with
/// CR_Replace values in LR. Collect end points for restoring the live range
/// after joining.
void pruneValues(JoinVals &Other, SmallVectorImpl<SlotIndex> &EndPoints,
bool changeInstrs);
  /// Removes subranges starting at copies that get removed. This sometimes
  /// happens when undefined subranges are copied around. These ranges contain
  /// no useful information and can be removed.
void pruneSubRegValues(LiveInterval &LI, unsigned &ShrinkMask);
/// Erase any machine instructions that have been coalesced away.
/// Add erased instructions to ErasedInstrs.
/// Add foreign virtual registers to ShrinkRegs if their live range ended at
/// the erased instrs.
void eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
SmallVectorImpl<unsigned> &ShrinkRegs);
/// Remove liverange defs at places where implicit defs will be removed.
void removeImplicitDefs();
/// Get the value assignments suitable for passing to LiveInterval::join.
const int *getAssignments() const { return Assignments.data(); }
};
} // end anonymous namespace
unsigned JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef)
const {
unsigned L = 0;
for (const MachineOperand &MO : DefMI->operands()) {
if (!MO.isReg() || MO.getReg() != Reg || !MO.isDef())
continue;
L |= TRI->getSubRegIndexLaneMask(
TRI->composeSubRegIndices(SubIdx, MO.getSubReg()));
if (MO.readsReg())
Redef = true;
}
return L;
}
std::pair<const VNInfo*, unsigned> JoinVals::followCopyChain(
const VNInfo *VNI) const {
unsigned Reg = this->Reg;
while (!VNI->isPHIDef()) {
SlotIndex Def = VNI->def;
MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
assert(MI && "No defining instruction");
if (!MI->isFullCopy())
return std::make_pair(VNI, Reg);
unsigned SrcReg = MI->getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
return std::make_pair(VNI, Reg);
const LiveInterval &LI = LIS->getInterval(SrcReg);
const VNInfo *ValueIn;
// No subrange involved.
if (!SubRangeJoin || !LI.hasSubRanges()) {
LiveQueryResult LRQ = LI.Query(Def);
ValueIn = LRQ.valueIn();
} else {
// Query subranges. Pick the first matching one.
ValueIn = nullptr;
for (const LiveInterval::SubRange &S : LI.subranges()) {
// Transform lanemask to a mask in the joined live interval.
unsigned SMask = TRI->composeSubRegIndexLaneMask(SubIdx, S.LaneMask);
if ((SMask & LaneMask) == 0)
continue;
LiveQueryResult LRQ = S.Query(Def);
ValueIn = LRQ.valueIn();
break;
}
}
if (ValueIn == nullptr)
break;
VNI = ValueIn;
Reg = SrcReg;
}
return std::make_pair(VNI, Reg);
}
bool JoinVals::valuesIdentical(VNInfo *Value0, VNInfo *Value1,
const JoinVals &Other) const {
const VNInfo *Orig0;
unsigned Reg0;
std::tie(Orig0, Reg0) = followCopyChain(Value0);
if (Orig0 == Value1)
return true;
const VNInfo *Orig1;
unsigned Reg1;
std::tie(Orig1, Reg1) = Other.followCopyChain(Value1);
// The values are equal if they are defined at the same place and use the
// same register. Note that we cannot compare VNInfos directly as some of
// them might be from a copy created in mergeSubRangeInto() while the other
// is from the original LiveInterval.
return Orig0->def == Orig1->def && Reg0 == Reg1;
}
JoinVals::ConflictResolution
JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
Val &V = Vals[ValNo];
assert(!V.isAnalyzed() && "Value has already been analyzed!");
VNInfo *VNI = LR.getValNumInfo(ValNo);
if (VNI->isUnused()) {
V.WriteLanes = ~0u;
return CR_Keep;
}
// Get the instruction defining this value, compute the lanes written.
const MachineInstr *DefMI = nullptr;
if (VNI->isPHIDef()) {
// Conservatively assume that all lanes in a PHI are valid.
unsigned Lanes = SubRangeJoin ? 1 : TRI->getSubRegIndexLaneMask(SubIdx);
V.ValidLanes = V.WriteLanes = Lanes;
} else {
DefMI = Indexes->getInstructionFromIndex(VNI->def);
assert(DefMI != nullptr);
if (SubRangeJoin) {
// We don't care about the lanes when joining subregister ranges.
V.WriteLanes = V.ValidLanes = 1;
if (DefMI->isImplicitDef()) {
V.ValidLanes = 0;
V.ErasableImplicitDef = true;
}
} else {
bool Redef = false;
V.ValidLanes = V.WriteLanes = computeWriteLanes(DefMI, Redef);
// If this is a read-modify-write instruction, there may be more valid
// lanes than the ones written by this instruction.
// This only covers partial redef operands. DefMI may have normal use
// operands reading the register. They don't contribute valid lanes.
//
// This adds ssub1 to the set of valid lanes in %src:
//
// %src:ssub1<def> = FOO
//
// This leaves only ssub1 valid, making any other lanes undef:
//
// %src:ssub1<def,read-undef> = FOO %src:ssub2
//
// The <read-undef> flag on the def operand means that old lane values are
// not important.
if (Redef) {
V.RedefVNI = LR.Query(VNI->def).valueIn();
assert((TrackSubRegLiveness || V.RedefVNI) &&
"Instruction is reading nonexistent value");
if (V.RedefVNI != nullptr) {
computeAssignment(V.RedefVNI->id, Other);
V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes;
}
}
// An IMPLICIT_DEF writes undef values.
if (DefMI->isImplicitDef()) {
// We normally expect IMPLICIT_DEF values to be live only until the end
// of their block. If the value is really live longer and gets pruned in
// another block, this flag is cleared again.
V.ErasableImplicitDef = true;
V.ValidLanes &= ~V.WriteLanes;
}
}
}
// Find the value in Other that overlaps VNI->def, if any.
LiveQueryResult OtherLRQ = Other.LR.Query(VNI->def);
// It is possible that both values are defined by the same instruction, or
// the values are PHIs defined in the same block. When that happens, the two
// values should be merged into one, but not into any preceding value.
// The first value defined or visited gets CR_Keep, the other gets CR_Merge.
if (VNInfo *OtherVNI = OtherLRQ.valueDefined()) {
assert(SlotIndex::isSameInstr(VNI->def, OtherVNI->def) && "Broken LRQ");
// One value stays, the other is merged. Keep the earlier one, or the first
// one we see.
if (OtherVNI->def < VNI->def)
Other.computeAssignment(OtherVNI->id, *this);
else if (VNI->def < OtherVNI->def && OtherLRQ.valueIn()) {
// This is an early-clobber def overlapping a live-in value in the other
// register. Not mergeable.
V.OtherVNI = OtherLRQ.valueIn();
return CR_Impossible;
}
V.OtherVNI = OtherVNI;
Val &OtherV = Other.Vals[OtherVNI->id];
// Keep this value, check for conflicts when analyzing OtherVNI.
if (!OtherV.isAnalyzed())
return CR_Keep;
// Both sides have been analyzed now.
// Allow overlapping PHI values. Any real interference would show up in a
// predecessor, the PHI itself can't introduce any conflicts.
if (VNI->isPHIDef())
return CR_Merge;
if (V.ValidLanes & OtherV.ValidLanes)
// Overlapping lanes can't be resolved.
return CR_Impossible;
else
return CR_Merge;
}
// No simultaneous def. Is Other live at the def?
V.OtherVNI = OtherLRQ.valueIn();
if (!V.OtherVNI)
// No overlap, no conflict.
return CR_Keep;
assert(!SlotIndex::isSameInstr(VNI->def, V.OtherVNI->def) && "Broken LRQ");
// We have overlapping values, or possibly a kill of Other.
// Recursively compute assignments up the dominator tree.
Other.computeAssignment(V.OtherVNI->id, *this);
Val &OtherV = Other.Vals[V.OtherVNI->id];
// Check if OtherV is an IMPLICIT_DEF that extends beyond its basic block.
// This shouldn't normally happen, but ProcessImplicitDefs can leave such
// IMPLICIT_DEF instructions behind, and there is nothing wrong with it
// technically.
//
  // When it happens, treat that IMPLICIT_DEF as a normal value, and don't try
// to erase the IMPLICIT_DEF instruction.
if (OtherV.ErasableImplicitDef && DefMI &&
DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) {
DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
<< " extends into BB#" << DefMI->getParent()->getNumber()
<< ", keeping it.\n");
OtherV.ErasableImplicitDef = false;
}
// Allow overlapping PHI values. Any real interference would show up in a
// predecessor, the PHI itself can't introduce any conflicts.
if (VNI->isPHIDef())
return CR_Replace;
// Check for simple erasable conflicts.
if (DefMI->isImplicitDef()) {
// We need the def for the subregister if there is nothing else live at the
// subrange at this point.
if (TrackSubRegLiveness
&& (V.WriteLanes & (OtherV.ValidLanes | OtherV.WriteLanes)) == 0)
return CR_Replace;
return CR_Erase;
}
// Include the non-conflict where DefMI is a coalescable copy that kills
// OtherVNI. We still want the copy erased and value numbers merged.
if (CP.isCoalescable(DefMI)) {
// Some of the lanes copied from OtherVNI may be undef, making them undef
// here too.
V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
return CR_Erase;
}
// This may not be a real conflict if DefMI simply kills Other and defines
// VNI.
if (OtherLRQ.isKill() && OtherLRQ.endPoint() <= VNI->def)
return CR_Keep;
// Handle the case where VNI and OtherVNI can be proven to be identical:
//
// %other = COPY %ext
// %this = COPY %ext <-- Erase this copy
//
if (DefMI->isFullCopy() && !CP.isPartial()
&& valuesIdentical(VNI, V.OtherVNI, Other))
return CR_Erase;
// If the lanes written by this instruction were all undef in OtherVNI, it is
// still safe to join the live ranges. This can't be done with a simple value
// mapping, though - OtherVNI will map to multiple values:
//
// 1 %dst:ssub0 = FOO <-- OtherVNI
// 2 %src = BAR <-- VNI
// 3 %dst:ssub1 = COPY %src<kill> <-- Eliminate this copy.
// 4 BAZ %dst<kill>
// 5 QUUX %src<kill>
//
// Here OtherVNI will map to itself in [1;2), but to VNI in [2;5). CR_Replace
// handles this complex value mapping.
if ((V.WriteLanes & OtherV.ValidLanes) == 0)
return CR_Replace;
// If the other live range is killed by DefMI and the live ranges are still
// overlapping, it must be because we're looking at an early clobber def:
//
// %dst<def,early-clobber> = ASM %src<kill>
//
// In this case, it is illegal to merge the two live ranges since the early
// clobber def would clobber %src before it was read.
if (OtherLRQ.isKill()) {
    // The case where the def doesn't overlap the kill is handled above.
assert(VNI->def.isEarlyClobber() &&
"Only early clobber defs can overlap a kill");
return CR_Impossible;
}
// VNI is clobbering live lanes in OtherVNI, but there is still the
// possibility that no instructions actually read the clobbered lanes.
  // If we're clobbering all the lanes in OtherVNI, at least one must be read.
  // Otherwise Other.LR wouldn't be live here.
if ((TRI->getSubRegIndexLaneMask(Other.SubIdx) & ~V.WriteLanes) == 0)
return CR_Impossible;
// We need to verify that no instructions are reading the clobbered lanes. To
// save compile time, we'll only check that locally. Don't allow the tainted
// value to escape the basic block.
MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
if (OtherLRQ.endPoint() >= Indexes->getMBBEndIdx(MBB))
return CR_Impossible;
// There are still some things that could go wrong besides clobbered lanes
// being read, for example OtherVNI may be only partially redefined in MBB,
// and some clobbered lanes could escape the block. Save this analysis for
// resolveConflicts() when all values have been mapped. We need to know
// RedefVNI and WriteLanes for any later defs in MBB, and we can't compute
// that now - the recursive analyzeValue() calls must go upwards in the
// dominator tree.
return CR_Unresolved;
}
void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) {
Val &V = Vals[ValNo];
if (V.isAnalyzed()) {
// Recursion should always move up the dominator tree, so ValNo is not
// supposed to reappear before it has been assigned.
assert(Assignments[ValNo] != -1 && "Bad recursion?");
return;
}
switch ((V.Resolution = analyzeValue(ValNo, Other))) {
case CR_Erase:
case CR_Merge:
// Merge this ValNo into OtherVNI.
assert(V.OtherVNI && "OtherVNI not assigned, can't merge.");
assert(Other.Vals[V.OtherVNI->id].isAnalyzed() && "Missing recursion");
Assignments[ValNo] = Other.Assignments[V.OtherVNI->id];
DEBUG(dbgs() << "\t\tmerge " << PrintReg(Reg) << ':' << ValNo << '@'
<< LR.getValNumInfo(ValNo)->def << " into "
<< PrintReg(Other.Reg) << ':' << V.OtherVNI->id << '@'
<< V.OtherVNI->def << " --> @"
<< NewVNInfo[Assignments[ValNo]]->def << '\n');
break;
case CR_Replace:
case CR_Unresolved: {
// The other value is going to be pruned if this join is successful.
assert(V.OtherVNI && "OtherVNI not assigned, can't prune");
Val &OtherV = Other.Vals[V.OtherVNI->id];
// We cannot erase an IMPLICIT_DEF if we don't have valid values for all
// its lanes.
if ((OtherV.WriteLanes & ~V.ValidLanes) != 0 && TrackSubRegLiveness)
OtherV.ErasableImplicitDef = false;
OtherV.Pruned = true;
}
// Fall through.
default:
// This value number needs to go in the final joined live range.
Assignments[ValNo] = NewVNInfo.size();
NewVNInfo.push_back(LR.getValNumInfo(ValNo));
break;
}
}
bool JoinVals::mapValues(JoinVals &Other) {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
computeAssignment(i, Other);
if (Vals[i].Resolution == CR_Impossible) {
DEBUG(dbgs() << "\t\tinterference at " << PrintReg(Reg) << ':' << i
<< '@' << LR.getValNumInfo(i)->def << '\n');
return false;
}
}
return true;
}
bool JoinVals::
taintExtent(unsigned ValNo, unsigned TaintedLanes, JoinVals &Other,
SmallVectorImpl<std::pair<SlotIndex, unsigned> > &TaintExtent) {
VNInfo *VNI = LR.getValNumInfo(ValNo);
MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB);
// Scan Other.LR from VNI.def to MBBEnd.
LiveInterval::iterator OtherI = Other.LR.find(VNI->def);
assert(OtherI != Other.LR.end() && "No conflict?");
do {
// OtherI is pointing to a tainted value. Abort the join if the tainted
// lanes escape the block.
SlotIndex End = OtherI->end;
if (End >= MBBEnd) {
DEBUG(dbgs() << "\t\ttaints global " << PrintReg(Other.Reg) << ':'
<< OtherI->valno->id << '@' << OtherI->start << '\n');
return false;
}
DEBUG(dbgs() << "\t\ttaints local " << PrintReg(Other.Reg) << ':'
<< OtherI->valno->id << '@' << OtherI->start
<< " to " << End << '\n');
// A dead def is not a problem.
if (End.isDead())
break;
TaintExtent.push_back(std::make_pair(End, TaintedLanes));
// Check for another def in the MBB.
if (++OtherI == Other.LR.end() || OtherI->start >= MBBEnd)
break;
// Lanes written by the new def are no longer tainted.
const Val &OV = Other.Vals[OtherI->valno->id];
TaintedLanes &= ~OV.WriteLanes;
if (!OV.RedefVNI)
break;
} while (TaintedLanes);
return true;
}
bool JoinVals::usesLanes(const MachineInstr *MI, unsigned Reg, unsigned SubIdx,
unsigned Lanes) const {
if (MI->isDebugValue())
return false;
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || MO.isDef() || MO.getReg() != Reg)
continue;
if (!MO.readsReg())
continue;
if (Lanes & TRI->getSubRegIndexLaneMask(
TRI->composeSubRegIndices(SubIdx, MO.getSubReg())))
return true;
}
return false;
}
bool JoinVals::resolveConflicts(JoinVals &Other) {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
Val &V = Vals[i];
assert (V.Resolution != CR_Impossible && "Unresolvable conflict");
if (V.Resolution != CR_Unresolved)
continue;
DEBUG(dbgs() << "\t\tconflict at " << PrintReg(Reg) << ':' << i
<< '@' << LR.getValNumInfo(i)->def << '\n');
if (SubRangeJoin)
return false;
++NumLaneConflicts;
assert(V.OtherVNI && "Inconsistent conflict resolution.");
VNInfo *VNI = LR.getValNumInfo(i);
const Val &OtherV = Other.Vals[V.OtherVNI->id];
// VNI is known to clobber some lanes in OtherVNI. If we go ahead with the
// join, those lanes will be tainted with a wrong value. Get the extent of
// the tainted lanes.
unsigned TaintedLanes = V.WriteLanes & OtherV.ValidLanes;
SmallVector<std::pair<SlotIndex, unsigned>, 8> TaintExtent;
if (!taintExtent(i, TaintedLanes, Other, TaintExtent))
// Tainted lanes would extend beyond the basic block.
return false;
assert(!TaintExtent.empty() && "There should be at least one conflict.");
// Now look at the instructions from VNI->def to TaintExtent (inclusive).
MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
MachineBasicBlock::iterator MI = MBB->begin();
if (!VNI->isPHIDef()) {
MI = Indexes->getInstructionFromIndex(VNI->def);
// No need to check the instruction defining VNI for reads.
++MI;
}
assert(!SlotIndex::isSameInstr(VNI->def, TaintExtent.front().first) &&
"Interference ends on VNI->def. Should have been handled earlier");
MachineInstr *LastMI =
Indexes->getInstructionFromIndex(TaintExtent.front().first);
assert(LastMI && "Range must end at a proper instruction");
unsigned TaintNum = 0;
for(;;) {
assert(MI != MBB->end() && "Bad LastMI");
if (usesLanes(MI, Other.Reg, Other.SubIdx, TaintedLanes)) {
DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI);
return false;
}
// LastMI is the last instruction to use the current value.
if (&*MI == LastMI) {
if (++TaintNum == TaintExtent.size())
break;
LastMI = Indexes->getInstructionFromIndex(TaintExtent[TaintNum].first);
assert(LastMI && "Range must end at a proper instruction");
TaintedLanes = TaintExtent[TaintNum].second;
}
++MI;
}
// The tainted lanes are unused.
V.Resolution = CR_Replace;
++NumLaneResolves;
}
return true;
}
bool JoinVals::isPrunedValue(unsigned ValNo, JoinVals &Other) {
Val &V = Vals[ValNo];
if (V.Pruned || V.PrunedComputed)
return V.Pruned;
if (V.Resolution != CR_Erase && V.Resolution != CR_Merge)
return V.Pruned;
// Follow copies up the dominator tree and check if any intermediate value
// has been pruned.
V.PrunedComputed = true;
V.Pruned = Other.isPrunedValue(V.OtherVNI->id, *this);
return V.Pruned;
}
void JoinVals::pruneValues(JoinVals &Other,
SmallVectorImpl<SlotIndex> &EndPoints,
bool changeInstrs) {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
SlotIndex Def = LR.getValNumInfo(i)->def;
switch (Vals[i].Resolution) {
case CR_Keep:
break;
case CR_Replace: {
// This value takes precedence over the value in Other.LR.
LIS->pruneValue(Other.LR, Def, &EndPoints);
// Check if we're replacing an IMPLICIT_DEF value. The IMPLICIT_DEF
// instructions are only inserted to provide a live-out value for PHI
// predecessors, so the instruction should simply go away once its value
// has been replaced.
Val &OtherV = Other.Vals[Vals[i].OtherVNI->id];
bool EraseImpDef = OtherV.ErasableImplicitDef &&
OtherV.Resolution == CR_Keep;
if (!Def.isBlock()) {
if (changeInstrs) {
// Remove <def,read-undef> flags. This def is now a partial redef.
// Also remove <def,dead> flags since the joined live range will
// continue past this instruction.
for (MachineOperand &MO :
Indexes->getInstructionFromIndex(Def)->operands()) {
if (MO.isReg() && MO.isDef() && MO.getReg() == Reg) {
MO.setIsUndef(EraseImpDef);
MO.setIsDead(false);
}
}
}
// This value will reach instructions below, but we need to make sure
// the live range also reaches the instruction at Def.
if (!EraseImpDef)
EndPoints.push_back(Def);
}
DEBUG(dbgs() << "\t\tpruned " << PrintReg(Other.Reg) << " at " << Def
<< ": " << Other.LR << '\n');
break;
}
case CR_Erase:
case CR_Merge:
if (isPrunedValue(i, Other)) {
// This value is ultimately a copy of a pruned value in LR or Other.LR.
// We can no longer trust the value mapping computed by
// computeAssignment(), the value that was originally copied could have
// been replaced.
LIS->pruneValue(LR, Def, &EndPoints);
DEBUG(dbgs() << "\t\tpruned all of " << PrintReg(Reg) << " at "
<< Def << ": " << LR << '\n');
}
break;
case CR_Unresolved:
case CR_Impossible:
llvm_unreachable("Unresolved conflicts");
}
}
}
void JoinVals::pruneSubRegValues(LiveInterval &LI, unsigned &ShrinkMask) {
// Look for values being erased.
bool DidPrune = false;
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
if (Vals[i].Resolution != CR_Erase)
continue;
// Check subranges at the point where the copy will be removed.
SlotIndex Def = LR.getValNumInfo(i)->def;
for (LiveInterval::SubRange &S : LI.subranges()) {
LiveQueryResult Q = S.Query(Def);
// If a subrange starts at the copy then an undefined value has been
// copied and we must remove that subrange value as well.
VNInfo *ValueOut = Q.valueOutOrDead();
if (ValueOut != nullptr && Q.valueIn() == nullptr) {
DEBUG(dbgs() << "\t\tPrune sublane " << format("%04X", S.LaneMask)
<< " at " << Def << "\n");
LIS->pruneValue(S, Def, nullptr);
DidPrune = true;
// Mark value number as unused.
ValueOut->markUnused();
continue;
}
      // If a subrange ends at the copy, then a value was copied but only
      // partially used later. Shrink the subregister range appropriately.
if (Q.valueIn() != nullptr && Q.valueOut() == nullptr) {
DEBUG(dbgs() << "\t\tDead uses at sublane "
<< format("%04X", S.LaneMask) << " at " << Def << "\n");
ShrinkMask |= S.LaneMask;
}
}
}
if (DidPrune)
LI.removeEmptySubRanges();
}
void JoinVals::removeImplicitDefs() {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
Val &V = Vals[i];
if (V.Resolution != CR_Keep || !V.ErasableImplicitDef || !V.Pruned)
continue;
VNInfo *VNI = LR.getValNumInfo(i);
VNI->markUnused();
LR.removeValNo(VNI);
}
}
void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
SmallVectorImpl<unsigned> &ShrinkRegs) {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
// Get the def location before markUnused() below invalidates it.
SlotIndex Def = LR.getValNumInfo(i)->def;
switch (Vals[i].Resolution) {
case CR_Keep: {
// If an IMPLICIT_DEF value is pruned, it doesn't serve a purpose any
// longer. The IMPLICIT_DEF instructions are only inserted by
// PHIElimination to guarantee that all PHI predecessors have a value.
if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)
break;
// Remove value number i from LR.
VNInfo *VNI = LR.getValNumInfo(i);
LR.removeValNo(VNI);
// Note that this VNInfo is reused and still referenced in NewVNInfo,
// make it appear like an unused value number.
VNI->markUnused();
DEBUG(dbgs() << "\t\tremoved " << i << '@' << Def << ": " << LR << '\n');
// FALL THROUGH.
}
case CR_Erase: {
MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
assert(MI && "No instruction to erase");
if (MI->isCopy()) {
unsigned Reg = MI->getOperand(1).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg) &&
Reg != CP.getSrcReg() && Reg != CP.getDstReg())
ShrinkRegs.push_back(Reg);
}
ErasedInstrs.insert(MI);
DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI);
LIS->RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
break;
}
default:
break;
}
}
}
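/// Join the subregister live ranges LRange and RRange, which cover the lanes
/// in LaneMask, merging RRange into LRange. Return false if the conflicts
/// between the two ranges cannot be resolved.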
bool RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
unsigned LaneMask,
const CoalescerPair &CP) {
SmallVector<VNInfo*, 16> NewVNInfo;
JoinVals RHSVals(RRange, CP.getSrcReg(), CP.getSrcIdx(), LaneMask,
NewVNInfo, CP, LIS, TRI, true, true);
JoinVals LHSVals(LRange, CP.getDstReg(), CP.getDstIdx(), LaneMask,
NewVNInfo, CP, LIS, TRI, true, true);
  // Compute NewVNInfo and resolve conflicts (see also joinVirtRegs()).
  // We should be able to resolve all conflicts here as we could successfully
  // do it on the main range already. There is, however, a problem when
  // multiple ranges get mapped to the "overflow" lane mask bit, which creates
  // unexpected interferences.
if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) {
DEBUG(dbgs() << "*** Couldn't join subrange!\n");
return false;
}
if (!LHSVals.resolveConflicts(RHSVals) ||
!RHSVals.resolveConflicts(LHSVals)) {
DEBUG(dbgs() << "*** Couldn't join subrange!\n");
return false;
}
// The merging algorithm in LiveInterval::join() can't handle conflicting
// value mappings, so we need to remove any live ranges that overlap a
// CR_Replace resolution. Collect a set of end points that can be used to
// restore the live range after joining.
SmallVector<SlotIndex, 8> EndPoints;
LHSVals.pruneValues(RHSVals, EndPoints, false);
RHSVals.pruneValues(LHSVals, EndPoints, false);
LHSVals.removeImplicitDefs();
RHSVals.removeImplicitDefs();
LRange.verify();
RRange.verify();
// Join RRange into LHS.
LRange.join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
NewVNInfo);
DEBUG(dbgs() << "\t\tjoined lanes: " << LRange << "\n");
if (EndPoints.empty())
return true;
// Recompute the parts of the live range we had to remove because of
// CR_Replace conflicts.
DEBUG(dbgs() << "\t\trestoring liveness to " << EndPoints.size()
<< " points: " << LRange << '\n');
LIS->extendToIndices(LRange, EndPoints);
return true;
}
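/// Merge the live range ToMerge, covering the lanes in LaneMask, into the
/// matching subranges of LI, splitting existing subranges whose lane masks
/// only partially overlap with ToMerge.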
bool RegisterCoalescer::mergeSubRangeInto(LiveInterval &LI,
const LiveRange &ToMerge,
unsigned LaneMask, CoalescerPair &CP) {
BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
for (LiveInterval::SubRange &R : LI.subranges()) {
unsigned RMask = R.LaneMask;
// LaneMask of subregisters common to subrange R and ToMerge.
unsigned Common = RMask & LaneMask;
// There is nothing to do without common subregs.
if (Common == 0)
continue;
DEBUG(dbgs() << format("\t\tCopy+Merge %04X into %04X\n", RMask, Common));
    // LaneMask of subregisters contained in the R range but not in ToMerge;
    // they have to be split into their own subrange.
unsigned LRest = RMask & ~LaneMask;
LiveInterval::SubRange *CommonRange;
if (LRest != 0) {
R.LaneMask = LRest;
DEBUG(dbgs() << format("\t\tReduce Lane to %04X\n", LRest));
// Duplicate SubRange for newly merged common stuff.
CommonRange = LI.createSubRangeFrom(Allocator, Common, R);
} else {
// Reuse the existing range.
R.LaneMask = Common;
CommonRange = &R;
}
LiveRange RangeCopy(ToMerge, Allocator);
if (!joinSubRegRanges(*CommonRange, RangeCopy, Common, CP))
return false;
LaneMask &= ~RMask;
}
if (LaneMask != 0) {
DEBUG(dbgs() << format("\t\tNew Lane %04X\n", LaneMask));
LI.createSubRangeFrom(Allocator, LaneMask, ToMerge);
}
return true;
}
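/// Attempt to join the live intervals of two virtual registers, including
/// any subregister ranges. Return true if the intervals were merged.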
bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
SmallVector<VNInfo*, 16> NewVNInfo;
LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
LiveInterval &LHS = LIS->getInterval(CP.getDstReg());
bool TrackSubRegLiveness = MRI->shouldTrackSubRegLiveness(*CP.getNewRC());
JoinVals RHSVals(RHS, CP.getSrcReg(), CP.getSrcIdx(), 0, NewVNInfo, CP, LIS,
TRI, false, TrackSubRegLiveness);
JoinVals LHSVals(LHS, CP.getDstReg(), CP.getDstIdx(), 0, NewVNInfo, CP, LIS,
TRI, false, TrackSubRegLiveness);
DEBUG(dbgs() << "\t\tRHS = " << RHS
<< "\n\t\tLHS = " << LHS
<< '\n');
// First compute NewVNInfo and the simple value mappings.
// Detect impossible conflicts early.
if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
return false;
// Some conflicts can only be resolved after all values have been mapped.
if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
return false;
// All clear, the live ranges can be merged.
if (RHS.hasSubRanges() || LHS.hasSubRanges()) {
BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
// Transform lanemasks from the LHS to masks in the coalesced register and
// create initial subranges if necessary.
unsigned DstIdx = CP.getDstIdx();
if (!LHS.hasSubRanges()) {
unsigned Mask = DstIdx == 0 ? CP.getNewRC()->getLaneMask()
: TRI->getSubRegIndexLaneMask(DstIdx);
// LHS must support subregs or we wouldn't be in this codepath.
assert(Mask != 0);
LHS.createSubRangeFrom(Allocator, Mask, LHS);
} else if (DstIdx != 0) {
// Transform LHS lanemasks to new register class if necessary.
for (LiveInterval::SubRange &R : LHS.subranges()) {
unsigned Mask = TRI->composeSubRegIndexLaneMask(DstIdx, R.LaneMask);
R.LaneMask = Mask;
}
}
DEBUG(dbgs() << "\t\tLHST = " << PrintReg(CP.getDstReg())
<< ' ' << LHS << '\n');
// Determine lanemasks of RHS in the coalesced register and merge subranges.
unsigned SrcIdx = CP.getSrcIdx();
bool Abort = false;
if (!RHS.hasSubRanges()) {
unsigned Mask = SrcIdx == 0 ? CP.getNewRC()->getLaneMask()
: TRI->getSubRegIndexLaneMask(SrcIdx);
if (!mergeSubRangeInto(LHS, RHS, Mask, CP))
Abort = true;
} else {
// Pair up subranges and merge.
for (LiveInterval::SubRange &R : RHS.subranges()) {
unsigned Mask = TRI->composeSubRegIndexLaneMask(SrcIdx, R.LaneMask);
if (!mergeSubRangeInto(LHS, R, Mask, CP)) {
Abort = true;
break;
}
}
}
if (Abort) {
// This shouldn't have happened :-(
      // However, we are aware of at least one existing problem where we
      // can't merge subranges when multiple ranges end up in the
      // "overflow bit" 32. As a workaround we drop all subregister ranges,
      // which means we lose some precision but are back to a well-defined
      // state.
assert(TargetRegisterInfo::isImpreciseLaneMask(
CP.getNewRC()->getLaneMask())
&& "SubRange merge should only fail when merging into bit 32.");
DEBUG(dbgs() << "\tSubrange join aborted!\n");
LHS.clearSubRanges();
RHS.clearSubRanges();
} else {
DEBUG(dbgs() << "\tJoined SubRanges " << LHS << "\n");
LHSVals.pruneSubRegValues(LHS, ShrinkMask);
RHSVals.pruneSubRegValues(LHS, ShrinkMask);
}
}
// The merging algorithm in LiveInterval::join() can't handle conflicting
// value mappings, so we need to remove any live ranges that overlap a
// CR_Replace resolution. Collect a set of end points that can be used to
// restore the live range after joining.
SmallVector<SlotIndex, 8> EndPoints;
LHSVals.pruneValues(RHSVals, EndPoints, true);
RHSVals.pruneValues(LHSVals, EndPoints, true);
// Erase COPY and IMPLICIT_DEF instructions. This may cause some external
// registers to require trimming.
SmallVector<unsigned, 8> ShrinkRegs;
LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
while (!ShrinkRegs.empty())
shrinkToUses(&LIS->getInterval(ShrinkRegs.pop_back_val()));
// Join RHS into LHS.
LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
// Kill flags are going to be wrong if the live ranges were overlapping.
// Eventually, we should simply clear all kill flags when computing live
// ranges. They are reinserted after register allocation.
MRI->clearKillFlags(LHS.reg);
MRI->clearKillFlags(RHS.reg);
if (!EndPoints.empty()) {
// Recompute the parts of the live range we had to remove because of
// CR_Replace conflicts.
DEBUG(dbgs() << "\t\trestoring liveness to " << EndPoints.size()
<< " points: " << LHS << '\n');
LIS->extendToIndices((LiveRange&)LHS, EndPoints);
}
return true;
}
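/// Attempt to join CP, dispatching to the reserved-physreg or virtual
/// register path as appropriate. Return false if joining is not possible.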
bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) {
return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(CP);
}
namespace {
/// Information concerning MBB coalescing priority.
struct MBBPriorityInfo {
MachineBasicBlock *MBB;
unsigned Depth;
bool IsSplit;
MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit)
: MBB(mbb), Depth(depth), IsSplit(issplit) {}
};
}
/// C-style comparator that sorts first based on the loop depth of the basic
/// block (the unsigned), and then on the MBB number.
///
/// EnableGlobalCopies assumes that the primary sort key is loop depth.
// HLSL Change: changed calling convention to __cdecl
static int __cdecl compareMBBPriority(const MBBPriorityInfo *LHS,
const MBBPriorityInfo *RHS) {
// Deeper loops first
if (LHS->Depth != RHS->Depth)
return LHS->Depth > RHS->Depth ? -1 : 1;
// Try to unsplit critical edges next.
if (LHS->IsSplit != RHS->IsSplit)
return LHS->IsSplit ? -1 : 1;
// Prefer blocks that are more connected in the CFG. This takes care of
// the most difficult copies first while intervals are short.
unsigned cl = LHS->MBB->pred_size() + LHS->MBB->succ_size();
unsigned cr = RHS->MBB->pred_size() + RHS->MBB->succ_size();
if (cl != cr)
return cl > cr ? -1 : 1;
// As a last resort, sort by block number.
return LHS->MBB->getNumber() < RHS->MBB->getNumber() ? -1 : 1;
}
/// \returns true if the given copy uses or defines a local live range.
static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) {
if (!Copy->isCopy())
return false;
if (Copy->getOperand(1).isUndef())
return false;
unsigned SrcReg = Copy->getOperand(1).getReg();
unsigned DstReg = Copy->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(SrcReg)
|| TargetRegisterInfo::isPhysicalRegister(DstReg))
return false;
return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg))
|| LIS->intervalIsInOneMBB(LIS->getInterval(DstReg));
}
bool RegisterCoalescer::
copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList) {
bool Progress = false;
for (unsigned i = 0, e = CurrList.size(); i != e; ++i) {
if (!CurrList[i])
continue;
// Skip instruction pointers that have already been erased, for example by
// dead code elimination.
if (ErasedInstrs.erase(CurrList[i])) {
CurrList[i] = nullptr;
continue;
}
bool Again = false;
bool Success = joinCopy(CurrList[i], Again);
Progress |= Success;
if (Success || !Again)
CurrList[i] = nullptr;
}
return Progress;
}
/// Check if DstReg is a terminal node.
/// I.e., it does not have any affinity other than \p Copy.
static bool isTerminalReg(unsigned DstReg, const MachineInstr &Copy,
const MachineRegisterInfo *MRI) {
assert(Copy.isCopyLike());
  // Check if the destination of this copy has any other affinity.
for (const MachineInstr &MI : MRI->reg_nodbg_instructions(DstReg))
if (&MI != &Copy && MI.isCopyLike())
return false;
return true;
}
bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
assert(Copy.isCopyLike());
if (!UseTerminalRule)
return false;
unsigned DstReg, DstSubReg, SrcReg, SrcSubReg;
isMoveInstr(*TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg);
// Check if the destination of this copy has any other affinity.
if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
// If SrcReg is a physical register, the copy won't be coalesced.
// Ignoring it may have other side effect (like missing
// rematerialization). So keep it.
TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
!isTerminalReg(DstReg, Copy, MRI))
return false;
  // DstReg is a terminal node. Check if it interferes with any other
  // copy involving SrcReg.
const MachineBasicBlock *OrigBB = Copy.getParent();
const LiveInterval &DstLI = LIS->getInterval(DstReg);
for (const MachineInstr &MI : MRI->reg_nodbg_instructions(SrcReg)) {
// Technically we should check if the weight of the new copy is
// interesting compared to the other one and update the weight
// of the copies accordingly. However, this would only work if
// we would gather all the copies first then coalesce, whereas
// right now we interleave both actions.
// For now, just consider the copies that are in the same block.
if (&MI == &Copy || !MI.isCopyLike() || MI.getParent() != OrigBB)
continue;
unsigned OtherReg, OtherSubReg, OtherSrcReg, OtherSrcSubReg;
    isMoveInstr(*TRI, &MI, OtherSrcReg, OtherReg, OtherSrcSubReg,
                OtherSubReg);
if (OtherReg == SrcReg)
OtherReg = OtherSrcReg;
// Check if OtherReg is a non-terminal.
if (TargetRegisterInfo::isPhysicalRegister(OtherReg) ||
isTerminalReg(OtherReg, MI, MRI))
continue;
    // Check that OtherReg interferes with DstReg.
if (LIS->getInterval(OtherReg).overlaps(DstLI)) {
DEBUG(dbgs() << "Apply terminal rule for: " << PrintReg(DstReg) << '\n');
return true;
}
}
return false;
}
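/// Collect all copy-like instructions in MBB and try to coalesce them
/// immediately. When joining global copies is enabled, local copies and
/// copies deferred by the terminal rule are queued separately.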
void
RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
DEBUG(dbgs() << MBB->getName() << ":\n");
// Collect all copy-like instructions in MBB. Don't start coalescing anything
// yet, it might invalidate the iterator.
const unsigned PrevSize = WorkList.size();
if (JoinGlobalCopies) {
SmallVector<MachineInstr*, 2> LocalTerminals;
SmallVector<MachineInstr*, 2> GlobalTerminals;
// Coalesce copies bottom-up to coalesce local defs before local uses. They
// are not inherently easier to resolve, but slightly preferable until we
// have local live range splitting. In particular this is required by
// cmp+jmp macro fusion.
for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
MII != E; ++MII) {
if (!MII->isCopyLike())
continue;
bool ApplyTerminalRule = applyTerminalRule(*MII);
if (isLocalCopy(&(*MII), LIS)) {
if (ApplyTerminalRule)
LocalTerminals.push_back(&(*MII));
else
LocalWorkList.push_back(&(*MII));
} else {
if (ApplyTerminalRule)
GlobalTerminals.push_back(&(*MII));
else
WorkList.push_back(&(*MII));
}
}
// Append the copies evicted by the terminal rule at the end of the list.
LocalWorkList.append(LocalTerminals.begin(), LocalTerminals.end());
WorkList.append(GlobalTerminals.begin(), GlobalTerminals.end());
}
else {
SmallVector<MachineInstr*, 2> Terminals;
for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
MII != E; ++MII)
if (MII->isCopyLike()) {
if (applyTerminalRule(*MII))
Terminals.push_back(&(*MII));
else
          WorkList.push_back(&(*MII));
}
// Append the copies evicted by the terminal rule at the end of the list.
WorkList.append(Terminals.begin(), Terminals.end());
}
// Try coalescing the collected copies immediately, and remove the nulls.
// This prevents the WorkList from getting too large since most copies are
// joinable on the first attempt.
MutableArrayRef<MachineInstr*>
CurrList(WorkList.begin() + PrevSize, WorkList.end());
if (copyCoalesceWorkList(CurrList))
WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
(MachineInstr*)nullptr), WorkList.end());
}
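/// Try joining the copies on the local work list, then move any that could
/// not be joined onto the global work list for another attempt.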
void RegisterCoalescer::coalesceLocals() {
copyCoalesceWorkList(LocalWorkList);
for (unsigned j = 0, je = LocalWorkList.size(); j != je; ++j) {
if (LocalWorkList[j])
WorkList.push_back(LocalWorkList[j]);
}
LocalWorkList.clear();
}
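/// Join intervals over the whole function, visiting basic blocks in priority
/// order (deepest loops first) and iterating over the work list until no
/// more copies can be coalesced.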
void RegisterCoalescer::joinAllIntervals() {
DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
assert(WorkList.empty() && LocalWorkList.empty() && "Old data still around.");
std::vector<MBBPriorityInfo> MBBs;
MBBs.reserve(MF->size());
  for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
MachineBasicBlock *MBB = I;
MBBs.push_back(MBBPriorityInfo(MBB, Loops->getLoopDepth(MBB),
JoinSplitEdges && isSplitEdge(MBB)));
}
array_pod_sort(MBBs.begin(), MBBs.end(), compareMBBPriority);
// Coalesce intervals in MBB priority order.
unsigned CurrDepth = UINT_MAX;
for (unsigned i = 0, e = MBBs.size(); i != e; ++i) {
// Try coalescing the collected local copies for deeper loops.
if (JoinGlobalCopies && MBBs[i].Depth < CurrDepth) {
coalesceLocals();
CurrDepth = MBBs[i].Depth;
}
copyCoalesceInMBB(MBBs[i].MBB);
}
coalesceLocals();
// Joining intervals can allow other intervals to be joined. Iteratively join
// until we make no progress.
while (copyCoalesceWorkList(WorkList))
/* empty */ ;
}
void RegisterCoalescer::releaseMemory() {
ErasedInstrs.clear();
WorkList.clear();
DeadDefs.clear();
InflateRegs.clear();
}
bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
MF = &fn;
MRI = &fn.getRegInfo();
TM = &fn.getTarget();
const TargetSubtargetInfo &STI = fn.getSubtarget();
TRI = STI.getRegisterInfo();
TII = STI.getInstrInfo();
LIS = &getAnalysis<LiveIntervals>();
AA = &getAnalysis<AliasAnalysis>();
Loops = &getAnalysis<MachineLoopInfo>();
if (EnableGlobalCopies == cl::BOU_UNSET)
JoinGlobalCopies = STI.enableJoinGlobalCopies();
else
JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE);
// The MachineScheduler does not currently require JoinSplitEdges. This will
// either be enabled unconditionally or replaced by a more general live range
// splitting optimization.
JoinSplitEdges = EnableJoinSplits;
DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
<< "********** Function: " << MF->getName() << '\n');
if (VerifyCoalescing)
MF->verify(this, "Before register coalescing");
RegClassInfo.runOnMachineFunction(fn);
// Join (coalesce) intervals if requested.
if (EnableJoining)
joinAllIntervals();
// After deleting a lot of copies, register classes may be less constrained.
// Removing sub-register operands may allow GR32_ABCD -> GR32 and DPR_VFP2 ->
// DPR inflation.
array_pod_sort(InflateRegs.begin(), InflateRegs.end());
InflateRegs.erase(std::unique(InflateRegs.begin(), InflateRegs.end()),
InflateRegs.end());
DEBUG(dbgs() << "Trying to inflate " << InflateRegs.size() << " regs.\n");
for (unsigned i = 0, e = InflateRegs.size(); i != e; ++i) {
unsigned Reg = InflateRegs[i];
if (MRI->reg_nodbg_empty(Reg))
continue;
if (MRI->recomputeRegClass(Reg)) {
DEBUG(dbgs() << PrintReg(Reg) << " inflated to "
<< TRI->getRegClassName(MRI->getRegClass(Reg)) << '\n');
LiveInterval &LI = LIS->getInterval(Reg);
unsigned MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
if (MaxMask == 0) {
        // If the inflated register class does not support subregisters
        // anymore, remove the subranges.
LI.clearSubRanges();
} else {
#ifndef NDEBUG
// If subranges are still supported, then the same subregs should still
// be supported.
for (LiveInterval::SubRange &S : LI.subranges()) {
assert ((S.LaneMask & ~MaxMask) == 0);
}
#endif
}
++NumInflated;
}
}
DEBUG(dump());
if (VerifyCoalescing)
MF->verify(this, "After register coalescing");
return true;
}
void RegisterCoalescer::print(raw_ostream &O, const Module* m) const {
LIS->print(O, m);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/WinEHPrepare.cpp | //===-- WinEHPrepare - Prepare exception handling for code generation ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass lowers LLVM IR exception handling into something closer to what the
// backend wants for functions using a personality function from a runtime
// provided by MSVC. Functions with other personality functions are left alone
// and may be prepared by other passes. In particular, all supported MSVC
// personality functions require cleanup code to be outlined, and the C++
// personality requires catch handler code to be outlined.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/LibCallSemantics.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <memory>
using namespace llvm;
using namespace llvm::PatternMatch;
#define DEBUG_TYPE "winehprepare"
namespace {
// This map is used to model frame variable usage during outlining, to
// construct a structure type to hold the frame variables in a frame
// allocation block, and to remap the frame variable allocas (including
// spill locations as needed) to GEPs that get the variable from the
// frame allocation structure.
typedef MapVector<Value *, TinyPtrVector<AllocaInst *>> FrameVarInfoMap;
// TinyPtrVector cannot hold nullptr, so we need our own sentinel that isn't
// quite null.
AllocaInst *getCatchObjectSentinel() {
return static_cast<AllocaInst *>(nullptr) + 1;
}
typedef SmallSet<BasicBlock *, 4> VisitedBlockSet;
class LandingPadActions;
class LandingPadMap;
typedef DenseMap<const BasicBlock *, CatchHandler *> CatchHandlerMapTy;
typedef DenseMap<const BasicBlock *, CleanupHandler *> CleanupHandlerMapTy;
class WinEHPrepare : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid.
WinEHPrepare(const TargetMachine *TM = nullptr)
: FunctionPass(ID) {
if (TM)
TheTriple = TM->getTargetTriple();
}
bool runOnFunction(Function &Fn) override;
bool doFinalization(Module &M) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
StringRef getPassName() const override {
return "Windows exception handling preparation";
}
private:
bool prepareExceptionHandlers(Function &F,
SmallVectorImpl<LandingPadInst *> &LPads);
void identifyEHBlocks(Function &F, SmallVectorImpl<LandingPadInst *> &LPads);
void promoteLandingPadValues(LandingPadInst *LPad);
void demoteValuesLiveAcrossHandlers(Function &F,
SmallVectorImpl<LandingPadInst *> &LPads);
void findSEHEHReturnPoints(Function &F,
SetVector<BasicBlock *> &EHReturnBlocks);
void findCXXEHReturnPoints(Function &F,
SetVector<BasicBlock *> &EHReturnBlocks);
void getPossibleReturnTargets(Function *ParentF, Function *HandlerF,
SetVector<BasicBlock*> &Targets);
void completeNestedLandingPad(Function *ParentFn,
LandingPadInst *OutlinedLPad,
const LandingPadInst *OriginalLPad,
FrameVarInfoMap &VarInfo);
Function *createHandlerFunc(Function *ParentFn, Type *RetTy,
const Twine &Name, Module *M, Value *&ParentFP);
bool outlineHandler(ActionHandler *Action, Function *SrcFn,
LandingPadInst *LPad, BasicBlock *StartBB,
FrameVarInfoMap &VarInfo);
void addStubInvokeToHandlerIfNeeded(Function *Handler);
void mapLandingPadBlocks(LandingPadInst *LPad, LandingPadActions &Actions);
CatchHandler *findCatchHandler(BasicBlock *BB, BasicBlock *&NextBB,
VisitedBlockSet &VisitedBlocks);
void findCleanupHandlers(LandingPadActions &Actions, BasicBlock *StartBB,
BasicBlock *EndBB);
void processSEHCatchHandler(CatchHandler *Handler, BasicBlock *StartBB);
Triple TheTriple;
// All fields are reset by runOnFunction.
DominatorTree *DT = nullptr;
const TargetLibraryInfo *LibInfo = nullptr;
EHPersonality Personality = EHPersonality::Unknown;
CatchHandlerMapTy CatchHandlerMap;
CleanupHandlerMapTy CleanupHandlerMap;
DenseMap<const LandingPadInst *, LandingPadMap> LPadMaps;
SmallPtrSet<BasicBlock *, 4> NormalBlocks;
SmallPtrSet<BasicBlock *, 4> EHBlocks;
SetVector<BasicBlock *> EHReturnBlocks;
// This maps landing pad instructions found in outlined handlers to
// the landing pad instruction in the parent function from which they
// were cloned. The cloned/nested landing pad is used as the key
// because the landing pad may be cloned into multiple handlers.
// This map will be used to add the llvm.eh.actions call to the nested
// landing pads after all handlers have been outlined.
DenseMap<LandingPadInst *, const LandingPadInst *> NestedLPtoOriginalLP;
// This maps blocks in the parent function which are destinations of
// catch handlers to cloned blocks in (other) outlined handlers. This
// handles the case where a nested landing pad has a catch handler that
// returns to a handler function rather than the parent function.
// The original block is used as the key here because there should only
// ever be one handler function from which the cloned block is not pruned.
// The original block will be pruned from the parent function after all
// handlers have been outlined. This map will be used to adjust the
// return instructions of handlers which return to the block that was
// outlined into a handler. This is done after all handlers have been
// outlined but before the outlined code is pruned from the parent function.
DenseMap<const BasicBlock *, BasicBlock *> LPadTargetBlocks;
// Map from outlined handler to call to parent local address. Only used for
// 32-bit EH.
DenseMap<Function *, Value *> HandlerToParentFP;
AllocaInst *SEHExceptionCodeSlot = nullptr;
};
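// Materializes references to parent-frame values inside an outlined handler.
// Each parent value gets a placeholder alloca in the handler, recorded in
// FrameVarInfo; prepareExceptionHandlers later rewrites those placeholders
// into llvm.localrecover calls against the parent frame.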
class WinEHFrameVariableMaterializer : public ValueMaterializer {
public:
WinEHFrameVariableMaterializer(Function *OutlinedFn, Value *ParentFP,
FrameVarInfoMap &FrameVarInfo);
~WinEHFrameVariableMaterializer() override {}
Value *materializeValueFor(Value *V) override;
void escapeCatchObject(Value *V);
private:
FrameVarInfoMap &FrameVarInfo;
IRBuilder<> Builder;
};
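// Records the original landingpad instruction together with the extractvalue
// instructions that read its EH pointer and selector, so that clones of the
// landing pad can remap those values (see remapEHValues).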
class LandingPadMap {
public:
LandingPadMap() : OriginLPad(nullptr) {}
void mapLandingPad(const LandingPadInst *LPad);
bool isInitialized() { return OriginLPad != nullptr; }
bool isOriginLandingPadBlock(const BasicBlock *BB) const;
bool isLandingPadSpecificInst(const Instruction *Inst) const;
void remapEHValues(ValueToValueMapTy &VMap, Value *EHPtrValue,
Value *SelectorValue) const;
private:
const LandingPadInst *OriginLPad;
// We will normally only see one of each of these instructions, but
// if more than one occurs for some reason we can handle that.
TinyPtrVector<const ExtractValueInst *> ExtractedEHPtrs;
TinyPtrVector<const ExtractValueInst *> ExtractedSelectors;
};
class WinEHCloningDirectorBase : public CloningDirector {
public:
WinEHCloningDirectorBase(Function *HandlerFn, Value *ParentFP,
FrameVarInfoMap &VarInfo, LandingPadMap &LPadMap)
: Materializer(HandlerFn, ParentFP, VarInfo),
SelectorIDType(Type::getInt32Ty(HandlerFn->getContext())),
Int8PtrType(Type::getInt8PtrTy(HandlerFn->getContext())),
LPadMap(LPadMap), ParentFP(ParentFP) {}
CloningAction handleInstruction(ValueToValueMapTy &VMap,
const Instruction *Inst,
BasicBlock *NewBB) override;
virtual CloningAction handleBeginCatch(ValueToValueMapTy &VMap,
const Instruction *Inst,
BasicBlock *NewBB) = 0;
virtual CloningAction handleEndCatch(ValueToValueMapTy &VMap,
const Instruction *Inst,
BasicBlock *NewBB) = 0;
virtual CloningAction handleTypeIdFor(ValueToValueMapTy &VMap,
const Instruction *Inst,
BasicBlock *NewBB) = 0;
virtual CloningAction handleIndirectBr(ValueToValueMapTy &VMap,
const IndirectBrInst *IBr,
BasicBlock *NewBB) = 0;
virtual CloningAction handleInvoke(ValueToValueMapTy &VMap,
const InvokeInst *Invoke,
BasicBlock *NewBB) = 0;
virtual CloningAction handleResume(ValueToValueMapTy &VMap,
const ResumeInst *Resume,
BasicBlock *NewBB) = 0;
virtual CloningAction handleCompare(ValueToValueMapTy &VMap,
const CmpInst *Compare,
BasicBlock *NewBB) = 0;
virtual CloningAction handleLandingPad(ValueToValueMapTy &VMap,
const LandingPadInst *LPad,
BasicBlock *NewBB) = 0;
ValueMaterializer *getValueMaterializer() override { return &Materializer; }
protected:
WinEHFrameVariableMaterializer Materializer;
Type *SelectorIDType;
Type *Int8PtrType;
LandingPadMap &LPadMap;
/// The value representing the parent frame pointer.
Value *ParentFP;
};
class WinEHCatchDirector : public WinEHCloningDirectorBase {
public:
WinEHCatchDirector(
Function *CatchFn, Value *ParentFP, Value *Selector,
FrameVarInfoMap &VarInfo, LandingPadMap &LPadMap,
DenseMap<LandingPadInst *, const LandingPadInst *> &NestedLPads,
DominatorTree *DT, SmallPtrSetImpl<BasicBlock *> &EHBlocks)
: WinEHCloningDirectorBase(CatchFn, ParentFP, VarInfo, LPadMap),
CurrentSelector(Selector->stripPointerCasts()),
ExceptionObjectVar(nullptr), NestedLPtoOriginalLP(NestedLPads),
DT(DT), EHBlocks(EHBlocks) {}
CloningAction handleBeginCatch(ValueToValueMapTy &VMap,
const Instruction *Inst,
BasicBlock *NewBB) override;
CloningAction handleEndCatch(ValueToValueMapTy &VMap, const Instruction *Inst,
BasicBlock *NewBB) override;
CloningAction handleTypeIdFor(ValueToValueMapTy &VMap,
const Instruction *Inst,
BasicBlock *NewBB) override;
CloningAction handleIndirectBr(ValueToValueMapTy &VMap,
const IndirectBrInst *IBr,
BasicBlock *NewBB) override;
CloningAction handleInvoke(ValueToValueMapTy &VMap, const InvokeInst *Invoke,
BasicBlock *NewBB) override;
CloningAction handleResume(ValueToValueMapTy &VMap, const ResumeInst *Resume,
BasicBlock *NewBB) override;
CloningAction handleCompare(ValueToValueMapTy &VMap, const CmpInst *Compare,
BasicBlock *NewBB) override;
CloningAction handleLandingPad(ValueToValueMapTy &VMap,
const LandingPadInst *LPad,
BasicBlock *NewBB) override;
Value *getExceptionVar() { return ExceptionObjectVar; }
TinyPtrVector<BasicBlock *> &getReturnTargets() { return ReturnTargets; }
private:
Value *CurrentSelector;
Value *ExceptionObjectVar;
TinyPtrVector<BasicBlock *> ReturnTargets;
// This will be a reference to the field of the same name in the WinEHPrepare
// object which instantiates this WinEHCatchDirector object.
DenseMap<LandingPadInst *, const LandingPadInst *> &NestedLPtoOriginalLP;
DominatorTree *DT;
SmallPtrSetImpl<BasicBlock *> &EHBlocks;
};
class WinEHCleanupDirector : public WinEHCloningDirectorBase {
public:
WinEHCleanupDirector(Function *CleanupFn, Value *ParentFP,
FrameVarInfoMap &VarInfo, LandingPadMap &LPadMap)
: WinEHCloningDirectorBase(CleanupFn, ParentFP, VarInfo,
LPadMap) {}
CloningAction handleBeginCatch(ValueToValueMapTy &VMap,
const Instruction *Inst,
BasicBlock *NewBB) override;
CloningAction handleEndCatch(ValueToValueMapTy &VMap, const Instruction *Inst,
BasicBlock *NewBB) override;
CloningAction handleTypeIdFor(ValueToValueMapTy &VMap,
const Instruction *Inst,
BasicBlock *NewBB) override;
CloningAction handleIndirectBr(ValueToValueMapTy &VMap,
const IndirectBrInst *IBr,
BasicBlock *NewBB) override;
CloningAction handleInvoke(ValueToValueMapTy &VMap, const InvokeInst *Invoke,
BasicBlock *NewBB) override;
CloningAction handleResume(ValueToValueMapTy &VMap, const ResumeInst *Resume,
BasicBlock *NewBB) override;
CloningAction handleCompare(ValueToValueMapTy &VMap, const CmpInst *Compare,
BasicBlock *NewBB) override;
CloningAction handleLandingPad(ValueToValueMapTy &VMap,
const LandingPadInst *LPad,
BasicBlock *NewBB) override;
};
class LandingPadActions {
public:
LandingPadActions() : HasCleanupHandlers(false) {}
void insertCatchHandler(CatchHandler *Action) { Actions.push_back(Action); }
void insertCleanupHandler(CleanupHandler *Action) {
Actions.push_back(Action);
HasCleanupHandlers = true;
}
bool includesCleanup() const { return HasCleanupHandlers; }
SmallVectorImpl<ActionHandler *> &actions() { return Actions; }
SmallVectorImpl<ActionHandler *>::iterator begin() { return Actions.begin(); }
SmallVectorImpl<ActionHandler *>::iterator end() { return Actions.end(); }
private:
// Note that this class does not own the ActionHandler objects in this vector.
// The ActionHandlers are owned by the CatchHandlerMap and CleanupHandlerMap
// in the WinEHPrepare class.
SmallVector<ActionHandler *, 4> Actions;
bool HasCleanupHandlers;
};
} // end anonymous namespace
char WinEHPrepare::ID = 0;
INITIALIZE_TM_PASS(WinEHPrepare, "winehprepare", "Prepare Windows exceptions",
false, false)
FunctionPass *llvm::createWinEHPass(const TargetMachine *TM) {
return new WinEHPrepare(TM);
}
bool WinEHPrepare::runOnFunction(Function &Fn) {
// No need to prepare outlined handlers.
if (Fn.hasFnAttribute("wineh-parent"))
return false;
SmallVector<LandingPadInst *, 4> LPads;
SmallVector<ResumeInst *, 4> Resumes;
for (BasicBlock &BB : Fn) {
if (auto *LP = BB.getLandingPadInst())
LPads.push_back(LP);
if (auto *Resume = dyn_cast<ResumeInst>(BB.getTerminator()))
Resumes.push_back(Resume);
}
// No need to prepare functions that lack landing pads.
if (LPads.empty())
return false;
// Classify the personality to see what kind of preparation we need.
Personality = classifyEHPersonality(Fn.getPersonalityFn());
// Do nothing if this is not an MSVC personality.
if (!isMSVCEHPersonality(Personality))
return false;
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// If there were any landing pads, prepareExceptionHandlers will make changes.
prepareExceptionHandlers(Fn, LPads);
return true;
}
bool WinEHPrepare::doFinalization(Module &M) { return false; }
void WinEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
static bool isSelectorDispatch(BasicBlock *BB, BasicBlock *&CatchHandler,
Constant *&Selector, BasicBlock *&NextBB);
// Finds blocks reachable from the starting set Worklist. Does not follow
// unwind edges and does not enter blocks listed in StopPoints.
static void findReachableBlocks(SmallPtrSetImpl<BasicBlock *> &ReachableBBs,
SetVector<BasicBlock *> &Worklist,
const SetVector<BasicBlock *> *StopPoints) {
while (!Worklist.empty()) {
BasicBlock *BB = Worklist.pop_back_val();
// Don't cross blocks that we should stop at.
if (StopPoints && StopPoints->count(BB))
continue;
if (!ReachableBBs.insert(BB).second)
continue; // Already visited.
// Don't follow unwind edges of invokes.
if (auto *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
Worklist.insert(II->getNormalDest());
continue;
}
// Otherwise, follow all successors.
Worklist.insert(succ_begin(BB), succ_end(BB));
}
}
// Attempt to find an instruction at which a block can be split, before a
// call to llvm.eh.begincatch and its operands. If the block begins with
// the begincatch call or one of its adjacent operands, the block will not
// be split.
static Instruction *findBeginCatchSplitPoint(BasicBlock *BB,
IntrinsicInst *II) {
// If the begincatch call is already the first instruction in the block,
// don't split.
Instruction *FirstNonPHI = BB->getFirstNonPHI();
if (II == FirstNonPHI)
return nullptr;
// If either operand is in the same basic block as the instruction and
// isn't used by another instruction before the begincatch call, include it
// in the split block.
auto *Op0 = dyn_cast<Instruction>(II->getOperand(0));
auto *Op1 = dyn_cast<Instruction>(II->getOperand(1));
Instruction *I = II->getPrevNode();
Instruction *LastI = II;
while (I == Op0 || I == Op1) {
// If the block begins with one of the operands and there are no other
// instructions between the operand and the begincatch call, don't split.
if (I == FirstNonPHI)
return nullptr;
LastI = I;
I = I->getPrevNode();
}
// If there is at least one instruction in the block before the begincatch
// call and its operands, split the block at either the begincatch or
// its operand.
return LastI;
}
/// Find all points where exceptional control rejoins normal control flow via
/// llvm.eh.endcatch. Add them to the normal bb reachability worklist.
void WinEHPrepare::findCXXEHReturnPoints(
Function &F, SetVector<BasicBlock *> &EHReturnBlocks) {
for (auto BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
BasicBlock *BB = BBI;
for (Instruction &I : *BB) {
if (match(&I, m_Intrinsic<Intrinsic::eh_begincatch>())) {
Instruction *SplitPt =
findBeginCatchSplitPoint(BB, cast<IntrinsicInst>(&I));
if (SplitPt) {
// Split the block before the llvm.eh.begincatch call to allow
// cleanup and catch code to be distinguished later.
// Do not update BBI because we still need to process the
// portion of the block that we are splitting off.
SplitBlock(BB, SplitPt, DT);
break;
}
}
if (match(&I, m_Intrinsic<Intrinsic::eh_endcatch>())) {
// Split the block after the call to llvm.eh.endcatch if there is
// anything other than an unconditional branch, or if the successor
// starts with a phi.
auto *Br = dyn_cast<BranchInst>(I.getNextNode());
if (!Br || !Br->isUnconditional() ||
isa<PHINode>(Br->getSuccessor(0)->begin())) {
DEBUG(dbgs() << "splitting block " << BB->getName()
<< " with llvm.eh.endcatch\n");
BBI = SplitBlock(BB, I.getNextNode(), DT);
}
// The next BB is normal control flow.
EHReturnBlocks.insert(BB->getTerminator()->getSuccessor(0));
break;
}
}
}
}
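// Returns true if the final clause of the block's landingpad is a catch-all,
// i.e. a catch clause whose type info pointer is null.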
static bool isCatchAllLandingPad(const BasicBlock *BB) {
const LandingPadInst *LP = BB->getLandingPadInst();
if (!LP)
return false;
unsigned N = LP->getNumClauses();
return (N > 0 && LP->isCatch(N - 1) &&
isa<ConstantPointerNull>(LP->getClause(N - 1)));
}
/// Find all points where exceptional control rejoins normal control flow via
/// selector dispatch.
void WinEHPrepare::findSEHEHReturnPoints(
Function &F, SetVector<BasicBlock *> &EHReturnBlocks) {
for (auto BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
BasicBlock *BB = BBI;
// If the landingpad is a catch-all, treat the whole lpad as if it is
// reachable from normal control flow.
// FIXME: This is imprecise. We need a better way of identifying where a
// catch-all starts and cleanups stop. As far as LLVM is concerned, there
// is no difference.
if (isCatchAllLandingPad(BB)) {
EHReturnBlocks.insert(BB);
continue;
}
BasicBlock *CatchHandler;
BasicBlock *NextBB;
Constant *Selector;
if (isSelectorDispatch(BB, CatchHandler, Selector, NextBB)) {
// Split the edge if there are multiple predecessors. This creates a place
// where we can insert EH recovery code.
if (!CatchHandler->getSinglePredecessor()) {
DEBUG(dbgs() << "splitting EH return edge from " << BB->getName()
<< " to " << CatchHandler->getName() << '\n');
BBI = CatchHandler = SplitCriticalEdge(
BB, std::find(succ_begin(BB), succ_end(BB), CatchHandler));
}
EHReturnBlocks.insert(CatchHandler);
}
}
}
void WinEHPrepare::identifyEHBlocks(Function &F,
SmallVectorImpl<LandingPadInst *> &LPads) {
DEBUG(dbgs() << "Demoting values live across exception handlers in function "
<< F.getName() << '\n');
// Build a set of all non-exceptional blocks and exceptional blocks.
// - Non-exceptional blocks are blocks reachable from the entry block while
// not following invoke unwind edges.
// - Exceptional blocks are blocks reachable from landingpads. Analysis does
// not follow llvm.eh.endcatch blocks, which mark a transition from
// exceptional to normal control.
if (Personality == EHPersonality::MSVC_CXX)
findCXXEHReturnPoints(F, EHReturnBlocks);
else
findSEHEHReturnPoints(F, EHReturnBlocks);
DEBUG({
dbgs() << "identified the following blocks as EH return points:\n";
for (BasicBlock *BB : EHReturnBlocks)
dbgs() << " " << BB->getName() << '\n';
});
// Join points should not have phis at this point, unless they are a
// landingpad, in which case we will demote their phis later.
#ifndef NDEBUG
for (BasicBlock *BB : EHReturnBlocks)
assert((BB->isLandingPad() || !isa<PHINode>(BB->begin())) &&
"non-lpad EH return block has phi");
#endif
// Normal blocks are the blocks reachable from the entry block and all EH
// return points.
SetVector<BasicBlock *> Worklist;
Worklist = EHReturnBlocks;
Worklist.insert(&F.getEntryBlock());
findReachableBlocks(NormalBlocks, Worklist, nullptr);
DEBUG({
dbgs() << "marked the following blocks as normal:\n";
for (BasicBlock *BB : NormalBlocks)
dbgs() << " " << BB->getName() << '\n';
});
// Exceptional blocks are the blocks reachable from landingpads that don't
// cross EH return points.
Worklist.clear();
for (auto *LPI : LPads)
Worklist.insert(LPI->getParent());
findReachableBlocks(EHBlocks, Worklist, &EHReturnBlocks);
DEBUG({
dbgs() << "marked the following blocks as exceptional:\n";
for (BasicBlock *BB : EHBlocks)
dbgs() << " " << BB->getName() << '\n';
});
}
/// Ensure that all values live into and out of exception handlers are stored
/// in memory.
/// FIXME: This falls down when values are defined in one handler and live into
/// another handler. For example, a cleanup defines a value used only by a
/// catch handler.
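/// Here demotion means replacing an SSA register that crosses the normal/EH
/// boundary with stack traffic: roughly, a store to a frame slot at the
/// definition and a reload before each use (see DemoteRegToStack).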
void WinEHPrepare::demoteValuesLiveAcrossHandlers(
Function &F, SmallVectorImpl<LandingPadInst *> &LPads) {
DEBUG(dbgs() << "Demoting values live across exception handlers in function "
<< F.getName() << '\n');
// identifyEHBlocks() should have been called before this function.
assert(!NormalBlocks.empty());
// Try to avoid demoting EH pointer and selector values. They get in the way
// of our pattern matching.
SmallPtrSet<Instruction *, 10> EHVals;
for (BasicBlock &BB : F) {
LandingPadInst *LP = BB.getLandingPadInst();
if (!LP)
continue;
EHVals.insert(LP);
for (User *U : LP->users()) {
auto *EI = dyn_cast<ExtractValueInst>(U);
if (!EI)
continue;
EHVals.insert(EI);
for (User *U2 : EI->users()) {
if (auto *PN = dyn_cast<PHINode>(U2))
EHVals.insert(PN);
}
}
}
SetVector<Argument *> ArgsToDemote;
SetVector<Instruction *> InstrsToDemote;
for (BasicBlock &BB : F) {
bool IsNormalBB = NormalBlocks.count(&BB);
bool IsEHBB = EHBlocks.count(&BB);
if (!IsNormalBB && !IsEHBB)
continue; // Blocks that are neither normal nor EH are unreachable.
for (Instruction &I : BB) {
for (Value *Op : I.operands()) {
// Don't demote constants, labels, inline asm, or static allocas.
if (isa<Constant>(Op) || isa<BasicBlock>(Op) || isa<InlineAsm>(Op))
continue;
auto *AI = dyn_cast<AllocaInst>(Op);
if (AI && AI->isStaticAlloca())
continue;
if (auto *Arg = dyn_cast<Argument>(Op)) {
if (IsEHBB) {
DEBUG(dbgs() << "Demoting argument " << *Arg
<< " used by EH instr: " << I << "\n");
ArgsToDemote.insert(Arg);
}
continue;
}
// Don't demote EH values.
auto *OpI = cast<Instruction>(Op);
if (EHVals.count(OpI))
continue;
BasicBlock *OpBB = OpI->getParent();
// If a value is produced and consumed in the same BB, we don't need to
// demote it.
if (OpBB == &BB)
continue;
bool IsOpNormalBB = NormalBlocks.count(OpBB);
bool IsOpEHBB = EHBlocks.count(OpBB);
if (IsNormalBB != IsOpNormalBB || IsEHBB != IsOpEHBB) {
DEBUG({
dbgs() << "Demoting instruction live in-out from EH:\n";
dbgs() << "Instr: " << *OpI << '\n';
dbgs() << "User: " << I << '\n';
});
InstrsToDemote.insert(OpI);
}
}
}
}
// Demote values live into and out of handlers.
// FIXME: This demotion is inefficient. We should insert spills at the point
// of definition, insert one reload in each handler that uses the value, and
// insert reloads in the BB used to rejoin normal control flow.
Instruction *AllocaInsertPt = F.getEntryBlock().getFirstInsertionPt();
for (Instruction *I : InstrsToDemote)
DemoteRegToStack(*I, false, AllocaInsertPt);
// Demote arguments separately, and only for uses in EH blocks.
for (Argument *Arg : ArgsToDemote) {
auto *Slot = new AllocaInst(Arg->getType(), nullptr,
Arg->getName() + ".reg2mem", AllocaInsertPt);
SmallVector<User *, 4> Users(Arg->user_begin(), Arg->user_end());
for (User *U : Users) {
auto *I = dyn_cast<Instruction>(U);
if (I && EHBlocks.count(I->getParent())) {
auto *Reload = new LoadInst(Slot, Arg->getName() + ".reload", false, I);
U->replaceUsesOfWith(Arg, Reload);
}
}
new StoreInst(Arg, Slot, AllocaInsertPt);
}
// Demote landingpad phis, as the landingpad will be removed from the machine
// CFG.
for (LandingPadInst *LPI : LPads) {
BasicBlock *BB = LPI->getParent();
while (auto *Phi = dyn_cast<PHINode>(BB->begin()))
DemotePHIToStack(Phi, AllocaInsertPt);
}
DEBUG(dbgs() << "Demoted " << InstrsToDemote.size() << " instructions and "
<< ArgsToDemote.size() << " arguments for WinEHPrepare\n\n");
}
bool WinEHPrepare::prepareExceptionHandlers(
Function &F, SmallVectorImpl<LandingPadInst *> &LPads) {
// Don't run on functions that are already prepared.
for (LandingPadInst *LPad : LPads) {
BasicBlock *LPadBB = LPad->getParent();
for (Instruction &Inst : *LPadBB)
if (match(&Inst, m_Intrinsic<Intrinsic::eh_actions>()))
return false;
}
identifyEHBlocks(F, LPads);
demoteValuesLiveAcrossHandlers(F, LPads);
// These containers are used to re-map frame variables that are used in
// outlined catch and cleanup handlers. They will be populated as the
// handlers are outlined.
FrameVarInfoMap FrameVarInfo;
bool HandlersOutlined = false;
Module *M = F.getParent();
LLVMContext &Context = M->getContext();
// Create a new function to receive the handler contents.
PointerType *Int8PtrType = Type::getInt8PtrTy(Context);
Type *Int32Type = Type::getInt32Ty(Context);
Function *ActionIntrin = Intrinsic::getDeclaration(M, Intrinsic::eh_actions);
if (isAsynchronousEHPersonality(Personality)) {
// FIXME: Switch the ehptr type to i32 and then switch this.
SEHExceptionCodeSlot =
new AllocaInst(Int8PtrType, nullptr, "seh_exception_code",
F.getEntryBlock().getFirstInsertionPt());
}
// In order to handle the case where one outlined catch handler returns
// to a block within another outlined catch handler that would otherwise
// be unreachable, we need to outline the nested landing pad before we
// outline the landing pad which encloses it.
if (!isAsynchronousEHPersonality(Personality))
std::sort(LPads.begin(), LPads.end(),
[this](LandingPadInst *const &L, LandingPadInst *const &R) {
return DT->properlyDominates(R->getParent(), L->getParent());
});
// This container stores the llvm.eh.actions calls and IndirectBr instructions
// that make up the body of each landing pad after it has been outlined.
// We need to defer the population of the target list for the indirectbr
// until all landing pads have been outlined so that we can handle the
// case of blocks in the target that are reached only from nested
// landing pads.
SmallVector<std::pair<CallInst*, IndirectBrInst *>, 4> LPadImpls;
for (LandingPadInst *LPad : LPads) {
// Look for evidence that this landingpad has already been processed.
bool LPadHasActionList = false;
BasicBlock *LPadBB = LPad->getParent();
for (Instruction &Inst : *LPadBB) {
if (match(&Inst, m_Intrinsic<Intrinsic::eh_actions>())) {
LPadHasActionList = true;
break;
}
}
// If we've already outlined the handlers for this landingpad,
// there's nothing more to do here.
if (LPadHasActionList)
continue;
// If either of the values in the aggregate returned by the landing pad is
// extracted and stored to memory, promote the stored value to a register.
promoteLandingPadValues(LPad);
LandingPadActions Actions;
mapLandingPadBlocks(LPad, Actions);
HandlersOutlined |= !Actions.actions().empty();
for (ActionHandler *Action : Actions) {
if (Action->hasBeenProcessed())
continue;
BasicBlock *StartBB = Action->getStartBlock();
// SEH doesn't do any outlining for catches. Instead, pass the handler
// basic block address to llvm.eh.actions and list the block as a return
// target.
if (isAsynchronousEHPersonality(Personality)) {
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
processSEHCatchHandler(CatchAction, StartBB);
continue;
}
}
outlineHandler(Action, &F, LPad, StartBB, FrameVarInfo);
}
// Split the block after the landingpad instruction so that it is just a
// call to llvm.eh.actions followed by indirectbr.
assert(!isa<PHINode>(LPadBB->begin()) && "lpad phi not removed");
SplitBlock(LPadBB, LPad->getNextNode(), DT);
// Erase the branch inserted by the split so we can insert indirectbr.
LPadBB->getTerminator()->eraseFromParent();
// Replace all extracted values with undef and ultimately replace the
// landingpad with undef.
SmallVector<Instruction *, 4> SEHCodeUses;
SmallVector<Instruction *, 4> EHUndefs;
for (User *U : LPad->users()) {
auto *E = dyn_cast<ExtractValueInst>(U);
if (!E)
continue;
assert(E->getNumIndices() == 1 &&
"Unexpected operation: extracting both landing pad values");
unsigned Idx = *E->idx_begin();
assert((Idx == 0 || Idx == 1) && "unexpected index");
if (Idx == 0 && isAsynchronousEHPersonality(Personality))
SEHCodeUses.push_back(E);
else
EHUndefs.push_back(E);
}
for (Instruction *E : EHUndefs) {
E->replaceAllUsesWith(UndefValue::get(E->getType()));
E->eraseFromParent();
}
LPad->replaceAllUsesWith(UndefValue::get(LPad->getType()));
// Rewrite uses of the exception pointer to loads of an alloca.
while (!SEHCodeUses.empty()) {
Instruction *E = SEHCodeUses.pop_back_val();
SmallVector<Use *, 4> Uses;
for (Use &U : E->uses())
Uses.push_back(&U);
for (Use *U : Uses) {
auto *I = cast<Instruction>(U->getUser());
if (isa<ResumeInst>(I))
continue;
if (auto *Phi = dyn_cast<PHINode>(I))
SEHCodeUses.push_back(Phi);
else
U->set(new LoadInst(SEHExceptionCodeSlot, "sehcode", false, I));
}
E->replaceAllUsesWith(UndefValue::get(E->getType()));
E->eraseFromParent();
}
// Add a call to describe the actions for this landing pad.
std::vector<Value *> ActionArgs;
for (ActionHandler *Action : Actions) {
// Action codes from docs are: 0 cleanup, 1 catch.
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
ActionArgs.push_back(ConstantInt::get(Int32Type, 1));
ActionArgs.push_back(CatchAction->getSelector());
// Find the frame escape index of the exception object alloca in the
// parent.
int FrameEscapeIdx = -1;
Value *EHObj = const_cast<Value *>(CatchAction->getExceptionVar());
if (EHObj && !isa<ConstantPointerNull>(EHObj)) {
auto I = FrameVarInfo.find(EHObj);
assert(I != FrameVarInfo.end() &&
"failed to map llvm.eh.begincatch var");
FrameEscapeIdx = std::distance(FrameVarInfo.begin(), I);
}
ActionArgs.push_back(ConstantInt::get(Int32Type, FrameEscapeIdx));
} else {
ActionArgs.push_back(ConstantInt::get(Int32Type, 0));
}
ActionArgs.push_back(Action->getHandlerBlockOrFunc());
}
CallInst *Recover =
CallInst::Create(ActionIntrin, ActionArgs, "recover", LPadBB);
SetVector<BasicBlock *> ReturnTargets;
for (ActionHandler *Action : Actions) {
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
const auto &CatchTargets = CatchAction->getReturnTargets();
ReturnTargets.insert(CatchTargets.begin(), CatchTargets.end());
}
}
IndirectBrInst *Branch =
IndirectBrInst::Create(Recover, ReturnTargets.size(), LPadBB);
for (BasicBlock *Target : ReturnTargets)
Branch->addDestination(Target);
if (!isAsynchronousEHPersonality(Personality)) {
// C++ EH must repopulate the targets later to handle the case of
// targets that are reached indirectly through nested landing pads.
LPadImpls.push_back(std::make_pair(Recover, Branch));
}
} // End for each landingpad
// If nothing got outlined, there is no more processing to be done.
if (!HandlersOutlined)
return false;
// Replace any nested landing pad stubs with the correct action handler.
// This must be done before we remove unreachable blocks because it
// cleans up references to outlined blocks that will be deleted.
for (auto &LPadPair : NestedLPtoOriginalLP)
completeNestedLandingPad(&F, LPadPair.first, LPadPair.second, FrameVarInfo);
NestedLPtoOriginalLP.clear();
// Update the indirectbr instructions' target lists if necessary.
SetVector<BasicBlock*> CheckedTargets;
SmallVector<std::unique_ptr<ActionHandler>, 4> ActionList;
for (auto &LPadImplPair : LPadImpls) {
IntrinsicInst *Recover = cast<IntrinsicInst>(LPadImplPair.first);
IndirectBrInst *Branch = LPadImplPair.second;
// Parse the list of handlers called by this landing pad's llvm.eh.actions.
parseEHActions(Recover, ActionList);
// Collect the possible return targets of the catch handlers so they can be
// added to the existing indirect branch.
SetVector<BasicBlock *> ReturnTargets;
for (const auto &Action : ActionList) {
if (auto *CA = dyn_cast<CatchHandler>(Action.get())) {
Function *Handler = cast<Function>(CA->getHandlerBlockOrFunc());
getPossibleReturnTargets(&F, Handler, ReturnTargets);
}
}
ActionList.clear();
// Clear any targets we already knew about.
for (unsigned int I = 0, E = Branch->getNumDestinations(); I < E; ++I) {
BasicBlock *KnownTarget = Branch->getDestination(I);
if (ReturnTargets.count(KnownTarget))
ReturnTargets.remove(KnownTarget);
}
for (BasicBlock *Target : ReturnTargets) {
Branch->addDestination(Target);
// The target may be a block that we expected to get pruned.
// If it is, it may contain a call to llvm.eh.endcatch.
if (CheckedTargets.insert(Target)) {
// Earlier preparations guarantee that all calls to llvm.eh.endcatch
// will be followed by an unconditional branch.
auto *Br = dyn_cast<BranchInst>(Target->getTerminator());
if (Br && Br->isUnconditional() &&
Br != Target->getFirstNonPHIOrDbgOrLifetime()) {
Instruction *Prev = Br->getPrevNode();
if (match(cast<Value>(Prev), m_Intrinsic<Intrinsic::eh_endcatch>()))
Prev->eraseFromParent();
}
}
}
}
LPadImpls.clear();
F.addFnAttr("wineh-parent", F.getName());
// Delete any blocks that were only used by handlers that were outlined above.
removeUnreachableBlocks(F);
BasicBlock *Entry = &F.getEntryBlock();
IRBuilder<> Builder(F.getParent()->getContext());
Builder.SetInsertPoint(Entry->getFirstInsertionPt());
Function *FrameEscapeFn =
Intrinsic::getDeclaration(M, Intrinsic::localescape);
Function *RecoverFrameFn =
Intrinsic::getDeclaration(M, Intrinsic::localrecover);
SmallVector<Value *, 8> AllocasToEscape;
// Scan the entry block for an existing call to llvm.localescape. We need to
// keep escaping those objects.
for (Instruction &I : F.front()) {
auto *II = dyn_cast<IntrinsicInst>(&I);
if (II && II->getIntrinsicID() == Intrinsic::localescape) {
auto Args = II->arg_operands();
AllocasToEscape.append(Args.begin(), Args.end());
II->eraseFromParent();
break;
}
}
// Finally, replace all of the temporary allocas for frame variables used in
// the outlined handlers with calls to llvm.localrecover.
for (auto &VarInfoEntry : FrameVarInfo) {
Value *ParentVal = VarInfoEntry.first;
TinyPtrVector<AllocaInst *> &Allocas = VarInfoEntry.second;
AllocaInst *ParentAlloca = cast<AllocaInst>(ParentVal);
// FIXME: We should try to sink unescaped allocas from the parent frame into
// the child frame. If the alloca is escaped, we have to use the lifetime
// markers to ensure that the alloca is only live within the child frame.
// Add this alloca to the list of things to escape.
AllocasToEscape.push_back(ParentAlloca);
// Next replace all outlined allocas that are mapped to it.
for (AllocaInst *TempAlloca : Allocas) {
if (TempAlloca == getCatchObjectSentinel())
continue; // Skip catch parameter sentinels.
Function *HandlerFn = TempAlloca->getParent()->getParent();
llvm::Value *FP = HandlerToParentFP[HandlerFn];
assert(FP);
// FIXME: Sink this localrecover into the blocks where it is used.
Builder.SetInsertPoint(TempAlloca);
Builder.SetCurrentDebugLocation(TempAlloca->getDebugLoc());
Value *RecoverArgs[] = {
Builder.CreateBitCast(&F, Int8PtrType, ""), FP,
llvm::ConstantInt::get(Int32Type, AllocasToEscape.size() - 1)};
Instruction *RecoveredAlloca =
Builder.CreateCall(RecoverFrameFn, RecoverArgs);
// Add a pointer bitcast if the alloca wasn't an i8.
if (RecoveredAlloca->getType() != TempAlloca->getType()) {
RecoveredAlloca->setName(Twine(TempAlloca->getName()) + ".i8");
RecoveredAlloca = cast<Instruction>(
Builder.CreateBitCast(RecoveredAlloca, TempAlloca->getType()));
}
TempAlloca->replaceAllUsesWith(RecoveredAlloca);
TempAlloca->removeFromParent();
RecoveredAlloca->takeName(TempAlloca);
delete TempAlloca;
}
} // End for each FrameVarInfo entry.
// Insert 'call void (...)* @llvm.localescape(...)' at the end of the entry
// block.
Builder.SetInsertPoint(&F.getEntryBlock().back());
Builder.CreateCall(FrameEscapeFn, AllocasToEscape);
if (SEHExceptionCodeSlot) {
if (isAllocaPromotable(SEHExceptionCodeSlot)) {
SmallPtrSet<BasicBlock *, 4> UserBlocks;
for (User *U : SEHExceptionCodeSlot->users()) {
if (auto *Inst = dyn_cast<Instruction>(U))
UserBlocks.insert(Inst->getParent());
}
PromoteMemToReg(SEHExceptionCodeSlot, *DT);
// After the promotion, kill off dead instructions.
for (BasicBlock *BB : UserBlocks)
SimplifyInstructionsInBlock(BB, LibInfo);
}
}
// Clean up the handler action maps we created for this function
DeleteContainerSeconds(CatchHandlerMap);
CatchHandlerMap.clear();
DeleteContainerSeconds(CleanupHandlerMap);
CleanupHandlerMap.clear();
HandlerToParentFP.clear();
DT = nullptr;
LibInfo = nullptr;
SEHExceptionCodeSlot = nullptr;
EHBlocks.clear();
NormalBlocks.clear();
EHReturnBlocks.clear();
return HandlersOutlined;
}
void WinEHPrepare::promoteLandingPadValues(LandingPadInst *LPad) {
// If the return values of the landing pad instruction are extracted and
// stored to memory, we want to promote the store locations to reg values.
SmallVector<AllocaInst *, 2> EHAllocas;
// The landingpad instruction returns an aggregate value. Typically, its
// value will be passed to a pair of extract value instructions and the
// results of those extracts are often passed to store instructions.
// In unoptimized code the stored value will often be loaded and then stored
// again.
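// For example (illustrative names):
// %lp = landingpad { i8*, i32 } cleanup
// %sel = extractvalue { i8*, i32 } %lp, 1
// store i32 %sel, i32* %sel.slot
// Promoting %sel.slot turns the stored selector back into an SSA value.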
for (auto *U : LPad->users()) {
ExtractValueInst *Extract = dyn_cast<ExtractValueInst>(U);
if (!Extract)
continue;
for (auto *EU : Extract->users()) {
if (auto *Store = dyn_cast<StoreInst>(EU)) {
auto *AV = cast<AllocaInst>(Store->getPointerOperand());
EHAllocas.push_back(AV);
}
}
}
// We can't do this without a dominator tree.
assert(DT);
if (!EHAllocas.empty()) {
PromoteMemToReg(EHAllocas, *DT);
EHAllocas.clear();
}
// After promotion, some extracts may be trivially dead. Remove them.
SmallVector<Value *, 4> Users(LPad->user_begin(), LPad->user_end());
for (auto *U : Users)
RecursivelyDeleteTriviallyDeadInstructions(U);
}
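// Collects the blocks in ParentF to which HandlerF, or any handler nested
// within it, may return. Targets are discovered by walking the handler's
// return instructions, which always return block addresses.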
void WinEHPrepare::getPossibleReturnTargets(Function *ParentF,
Function *HandlerF,
SetVector<BasicBlock*> &Targets) {
for (BasicBlock &BB : *HandlerF) {
// If the handler contains landing pads, check for any
// handlers that may return directly to a block in the
// parent function.
if (auto *LPI = BB.getLandingPadInst()) {
IntrinsicInst *Recover = cast<IntrinsicInst>(LPI->getNextNode());
SmallVector<std::unique_ptr<ActionHandler>, 4> ActionList;
parseEHActions(Recover, ActionList);
for (const auto &Action : ActionList) {
if (auto *CH = dyn_cast<CatchHandler>(Action.get())) {
Function *NestedF = cast<Function>(CH->getHandlerBlockOrFunc());
getPossibleReturnTargets(ParentF, NestedF, Targets);
}
}
}
auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator());
if (!Ret)
continue;
// Handler functions must always return a block address.
BlockAddress *BA = cast<BlockAddress>(Ret->getReturnValue());
// If this is the handler for a nested landing pad, the
// return address may have been remapped to a block in the
// parent handler. We're not interested in those.
if (BA->getFunction() != ParentF)
continue;
Targets.insert(BA->getBasicBlock());
}
}
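// Completes a landing pad that was cloned into an outlined handler: if the
// clone is still a stub, clone the original llvm.eh.actions call and build an
// indirectbr for it; in either case, remap nested-handler returns that should
// now target blocks inside the outlined handler.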
void WinEHPrepare::completeNestedLandingPad(Function *ParentFn,
LandingPadInst *OutlinedLPad,
const LandingPadInst *OriginalLPad,
FrameVarInfoMap &FrameVarInfo) {
// Get the nested block and erase the unreachable instruction that was
// temporarily inserted as its terminator.
LLVMContext &Context = ParentFn->getContext();
BasicBlock *OutlinedBB = OutlinedLPad->getParent();
// If the nested landing pad was outlined before the landing pad that enclosed
// it, it will already be in outlined form. In that case, we just need to see
// if the returns and the enclosing branch instruction need to be updated.
IndirectBrInst *Branch =
dyn_cast<IndirectBrInst>(OutlinedBB->getTerminator());
if (!Branch) {
// If the landing pad wasn't in outlined form, it should be a stub with
// an unreachable terminator.
assert(isa<UnreachableInst>(OutlinedBB->getTerminator()));
OutlinedBB->getTerminator()->eraseFromParent();
// That should leave OutlinedLPad as the last instruction in its block.
assert(&OutlinedBB->back() == OutlinedLPad);
}
// The original landing pad will have already had its action intrinsic
// built by the outlining loop. We need to clone that into the outlined
// location. It may also be necessary to add references to the exception
// variables to the outlined handler in which this landing pad is nested
// and remap return instructions in the nested handlers that should return
// to an address in the outlined handler.
Function *OutlinedHandlerFn = OutlinedBB->getParent();
BasicBlock::const_iterator II = OriginalLPad;
++II;
// The instruction after the landing pad should now be a call to eh.actions.
const Instruction *Recover = II;
const IntrinsicInst *EHActions = cast<IntrinsicInst>(Recover);
// Remap the return target in the nested handler.
SmallVector<BlockAddress *, 4> ActionTargets;
SmallVector<std::unique_ptr<ActionHandler>, 4> ActionList;
parseEHActions(EHActions, ActionList);
for (const auto &Action : ActionList) {
auto *Catch = dyn_cast<CatchHandler>(Action.get());
if (!Catch)
continue;
// The dyn_cast to Function here selects C++ catch handlers and skips
// SEH catch handlers.
auto *Handler = dyn_cast<Function>(Catch->getHandlerBlockOrFunc());
if (!Handler)
continue;
// Visit all the return instructions, looking for places that return
// to a location within OutlinedHandlerFn.
for (BasicBlock &NestedHandlerBB : *Handler) {
auto *Ret = dyn_cast<ReturnInst>(NestedHandlerBB.getTerminator());
if (!Ret)
continue;
// Handler functions must always return a block address.
BlockAddress *BA = cast<BlockAddress>(Ret->getReturnValue());
// The original target will have been in the main parent function,
// but if it is the address of a block that has been outlined, it
// should be a block that was outlined into OutlinedHandlerFn.
assert(BA->getFunction() == ParentFn);
// Ignore targets that aren't part of an outlined handler function.
if (!LPadTargetBlocks.count(BA->getBasicBlock()))
continue;
// If the return value is the address of a block that we
// previously outlined into the parent handler function, replace
// the return instruction and add the mapped target to the list
// of possible return addresses.
BasicBlock *MappedBB = LPadTargetBlocks[BA->getBasicBlock()];
assert(MappedBB->getParent() == OutlinedHandlerFn);
BlockAddress *NewBA = BlockAddress::get(OutlinedHandlerFn, MappedBB);
Ret->eraseFromParent();
ReturnInst::Create(Context, NewBA, &NestedHandlerBB);
ActionTargets.push_back(NewBA);
}
}
ActionList.clear();
if (Branch) {
// If the landing pad was already in outlined form, just update its targets.
for (unsigned int I = Branch->getNumDestinations(); I > 0; --I)
Branch->removeDestination(I);
// Add the previously collected action targets.
for (auto *Target : ActionTargets)
Branch->addDestination(Target->getBasicBlock());
} else {
// If the landing pad was previously stubbed out, fill in its outlined form.
IntrinsicInst *NewEHActions = cast<IntrinsicInst>(EHActions->clone());
OutlinedBB->getInstList().push_back(NewEHActions);
// Insert an indirect branch into the outlined landing pad BB.
IndirectBrInst *IBr = IndirectBrInst::Create(NewEHActions, 0, OutlinedBB);
// Add the previously collected action targets.
for (auto *Target : ActionTargets)
IBr->addDestination(Target->getBasicBlock());
}
}
// This function examines a block to determine whether the block ends with a
// conditional branch to a catch handler based on a selector comparison.
// It is used by both WinEHPrepare::findSelectorComparison() and
// WinEHCleanupDirector::handleTypeIdFor().
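// Schematically, the pattern matched is (names illustrative):
// %tid = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @ti to i8*))
// %matches = icmp eq i32 %sel, %tid
// br i1 %matches, label %catch, label %next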
static bool isSelectorDispatch(BasicBlock *BB, BasicBlock *&CatchHandler,
Constant *&Selector, BasicBlock *&NextBB) {
ICmpInst::Predicate Pred;
BasicBlock *TBB, *FBB;
Value *LHS, *RHS;
if (!match(BB->getTerminator(),
m_Br(m_ICmp(Pred, m_Value(LHS), m_Value(RHS)), TBB, FBB)))
return false;
if (!match(LHS,
m_Intrinsic<Intrinsic::eh_typeid_for>(m_Constant(Selector))) &&
!match(RHS, m_Intrinsic<Intrinsic::eh_typeid_for>(m_Constant(Selector))))
return false;
if (Pred == CmpInst::ICMP_EQ) {
CatchHandler = TBB;
NextBB = FBB;
return true;
}
if (Pred == CmpInst::ICMP_NE) {
CatchHandler = FBB;
NextBB = TBB;
return true;
}
return false;
}
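// Returns true if the block contains a call to llvm.eh.begincatch.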
static bool isCatchBlock(BasicBlock *BB) {
for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
II != IE; ++II) {
if (match(cast<Value>(II), m_Intrinsic<Intrinsic::eh_begincatch>()))
return true;
}
return false;
}
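// Creates a block containing only a cleanup landingpad, a call to
// llvm.eh.actions (so later processing does not try to outline it), and an
// unreachable terminator; it exists solely to give the handler a landing pad.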
static BasicBlock *createStubLandingPad(Function *Handler) {
// FIXME: Finish this!
LLVMContext &Context = Handler->getContext();
BasicBlock *StubBB = BasicBlock::Create(Context, "stub");
Handler->getBasicBlockList().push_back(StubBB);
IRBuilder<> Builder(StubBB);
LandingPadInst *LPad = Builder.CreateLandingPad(
llvm::StructType::get(Type::getInt8PtrTy(Context),
Type::getInt32Ty(Context), nullptr),
0);
// Insert a call to llvm.eh.actions so that we don't try to outline this lpad.
Function *ActionIntrin =
Intrinsic::getDeclaration(Handler->getParent(), Intrinsic::eh_actions);
Builder.CreateCall(ActionIntrin, {}, "recover");
LPad->setCleanup(true);
Builder.CreateUnreachable();
return StubBB;
}
// Cycles through the blocks in an outlined handler function looking for an
// invoke instruction and inserts an invoke of llvm.donothing with an empty
// landing pad if none is found. The code that generates the .xdata tables for
// the handler needs at least one landing pad to identify the parent function's
// personality.
void WinEHPrepare::addStubInvokeToHandlerIfNeeded(Function *Handler) {
ReturnInst *Ret = nullptr;
UnreachableInst *Unreached = nullptr;
for (BasicBlock &BB : *Handler) {
TerminatorInst *Terminator = BB.getTerminator();
// If we find an invoke, there is nothing to be done.
auto *II = dyn_cast<InvokeInst>(Terminator);
if (II)
return;
// If we haven't recorded a return instruction yet, try this terminator.
if (!Ret)
Ret = dyn_cast<ReturnInst>(Terminator);
// If we haven't recorded an unreachable instruction, try this terminator.
if (!Unreached)
Unreached = dyn_cast<UnreachableInst>(Terminator);
}
// If we got this far, the handler contains no invokes. We should have seen
// at least one return or unreachable instruction. We'll insert an invoke of
// llvm.donothing ahead of that instruction.
assert(Ret || Unreached);
TerminatorInst *Term;
if (Ret)
Term = Ret;
else
Term = Unreached;
BasicBlock *OldRetBB = Term->getParent();
BasicBlock *NewRetBB = SplitBlock(OldRetBB, Term, DT);
// SplitBlock adds an unconditional branch instruction at the end of the
// parent block. We want to replace that with an invoke call, so we can
// erase it now.
OldRetBB->getTerminator()->eraseFromParent();
BasicBlock *StubLandingPad = createStubLandingPad(Handler);
Function *F =
Intrinsic::getDeclaration(Handler->getParent(), Intrinsic::donothing);
InvokeInst::Create(F, NewRetBB, StubLandingPad, None, "", OldRetBB);
}
// FIXME: Consider sinking this into lib/Target/X86 somehow. TargetLowering
// usually doesn't build LLVM IR, so that's probably the wrong place.
Function *WinEHPrepare::createHandlerFunc(Function *ParentFn, Type *RetTy,
const Twine &Name, Module *M,
Value *&ParentFP) {
// x64 uses a two-argument prototype where the parent FP is the second
// argument. x86 uses no arguments, just the incoming EBP value.
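// (Schematically, an x86_64 catch handler has the prototype
// i8* @parent.catch(i8* %0, i8* %parent_fp);
// the function and argument names here are illustrative, and only the
// second argument, the parent FP, is relied upon below.)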
LLVMContext &Context = M->getContext();
Type *Int8PtrType = Type::getInt8PtrTy(Context);
FunctionType *FnType;
if (TheTriple.getArch() == Triple::x86_64) {
Type *ArgTys[2] = {Int8PtrType, Int8PtrType};
FnType = FunctionType::get(RetTy, ArgTys, false);
} else {
FnType = FunctionType::get(RetTy, None, false);
}
Function *Handler =
Function::Create(FnType, GlobalVariable::InternalLinkage, Name, M);
BasicBlock *Entry = BasicBlock::Create(Context, "entry");
Handler->getBasicBlockList().push_front(Entry);
if (TheTriple.getArch() == Triple::x86_64) {
ParentFP = &(Handler->getArgumentList().back());
} else {
assert(M);
Function *FrameAddressFn =
Intrinsic::getDeclaration(M, Intrinsic::frameaddress);
Function *RecoverFPFn =
Intrinsic::getDeclaration(M, Intrinsic::x86_seh_recoverfp);
IRBuilder<> Builder(&Handler->getEntryBlock());
Value *EBP =
Builder.CreateCall(FrameAddressFn, {Builder.getInt32(1)}, "ebp");
Value *ParentI8Fn = Builder.CreateBitCast(ParentFn, Int8PtrType);
ParentFP = Builder.CreateCall(RecoverFPFn, {ParentI8Fn, EBP});
}
return Handler;
}
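// Outlines one catch or cleanup action into a new handler function by cloning
// the blocks reachable from StartBB, recording parent-frame variables used by
// the cloned code in VarInfo.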
bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
LandingPadInst *LPad, BasicBlock *StartBB,
FrameVarInfoMap &VarInfo) {
Module *M = SrcFn->getParent();
LLVMContext &Context = M->getContext();
Type *Int8PtrType = Type::getInt8PtrTy(Context);
// Create a new function to receive the handler contents.
Value *ParentFP;
Function *Handler;
if (Action->getType() == Catch) {
Handler = createHandlerFunc(SrcFn, Int8PtrType, SrcFn->getName() + ".catch", M,
ParentFP);
} else {
Handler = createHandlerFunc(SrcFn, Type::getVoidTy(Context),
SrcFn->getName() + ".cleanup", M, ParentFP);
}
Handler->setPersonalityFn(SrcFn->getPersonalityFn());
HandlerToParentFP[Handler] = ParentFP;
Handler->addFnAttr("wineh-parent", SrcFn->getName());
BasicBlock *Entry = &Handler->getEntryBlock();
// Generate a standard prolog to setup the frame recovery structure.
IRBuilder<> Builder(Context);
Builder.SetInsertPoint(Entry);
Builder.SetCurrentDebugLocation(LPad->getDebugLoc());
std::unique_ptr<WinEHCloningDirectorBase> Director;
ValueToValueMapTy VMap;
LandingPadMap &LPadMap = LPadMaps[LPad];
if (!LPadMap.isInitialized())
LPadMap.mapLandingPad(LPad);
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
Constant *Sel = CatchAction->getSelector();
Director.reset(new WinEHCatchDirector(Handler, ParentFP, Sel, VarInfo,
LPadMap, NestedLPtoOriginalLP, DT,
EHBlocks));
LPadMap.remapEHValues(VMap, UndefValue::get(Int8PtrType),
ConstantInt::get(Type::getInt32Ty(Context), 1));
} else {
Director.reset(
new WinEHCleanupDirector(Handler, ParentFP, VarInfo, LPadMap));
LPadMap.remapEHValues(VMap, UndefValue::get(Int8PtrType),
UndefValue::get(Type::getInt32Ty(Context)));
}
SmallVector<ReturnInst *, 8> Returns;
ClonedCodeInfo OutlinedFunctionInfo;
// If the start block contains PHI nodes, we need to map them.
BasicBlock::iterator II = StartBB->begin();
while (auto *PN = dyn_cast<PHINode>(II)) {
bool Mapped = false;
// Look for PHI values that we have already mapped (such as the selector).
for (Value *Val : PN->incoming_values()) {
if (VMap.count(Val)) {
VMap[PN] = VMap[Val];
Mapped = true;
}
}
// If we didn't find a match for this value, map it as an undef.
if (!Mapped) {
VMap[PN] = UndefValue::get(PN->getType());
}
++II;
}
// The landing pad value may be used by PHI nodes. It will ultimately be
// eliminated, but we need it in the map for intermediate handling.
VMap[LPad] = UndefValue::get(LPad->getType());
// Skip over PHIs and, if applicable, landingpad instructions.
II = StartBB->getFirstInsertionPt();
CloneAndPruneIntoFromInst(Handler, SrcFn, II, VMap,
/*ModuleLevelChanges=*/false, Returns, "",
&OutlinedFunctionInfo, Director.get());
// Move all the instructions in the cloned "entry" block into our entry block.
// Depending on how the parent function was laid out, the block that will
// correspond to the outlined entry block may not be the first block in the
// list. We can recognize it, however, as the cloned block which has no
// predecessors. Any other block wouldn't have been cloned if it didn't
// have a predecessor which was also cloned.
Function::iterator ClonedIt = std::next(Function::iterator(Entry));
while (!pred_empty(ClonedIt))
++ClonedIt;
BasicBlock *ClonedEntryBB = ClonedIt;
assert(ClonedEntryBB);
Entry->getInstList().splice(Entry->end(), ClonedEntryBB->getInstList());
ClonedEntryBB->eraseFromParent();
// Make sure we can identify the handler's personality later.
addStubInvokeToHandlerIfNeeded(Handler);
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
WinEHCatchDirector *CatchDirector =
reinterpret_cast<WinEHCatchDirector *>(Director.get());
CatchAction->setExceptionVar(CatchDirector->getExceptionVar());
CatchAction->setReturnTargets(CatchDirector->getReturnTargets());
// Look for blocks that are not part of the landing pad that we just
// outlined but terminate with a call to llvm.eh.endcatch and a
// branch to a block that is in the handler we just outlined.
// These blocks will be part of a nested landing pad that intends to
// return to an address in this handler. This case is best handled
// after both landing pads have been outlined, so for now we'll just
// save the association of the blocks in LPadTargetBlocks. The
// return instructions which are created from these branches will be
// replaced after all landing pads have been outlined.
for (const auto &MapEntry : VMap) {
// VMap maps all values and blocks that were just cloned, but dead
// blocks which were pruned will map to nullptr.
if (!isa<BasicBlock>(MapEntry.first) || MapEntry.second == nullptr)
continue;
const BasicBlock *MappedBB = cast<BasicBlock>(MapEntry.first);
for (auto *Pred : predecessors(const_cast<BasicBlock *>(MappedBB))) {
auto *Branch = dyn_cast<BranchInst>(Pred->getTerminator());
if (!Branch || !Branch->isUnconditional() || Pred->size() <= 1)
continue;
BasicBlock::iterator II = Branch;
--II;
if (match(cast<Value>(II), m_Intrinsic<Intrinsic::eh_endcatch>())) {
// This would indicate that a nested landing pad wants to return
// to a block that is outlined into two different handlers.
assert(!LPadTargetBlocks.count(MappedBB));
LPadTargetBlocks[MappedBB] = cast<BasicBlock>(MapEntry.second);
}
}
}
} // End if (CatchAction)
Action->setHandlerBlockOrFunc(Handler);
return true;
}
/// This BB must end in a selector dispatch. All we need to do is pass the
/// handler block to llvm.eh.actions and list it as a possible indirectbr
/// target.
void WinEHPrepare::processSEHCatchHandler(CatchHandler *CatchAction,
BasicBlock *StartBB) {
BasicBlock *HandlerBB;
BasicBlock *NextBB;
Constant *Selector;
bool Res = isSelectorDispatch(StartBB, HandlerBB, Selector, NextBB);
if (Res) {
// If this was EH dispatch, this must be a conditional branch to the handler
// block.
// FIXME: Handle instructions in the dispatch block. Currently we drop them,
// leading to crashes if some optimization hoists stuff here.
assert(CatchAction->getSelector() && HandlerBB &&
"expected catch EH dispatch");
} else {
// This must be a catch-all. Split the block after the landingpad.
assert(CatchAction->getSelector()->isNullValue() && "expected catch-all");
HandlerBB = SplitBlock(StartBB, StartBB->getFirstInsertionPt(), DT);
}
IRBuilder<> Builder(HandlerBB->getFirstInsertionPt());
Function *EHCodeFn = Intrinsic::getDeclaration(
StartBB->getParent()->getParent(), Intrinsic::eh_exceptioncode);
Value *Code = Builder.CreateCall(EHCodeFn, {}, "sehcode");
Code = Builder.CreateIntToPtr(Code, SEHExceptionCodeSlot->getAllocatedType());
Builder.CreateStore(Code, SEHExceptionCodeSlot);
CatchAction->setHandlerBlockOrFunc(BlockAddress::get(HandlerBB));
TinyPtrVector<BasicBlock *> Targets(HandlerBB);
CatchAction->setReturnTargets(Targets);
}
void LandingPadMap::mapLandingPad(const LandingPadInst *LPad) {
// Each instance of this class should only ever be used to map a single
// landing pad.
assert(OriginLPad == nullptr || OriginLPad == LPad);
// If the landing pad has already been mapped, there's nothing more to do.
if (OriginLPad == LPad)
return;
OriginLPad = LPad;
// The landingpad instruction returns an aggregate value. Typically, its
// value will be passed to a pair of extract value instructions and the
// results of those extracts will have been promoted to reg values before
// this routine is called.
for (auto *U : LPad->users()) {
const ExtractValueInst *Extract = dyn_cast<ExtractValueInst>(U);
if (!Extract)
continue;
assert(Extract->getNumIndices() == 1 &&
"Unexpected operation: extracting both landing pad values");
unsigned int Idx = *(Extract->idx_begin());
assert((Idx == 0 || Idx == 1) &&
"Unexpected operation: extracting an unknown landing pad element");
if (Idx == 0) {
ExtractedEHPtrs.push_back(Extract);
} else if (Idx == 1) {
ExtractedSelectors.push_back(Extract);
}
}
}
bool LandingPadMap::isOriginLandingPadBlock(const BasicBlock *BB) const {
return BB->getLandingPadInst() == OriginLPad;
}
bool LandingPadMap::isLandingPadSpecificInst(const Instruction *Inst) const {
if (Inst == OriginLPad)
return true;
for (auto *Extract : ExtractedEHPtrs) {
if (Inst == Extract)
return true;
}
for (auto *Extract : ExtractedSelectors) {
if (Inst == Extract)
return true;
}
return false;
}
void LandingPadMap::remapEHValues(ValueToValueMapTy &VMap, Value *EHPtrValue,
Value *SelectorValue) const {
// Remap all landing pad extract instructions to the specified values.
for (auto *Extract : ExtractedEHPtrs)
VMap[Extract] = EHPtrValue;
for (auto *Extract : ExtractedSelectors)
VMap[Extract] = SelectorValue;
}
static bool isLocalAddressCall(const Value *V) {
return match(const_cast<Value *>(V), m_Intrinsic<Intrinsic::localaddress>());
}
CloningDirector::CloningAction WinEHCloningDirectorBase::handleInstruction(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
// If this is one of the boilerplate landing pad instructions, skip it.
// The instruction will have already been remapped in VMap.
if (LPadMap.isLandingPadSpecificInst(Inst))
return CloningDirector::SkipInstruction;
// Nested landing pads that have not already been outlined will be cloned as
// stubs, with just the landingpad instruction and an unreachable instruction.
// When all landingpads have been outlined, we'll replace this with the
// llvm.eh.actions call and indirect branch created when the landing pad was
// outlined.
if (auto *LPad = dyn_cast<LandingPadInst>(Inst)) {
return handleLandingPad(VMap, LPad, NewBB);
}
// Nested landing pads that have already been outlined will be cloned in
// their outlined form, but we need to intercept the indirectbr instruction
// to filter out targets that do not return to the handler we are outlining.
if (auto *IBr = dyn_cast<IndirectBrInst>(Inst)) {
return handleIndirectBr(VMap, IBr, NewBB);
}
if (auto *Invoke = dyn_cast<InvokeInst>(Inst))
return handleInvoke(VMap, Invoke, NewBB);
if (auto *Resume = dyn_cast<ResumeInst>(Inst))
return handleResume(VMap, Resume, NewBB);
if (auto *Cmp = dyn_cast<CmpInst>(Inst))
return handleCompare(VMap, Cmp, NewBB);
if (match(Inst, m_Intrinsic<Intrinsic::eh_begincatch>()))
return handleBeginCatch(VMap, Inst, NewBB);
if (match(Inst, m_Intrinsic<Intrinsic::eh_endcatch>()))
return handleEndCatch(VMap, Inst, NewBB);
if (match(Inst, m_Intrinsic<Intrinsic::eh_typeid_for>()))
return handleTypeIdFor(VMap, Inst, NewBB);
// When outlining llvm.localaddress(), remap that to the second argument,
// which is the FP of the parent.
if (isLocalAddressCall(Inst)) {
VMap[Inst] = ParentFP;
return CloningDirector::SkipInstruction;
}
// Continue with the default cloning behavior.
return CloningDirector::CloneInstruction;
}
CloningDirector::CloningAction WinEHCatchDirector::handleLandingPad(
ValueToValueMapTy &VMap, const LandingPadInst *LPad, BasicBlock *NewBB) {
// If the instruction after the landing pad is a call to llvm.eh.actions,
// the landing pad has already been outlined. In this case, we should
// clone it because it may return to a block in the handler we are
// outlining now that would otherwise be unreachable. The landing pads
// are sorted before outlining begins to enable this case to work
// properly.
const Instruction *NextI = LPad->getNextNode();
if (match(NextI, m_Intrinsic<Intrinsic::eh_actions>()))
return CloningDirector::CloneInstruction;
// If the landing pad hasn't been outlined yet, the landing pad we are
// outlining now does not dominate it and so it cannot return to a block
// in this handler. In that case, we can just insert a stub landing
// pad now and patch it up later.
Instruction *NewInst = LPad->clone();
if (LPad->hasName())
NewInst->setName(LPad->getName());
// Save this correlation for later processing.
NestedLPtoOriginalLP[cast<LandingPadInst>(NewInst)] = LPad;
VMap[LPad] = NewInst;
BasicBlock::InstListType &InstList = NewBB->getInstList();
InstList.push_back(NewInst);
InstList.push_back(new UnreachableInst(NewBB->getContext()));
return CloningDirector::StopCloningBB;
}
CloningDirector::CloningAction WinEHCatchDirector::handleBeginCatch(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
// The argument to the call is some form of the first element of the
// landingpad aggregate value, but that doesn't matter. It isn't used
// here.
// The second argument is an outparameter where the exception object will be
// stored. Typically the exception object is a scalar, but it can be an
// aggregate when catching by value.
// FIXME: Leave something behind to indicate where the exception object lives
// for this handler. Should it be part of llvm.eh.actions?
assert(ExceptionObjectVar == nullptr && "Multiple calls to "
"llvm.eh.begincatch found while "
"outlining catch handler.");
ExceptionObjectVar = Inst->getOperand(1)->stripPointerCasts();
if (isa<ConstantPointerNull>(ExceptionObjectVar))
return CloningDirector::SkipInstruction;
assert(cast<AllocaInst>(ExceptionObjectVar)->isStaticAlloca() &&
"catch parameter is not static alloca");
Materializer.escapeCatchObject(ExceptionObjectVar);
return CloningDirector::SkipInstruction;
}
CloningDirector::CloningAction
WinEHCatchDirector::handleEndCatch(ValueToValueMapTy &VMap,
const Instruction *Inst, BasicBlock *NewBB) {
auto *IntrinCall = dyn_cast<IntrinsicInst>(Inst);
// It might be interesting to track whether or not we are inside a catch
// function, but that might make the algorithm more brittle than it needs
// to be.
// The end catch call can occur in one of two places: either in a
// landingpad block that is part of the catch handler's exception mechanism,
// or at the end of the catch block. However, a catch-all handler may call
// end catch from the original landing pad. If the call occurs in a nested
// landing pad block, we must skip it and continue so that the landing pad
// gets cloned.
auto *ParentBB = IntrinCall->getParent();
if (ParentBB->isLandingPad() && !LPadMap.isOriginLandingPadBlock(ParentBB))
return CloningDirector::SkipInstruction;
// If an end catch occurs anywhere else we want to terminate the handler
// with a return to the code that follows the endcatch call. If the
// next instruction is not an unconditional branch, we need to split the
// block to provide a clear target for the return instruction.
BasicBlock *ContinueBB;
auto Next = std::next(BasicBlock::const_iterator(IntrinCall));
const BranchInst *Branch = dyn_cast<BranchInst>(Next);
if (!Branch || !Branch->isUnconditional()) {
// We're interrupting the cloning process at this location, so the
// const_cast we're doing here will not cause a problem.
ContinueBB = SplitBlock(const_cast<BasicBlock *>(ParentBB),
const_cast<Instruction *>(cast<Instruction>(Next)));
} else {
ContinueBB = Branch->getSuccessor(0);
}
ReturnInst::Create(NewBB->getContext(), BlockAddress::get(ContinueBB), NewBB);
ReturnTargets.push_back(ContinueBB);
// We just added a terminator to the cloned block.
// Tell the caller to stop processing the current basic block so that
// the branch instruction will be skipped.
return CloningDirector::StopCloningBB;
}
CloningDirector::CloningAction WinEHCatchDirector::handleTypeIdFor(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
auto *IntrinCall = dyn_cast<IntrinsicInst>(Inst);
Value *Selector = IntrinCall->getArgOperand(0)->stripPointerCasts();
// This causes a replacement that will collapse the landing pad CFG based
// on the filter function we intend to match.
if (Selector == CurrentSelector)
VMap[Inst] = ConstantInt::get(SelectorIDType, 1);
else
VMap[Inst] = ConstantInt::get(SelectorIDType, 0);
// Tell the caller not to clone this instruction.
return CloningDirector::SkipInstruction;
}
CloningDirector::CloningAction WinEHCatchDirector::handleIndirectBr(
ValueToValueMapTy &VMap,
const IndirectBrInst *IBr,
BasicBlock *NewBB) {
// If this indirect branch is not part of a landing pad block, just clone it.
const BasicBlock *ParentBB = IBr->getParent();
if (!ParentBB->isLandingPad())
return CloningDirector::CloneInstruction;
// If it is part of a landing pad, we want to filter out target blocks
// that are not part of the handler we are outlining.
const LandingPadInst *LPad = ParentBB->getLandingPadInst();
// Save this correlation for later processing.
NestedLPtoOriginalLP[cast<LandingPadInst>(VMap[LPad])] = LPad;
// We should only get here for landing pads that have already been outlined.
assert(match(LPad->getNextNode(), m_Intrinsic<Intrinsic::eh_actions>()));
// Copy the indirectbr, but only include targets that were previously
// identified as EH blocks and are dominated by the nested landing pad.
SetVector<const BasicBlock *> ReturnTargets;
for (int I = 0, E = IBr->getNumDestinations(); I < E; ++I) {
auto *TargetBB = IBr->getDestination(I);
if (EHBlocks.count(const_cast<BasicBlock*>(TargetBB)) &&
DT->dominates(ParentBB, TargetBB)) {
DEBUG(dbgs() << " Adding destination " << TargetBB->getName() << "\n");
ReturnTargets.insert(TargetBB);
}
}
IndirectBrInst *NewBranch =
IndirectBrInst::Create(const_cast<Value *>(IBr->getAddress()),
ReturnTargets.size(), NewBB);
for (auto *Target : ReturnTargets)
NewBranch->addDestination(const_cast<BasicBlock*>(Target));
// The operands and targets of the branch instruction are remapped later
// because it is a terminator. Tell the cloning code to clone the
// blocks we just added to the target list.
return CloningDirector::CloneSuccessors;
}
CloningDirector::CloningAction
WinEHCatchDirector::handleInvoke(ValueToValueMapTy &VMap,
const InvokeInst *Invoke, BasicBlock *NewBB) {
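  // Invoke instructions inside a catch handler need no special handling;
  // clone them as-is.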
return CloningDirector::CloneInstruction;
}
CloningDirector::CloningAction
WinEHCatchDirector::handleResume(ValueToValueMapTy &VMap,
const ResumeInst *Resume, BasicBlock *NewBB) {
// Resume instructions shouldn't be reachable from catch handlers.
// We still need to handle it, but it will be pruned.
BasicBlock::InstListType &InstList = NewBB->getInstList();
InstList.push_back(new UnreachableInst(NewBB->getContext()));
return CloningDirector::StopCloningBB;
}
CloningDirector::CloningAction
WinEHCatchDirector::handleCompare(ValueToValueMapTy &VMap,
const CmpInst *Compare, BasicBlock *NewBB) {
const IntrinsicInst *IntrinCall = nullptr;
if (match(Compare->getOperand(0), m_Intrinsic<Intrinsic::eh_typeid_for>())) {
IntrinCall = dyn_cast<IntrinsicInst>(Compare->getOperand(0));
} else if (match(Compare->getOperand(1),
m_Intrinsic<Intrinsic::eh_typeid_for>())) {
IntrinCall = dyn_cast<IntrinsicInst>(Compare->getOperand(1));
}
if (IntrinCall) {
Value *Selector = IntrinCall->getArgOperand(0)->stripPointerCasts();
// This causes a replacement that will collapse the landing pad CFG based
// on the filter function we intend to match.
if (Selector == CurrentSelector->stripPointerCasts()) {
VMap[Compare] = ConstantInt::get(SelectorIDType, 1);
} else {
VMap[Compare] = ConstantInt::get(SelectorIDType, 0);
}
return CloningDirector::SkipInstruction;
}
return CloningDirector::CloneInstruction;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleLandingPad(
ValueToValueMapTy &VMap, const LandingPadInst *LPad, BasicBlock *NewBB) {
// The MS runtime will terminate the process if an exception occurs in a
// cleanup handler, so we shouldn't encounter landing pads in the actual
// cleanup code, but they may appear in catch blocks. Depending on where
// we started cloning we may see one, but it will get dropped during dead
// block pruning.
Instruction *NewInst = new UnreachableInst(NewBB->getContext());
VMap[LPad] = NewInst;
BasicBlock::InstListType &InstList = NewBB->getInstList();
InstList.push_back(NewInst);
return CloningDirector::StopCloningBB;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleBeginCatch(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
// Cleanup code may flow into catch blocks or the catch block may be part
// of a branch that will be optimized away. We'll insert a return
// instruction now, but it may be pruned before the cloning process is
// complete.
ReturnInst::Create(NewBB->getContext(), nullptr, NewBB);
return CloningDirector::StopCloningBB;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleEndCatch(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
// Cleanup handlers nested within catch handlers may begin with a call to
// eh.endcatch. We can just ignore that instruction.
return CloningDirector::SkipInstruction;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleTypeIdFor(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
// If we encounter a selector comparison while cloning a cleanup handler,
// we want to stop cloning immediately. Anything after the dispatch
// will be outlined into a different handler.
BasicBlock *CatchHandler;
Constant *Selector;
BasicBlock *NextBB;
if (isSelectorDispatch(const_cast<BasicBlock *>(Inst->getParent()),
CatchHandler, Selector, NextBB)) {
ReturnInst::Create(NewBB->getContext(), nullptr, NewBB);
return CloningDirector::StopCloningBB;
}
// If eh.typeid.for is called for any other reason, it can be ignored.
VMap[Inst] = ConstantInt::get(SelectorIDType, 0);
return CloningDirector::SkipInstruction;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleIndirectBr(
ValueToValueMapTy &VMap,
const IndirectBrInst *IBr,
BasicBlock *NewBB) {
// No special handling is required for cleanup cloning.
return CloningDirector::CloneInstruction;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleInvoke(
ValueToValueMapTy &VMap, const InvokeInst *Invoke, BasicBlock *NewBB) {
// All invokes in cleanup handlers can be replaced with calls.
SmallVector<Value *, 16> CallArgs(Invoke->op_begin(), Invoke->op_end() - 3);
// Insert a normal call instruction...
CallInst *NewCall =
CallInst::Create(const_cast<Value *>(Invoke->getCalledValue()), CallArgs,
Invoke->getName(), NewBB);
NewCall->setCallingConv(Invoke->getCallingConv());
NewCall->setAttributes(Invoke->getAttributes());
NewCall->setDebugLoc(Invoke->getDebugLoc());
VMap[Invoke] = NewCall;
// Remap the operands.
llvm::RemapInstruction(NewCall, VMap, RF_None, nullptr, &Materializer);
// Insert an unconditional branch to the normal destination.
BranchInst::Create(Invoke->getNormalDest(), NewBB);
// The unwind destination won't be cloned into the new function, so
// we don't need to clean up its phi nodes.
// We just added a terminator to the cloned block. Tell the caller to stop
// processing the current basic block and to clone its successors.
return CloningDirector::CloneSuccessors;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleResume(
ValueToValueMapTy &VMap, const ResumeInst *Resume, BasicBlock *NewBB) {
ReturnInst::Create(NewBB->getContext(), nullptr, NewBB);
// We just added a terminator to the cloned block.
// Tell the caller to stop processing the current basic block so that
// the branch instruction will be skipped.
return CloningDirector::StopCloningBB;
}
CloningDirector::CloningAction
WinEHCleanupDirector::handleCompare(ValueToValueMapTy &VMap,
const CmpInst *Compare, BasicBlock *NewBB) {
if (match(Compare->getOperand(0), m_Intrinsic<Intrinsic::eh_typeid_for>()) ||
match(Compare->getOperand(1), m_Intrinsic<Intrinsic::eh_typeid_for>())) {
VMap[Compare] = ConstantInt::get(SelectorIDType, 1);
return CloningDirector::SkipInstruction;
}
return CloningDirector::CloneInstruction;
}
WinEHFrameVariableMaterializer::WinEHFrameVariableMaterializer(
Function *OutlinedFn, Value *ParentFP, FrameVarInfoMap &FrameVarInfo)
: FrameVarInfo(FrameVarInfo), Builder(OutlinedFn->getContext()) {
BasicBlock *EntryBB = &OutlinedFn->getEntryBlock();
// New allocas should be inserted in the entry block, but after the parent FP
// is established if it is an instruction.
Instruction *InsertPoint = EntryBB->getFirstInsertionPt();
if (auto *FPInst = dyn_cast<Instruction>(ParentFP))
InsertPoint = FPInst->getNextNode();
Builder.SetInsertPoint(EntryBB, InsertPoint);
}
Value *WinEHFrameVariableMaterializer::materializeValueFor(Value *V) {
// If we're asked to materialize a static alloca, we temporarily create an
// alloca in the outlined function and add this to the FrameVarInfo map. When
// all the outlining is complete, we'll replace these temporary allocas with
// calls to llvm.localrecover.
if (auto *AV = dyn_cast<AllocaInst>(V)) {
assert(AV->isStaticAlloca() &&
"cannot materialize un-demoted dynamic alloca");
AllocaInst *NewAlloca = dyn_cast<AllocaInst>(AV->clone());
Builder.Insert(NewAlloca, AV->getName());
FrameVarInfo[AV].push_back(NewAlloca);
return NewAlloca;
}
if (isa<Instruction>(V) || isa<Argument>(V)) {
Function *Parent = isa<Instruction>(V)
? cast<Instruction>(V)->getParent()->getParent()
: cast<Argument>(V)->getParent();
errs()
<< "Failed to demote instruction used in exception handler of function "
<< GlobalValue::getRealLinkageName(Parent->getName()) << ":\n";
errs() << " " << *V << '\n';
report_fatal_error("WinEHPrepare failed to demote instruction");
}
// Don't materialize other values.
return nullptr;
}
void WinEHFrameVariableMaterializer::escapeCatchObject(Value *V) {
// Catch parameter objects have to live in the parent frame. When we see a use
// of a catch parameter, add a sentinel to the multimap to indicate that it's
// used from another handler. This will prevent us from trying to sink the
// alloca into the handler and ensure that the catch parameter is present in
// the call to llvm.localescape.
FrameVarInfo[V].push_back(getCatchObjectSentinel());
}
// This function maps the catch and cleanup handlers that are reachable from the
// specified landing pad. The landing pad sequence will have this basic shape:
//
// <cleanup handler>
// <selector comparison>
// <catch handler>
// <cleanup handler>
// <selector comparison>
// <catch handler>
// <cleanup handler>
// ...
//
// Any of the cleanup slots may be absent. The cleanup slots may be occupied by
// any arbitrary control flow, but all paths through the cleanup code must
// eventually reach the next selector comparison and no path can skip to a
// different selector comparison, though some paths may terminate abnormally.
// Therefore, we will use a depth first search from the start of any given
// cleanup block and stop searching when we find the next selector comparison.
//
// If the landingpad instruction does not have a catch clause, we will assume
// that any instructions other than selector comparisons and catch handlers can
// be ignored. In practice, these will only be the boilerplate instructions.
//
// The catch handlers may also have any control structure, but we are only
// interested in the start of the catch handlers, so we don't need to actually
// follow the flow of the catch handlers. The start of the catch handlers can
// be located from the compare instructions, but they can be skipped in the
// flow by following the contrary branch.
void WinEHPrepare::mapLandingPadBlocks(LandingPadInst *LPad,
LandingPadActions &Actions) {
unsigned int NumClauses = LPad->getNumClauses();
unsigned int HandlersFound = 0;
BasicBlock *BB = LPad->getParent();
DEBUG(dbgs() << "Mapping landing pad: " << BB->getName() << "\n");
if (NumClauses == 0) {
findCleanupHandlers(Actions, BB, nullptr);
return;
}
VisitedBlockSet VisitedBlocks;
while (HandlersFound != NumClauses) {
BasicBlock *NextBB = nullptr;
// Skip over filter clauses.
if (LPad->isFilter(HandlersFound)) {
++HandlersFound;
continue;
}
// See if the clause we're looking for is a catch-all.
// If so, the catch begins immediately.
Constant *ExpectedSelector =
LPad->getClause(HandlersFound)->stripPointerCasts();
if (isa<ConstantPointerNull>(ExpectedSelector)) {
// The catch all must occur last.
assert(HandlersFound == NumClauses - 1);
// There can be additional selector dispatches in the call chain that we
// need to ignore.
BasicBlock *CatchBlock = nullptr;
Constant *Selector;
while (BB && isSelectorDispatch(BB, CatchBlock, Selector, NextBB)) {
DEBUG(dbgs() << " Found extra catch dispatch in block "
<< CatchBlock->getName() << "\n");
BB = NextBB;
}
// Add the catch handler to the action list.
CatchHandler *Action = nullptr;
if (CatchHandlerMap.count(BB) && CatchHandlerMap[BB] != nullptr) {
// If the CatchHandlerMap already has an entry for this BB, re-use it.
Action = CatchHandlerMap[BB];
assert(Action->getSelector() == ExpectedSelector);
} else {
// We don't expect a selector dispatch, but there may be a call to
// llvm.eh.begincatch, which separates catch handling code from
// cleanup code in the same control flow. This call looks for the
// begincatch intrinsic.
Action = findCatchHandler(BB, NextBB, VisitedBlocks);
if (Action) {
// For C++ EH, check if there is any interesting cleanup code before
// we begin the catch. This is important because cleanups cannot
// rethrow exceptions but code called from catches can. For SEH, it
// isn't important if some finally code before a catch-all is executed
// out of line or after recovering from the exception.
if (Personality == EHPersonality::MSVC_CXX)
findCleanupHandlers(Actions, BB, BB);
} else {
// If an action was not found, it means that control flows
// directly into the catch-all handler and there is no cleanup code.
// That's an expected situation and we must create a catch action.
// Since this is a catch-all handler, the selector won't actually
// appear in the code anywhere. ExpectedSelector here is the constant
// null ptr that we got from the landing pad instruction.
Action = new CatchHandler(BB, ExpectedSelector, nullptr);
CatchHandlerMap[BB] = Action;
}
}
Actions.insertCatchHandler(Action);
DEBUG(dbgs() << " Catch all handler at block " << BB->getName() << "\n");
++HandlersFound;
// Once we reach a catch-all, don't expect to hit a resume instruction.
BB = nullptr;
break;
}
CatchHandler *CatchAction = findCatchHandler(BB, NextBB, VisitedBlocks);
assert(CatchAction);
// See if there is any interesting code executed before the dispatch.
findCleanupHandlers(Actions, BB, CatchAction->getStartBlock());
// When the source program contains multiple nested try blocks the catch
// handlers can get strung together in such a way that we can encounter
// a dispatch for a selector that we've already had a handler for.
if (CatchAction->getSelector()->stripPointerCasts() == ExpectedSelector) {
++HandlersFound;
// Add the catch handler to the action list.
DEBUG(dbgs() << " Found catch dispatch in block "
<< CatchAction->getStartBlock()->getName() << "\n");
Actions.insertCatchHandler(CatchAction);
} else {
// Under some circumstances optimized IR will flow unconditionally into a
// handler block without checking the selector. This can only happen if
// the landing pad has a catch-all handler and the handler for the
// preceding catch clause is identical to the catch-all handler
// (typically an empty catch). In this case, the handler must be shared
// by all remaining clauses.
if (isa<ConstantPointerNull>(
CatchAction->getSelector()->stripPointerCasts())) {
DEBUG(dbgs() << " Applying early catch-all handler in block "
<< CatchAction->getStartBlock()->getName()
<< " to all remaining clauses.\n");
Actions.insertCatchHandler(CatchAction);
return;
}
DEBUG(dbgs() << " Found extra catch dispatch in block "
<< CatchAction->getStartBlock()->getName() << "\n");
}
// Move on to the block after the catch handler.
BB = NextBB;
}
// If we didn't wind up in a catch-all, see if there is any interesting code
// executed before the resume.
findCleanupHandlers(Actions, BB, BB);
// It's possible that some optimization moved code into a landingpad that
// wasn't previously being used for cleanup. If that happens, we need to
// execute that extra code from a cleanup handler.
if (Actions.includesCleanup() && !LPad->isCleanup())
LPad->setCleanup(true);
}
// This function searches starting with the input block for the next
// block that terminates with a branch whose condition is based on a selector
// comparison. This may be the input block. See the mapLandingPadBlocks
// comments for a discussion of control flow assumptions.
//
CatchHandler *WinEHPrepare::findCatchHandler(BasicBlock *BB,
BasicBlock *&NextBB,
VisitedBlockSet &VisitedBlocks) {
// See if we've already found a catch handler for this block; if so, use it.
// Call count() first to avoid creating a null entry for blocks
// we haven't seen before.
if (CatchHandlerMap.count(BB) && CatchHandlerMap[BB] != nullptr) {
CatchHandler *Action = cast<CatchHandler>(CatchHandlerMap[BB]);
NextBB = Action->getNextBB();
return Action;
}
// VisitedBlocks applies only to the current search. We still
// need to consider blocks that we've visited while mapping other
// landing pads.
VisitedBlocks.insert(BB);
BasicBlock *CatchBlock = nullptr;
Constant *Selector = nullptr;
// If this is the first time we've visited this block from any landing pad,
// look to see if it is a selector dispatch block.
if (!CatchHandlerMap.count(BB)) {
if (isSelectorDispatch(BB, CatchBlock, Selector, NextBB)) {
CatchHandler *Action = new CatchHandler(BB, Selector, NextBB);
CatchHandlerMap[BB] = Action;
return Action;
}
// If we encounter a block containing an llvm.eh.begincatch before we
// find a selector dispatch block, the handler is assumed to be
// reached unconditionally. This happens for catch-all blocks, but
// it can also happen for other catch handlers that have been combined
// with the catch-all handler during optimization.
if (isCatchBlock(BB)) {
PointerType *Int8PtrTy = Type::getInt8PtrTy(BB->getContext());
Constant *NullSelector = ConstantPointerNull::get(Int8PtrTy);
CatchHandler *Action = new CatchHandler(BB, NullSelector, nullptr);
CatchHandlerMap[BB] = Action;
return Action;
}
}
// Visit each successor, looking for the dispatch.
// FIXME: We expect to find the dispatch quickly, so this will probably
// work better as a breadth first search.
for (BasicBlock *Succ : successors(BB)) {
if (VisitedBlocks.count(Succ))
continue;
CatchHandler *Action = findCatchHandler(Succ, NextBB, VisitedBlocks);
if (Action)
return Action;
}
return nullptr;
}
// These are helper functions that factor out repeated code from
// findCleanupHandlers.
static void createCleanupHandler(LandingPadActions &Actions,
CleanupHandlerMapTy &CleanupHandlerMap,
BasicBlock *BB) {
CleanupHandler *Action = new CleanupHandler(BB);
CleanupHandlerMap[BB] = Action;
Actions.insertCleanupHandler(Action);
DEBUG(dbgs() << " Found cleanup code in block "
<< Action->getStartBlock()->getName() << "\n");
}
static CallSite matchOutlinedFinallyCall(BasicBlock *BB,
Instruction *MaybeCall) {
// Look for finally blocks that Clang has already outlined for us.
// %fp = call i8* @llvm.localaddress()
// call void @"fin$parent"(iN 1, i8* %fp)
if (isLocalAddressCall(MaybeCall) && MaybeCall != BB->getTerminator())
MaybeCall = MaybeCall->getNextNode();
CallSite FinallyCall(MaybeCall);
if (!FinallyCall || FinallyCall.arg_size() != 2)
return CallSite();
if (!match(FinallyCall.getArgument(0), m_SpecificInt(1)))
return CallSite();
if (!isLocalAddressCall(FinallyCall.getArgument(1)))
return CallSite();
return FinallyCall;
}
static BasicBlock *followSingleUnconditionalBranches(BasicBlock *BB) {
// Skip over blocks that contain only a single unconditional branch.
while (BB->getFirstNonPHIOrDbg() == BB->getTerminator()) {
auto *Br = dyn_cast<BranchInst>(BB->getTerminator());
if (Br && Br->isUnconditional())
BB = Br->getSuccessor(0);
else
return BB;
}
return BB;
}
// This function searches starting with the input block for the next block that
// contains code that is not part of a catch handler and would not be eliminated
// during handler outlining.
//
void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
BasicBlock *StartBB, BasicBlock *EndBB) {
// Here we will skip over the following:
//
// landing pad prolog:
//
// Unconditional branches
//
// Selector dispatch
//
// Resume pattern
//
// Anything else marks the start of an interesting block
BasicBlock *BB = StartBB;
// Anything other than an unconditional branch will kick us out of this loop
// one way or another.
while (BB) {
BB = followSingleUnconditionalBranches(BB);
// If we've already scanned this block, don't scan it again. If it is
// a cleanup block, there will be an action in the CleanupHandlerMap.
// If we've scanned it and it is not a cleanup block, there will be a
// nullptr in the CleanupHandlerMap. If we have not scanned it, there will
// be no entry in the CleanupHandlerMap. We must call count() first to
// avoid creating a null entry for blocks we haven't scanned.
if (CleanupHandlerMap.count(BB)) {
if (auto *Action = CleanupHandlerMap[BB]) {
Actions.insertCleanupHandler(Action);
DEBUG(dbgs() << " Found cleanup code in block "
<< Action->getStartBlock()->getName() << "\n");
// FIXME: This cleanup might chain into another, and we need to discover
// that.
return;
} else {
// Here we handle the case where the cleanup handler map contains a
// value for this block but the value is a nullptr. This means that
// we have previously analyzed the block and determined that it did
// not contain any cleanup code. Based on the earlier analysis, we
// know the block must end in either an unconditional branch, a
// resume or a conditional branch that is predicated on a comparison
// with a selector. Either the resume or the selector dispatch
// would terminate the search for cleanup code, so the unconditional
// branch is the only case for which we might need to continue
// searching.
BasicBlock *SuccBB = followSingleUnconditionalBranches(BB);
if (SuccBB == BB || SuccBB == EndBB)
return;
BB = SuccBB;
continue;
}
}
// Create an entry in the cleanup handler map for this block. Initially
// we create an entry that says this isn't a cleanup block. If we find
// cleanup code, the caller will replace this entry.
CleanupHandlerMap[BB] = nullptr;
TerminatorInst *Terminator = BB->getTerminator();
// Landing pad blocks have extra instructions we need to accept.
LandingPadMap *LPadMap = nullptr;
if (BB->isLandingPad()) {
LandingPadInst *LPad = BB->getLandingPadInst();
LPadMap = &LPadMaps[LPad];
if (!LPadMap->isInitialized())
LPadMap->mapLandingPad(LPad);
}
// Look for the bare resume pattern:
// %lpad.val1 = insertvalue { i8*, i32 } undef, i8* %exn, 0
// %lpad.val2 = insertvalue { i8*, i32 } %lpad.val1, i32 %sel, 1
// resume { i8*, i32 } %lpad.val2
if (auto *Resume = dyn_cast<ResumeInst>(Terminator)) {
InsertValueInst *Insert1 = nullptr;
InsertValueInst *Insert2 = nullptr;
Value *ResumeVal = Resume->getOperand(0);
// If the resume value isn't a phi or landingpad value, it should be a
// series of insertions. Identify them so we can avoid them when scanning
// for cleanups.
if (!isa<PHINode>(ResumeVal) && !isa<LandingPadInst>(ResumeVal)) {
Insert2 = dyn_cast<InsertValueInst>(ResumeVal);
if (!Insert2)
return createCleanupHandler(Actions, CleanupHandlerMap, BB);
Insert1 = dyn_cast<InsertValueInst>(Insert2->getAggregateOperand());
if (!Insert1)
return createCleanupHandler(Actions, CleanupHandlerMap, BB);
}
for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
II != IE; ++II) {
Instruction *Inst = II;
if (LPadMap && LPadMap->isLandingPadSpecificInst(Inst))
continue;
if (Inst == Insert1 || Inst == Insert2 || Inst == Resume)
continue;
if (!Inst->hasOneUse() ||
(Inst->user_back() != Insert1 && Inst->user_back() != Insert2)) {
return createCleanupHandler(Actions, CleanupHandlerMap, BB);
}
}
return;
}
BranchInst *Branch = dyn_cast<BranchInst>(Terminator);
if (Branch && Branch->isConditional()) {
// Look for the selector dispatch.
// %2 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIf to i8*))
// %matches = icmp eq i32 %sel, %2
// br i1 %matches, label %catch14, label %eh.resume
CmpInst *Compare = dyn_cast<CmpInst>(Branch->getCondition());
if (!Compare || !Compare->isEquality())
return createCleanupHandler(Actions, CleanupHandlerMap, BB);
for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
II != IE; ++II) {
Instruction *Inst = II;
if (LPadMap && LPadMap->isLandingPadSpecificInst(Inst))
continue;
if (Inst == Compare || Inst == Branch)
continue;
if (match(Inst, m_Intrinsic<Intrinsic::eh_typeid_for>()))
continue;
return createCleanupHandler(Actions, CleanupHandlerMap, BB);
}
// The selector dispatch block should always terminate our search.
assert(BB == EndBB);
return;
}
if (isAsynchronousEHPersonality(Personality)) {
// If this is a landingpad block, skip past the landing-pad-specific
// instructions to find the first instruction that might be a finally call.
Instruction *MaybeCall = BB->getFirstNonPHIOrDbg();
if (LPadMap) {
while (MaybeCall != BB->getTerminator() &&
LPadMap->isLandingPadSpecificInst(MaybeCall))
MaybeCall = MaybeCall->getNextNode();
}
// Look for outlined finally calls on x64, since those happen to match the
// prototype provided by the runtime.
if (TheTriple.getArch() == Triple::x86_64) {
if (CallSite FinallyCall = matchOutlinedFinallyCall(BB, MaybeCall)) {
Function *Fin = FinallyCall.getCalledFunction();
assert(Fin && "outlined finally call should be direct");
auto *Action = new CleanupHandler(BB);
Action->setHandlerBlockOrFunc(Fin);
Actions.insertCleanupHandler(Action);
CleanupHandlerMap[BB] = Action;
DEBUG(dbgs() << " Found frontend-outlined finally call to "
<< Fin->getName() << " in block "
<< Action->getStartBlock()->getName() << "\n");
// Split the block if there were more interesting instructions and
// look for finally calls in the normal successor block.
BasicBlock *SuccBB = BB;
if (FinallyCall.getInstruction() != BB->getTerminator() &&
FinallyCall.getInstruction()->getNextNode() !=
BB->getTerminator()) {
SuccBB =
SplitBlock(BB, FinallyCall.getInstruction()->getNextNode(), DT);
} else {
if (FinallyCall.isInvoke()) {
SuccBB = cast<InvokeInst>(FinallyCall.getInstruction())
->getNormalDest();
} else {
SuccBB = BB->getUniqueSuccessor();
assert(SuccBB &&
"splitOutlinedFinallyCalls didn't insert a branch");
}
}
BB = SuccBB;
if (BB == EndBB)
return;
continue;
}
}
}
// Anything else is either a catch block or interesting cleanup code.
for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
II != IE; ++II) {
Instruction *Inst = II;
if (LPadMap && LPadMap->isLandingPadSpecificInst(Inst))
continue;
// Unconditional branches fall through to this loop.
if (Inst == Branch)
continue;
// If this is a catch block, there is no cleanup code to be found.
if (match(Inst, m_Intrinsic<Intrinsic::eh_begincatch>()))
return;
// If this a nested landing pad, it may contain an endcatch call.
if (match(Inst, m_Intrinsic<Intrinsic::eh_endcatch>()))
return;
// Anything else makes this interesting cleanup code.
return createCleanupHandler(Actions, CleanupHandlerMap, BB);
}
// Only unconditional branches in empty blocks should get this far.
assert(Branch && Branch->isUnconditional());
if (BB == EndBB)
return;
BB = Branch->getSuccessor(0);
}
}
// This is a public function, declared in WinEHFuncInfo.h and is also
// referenced by WinEHNumbering in FunctionLoweringInfo.cpp.
void llvm::parseEHActions(
const IntrinsicInst *II,
SmallVectorImpl<std::unique_ptr<ActionHandler>> &Actions) {
assert(II->getIntrinsicID() == Intrinsic::eh_actions &&
"attempted to parse non eh.actions intrinsic");
for (unsigned I = 0, E = II->getNumArgOperands(); I != E;) {
uint64_t ActionKind =
cast<ConstantInt>(II->getArgOperand(I))->getZExtValue();
if (ActionKind == /*catch=*/1) {
auto *Selector = cast<Constant>(II->getArgOperand(I + 1));
ConstantInt *EHObjIndex = cast<ConstantInt>(II->getArgOperand(I + 2));
int64_t EHObjIndexVal = EHObjIndex->getSExtValue();
Constant *Handler = cast<Constant>(II->getArgOperand(I + 3));
I += 4;
auto CH = make_unique<CatchHandler>(/*BB=*/nullptr, Selector,
/*NextBB=*/nullptr);
CH->setHandlerBlockOrFunc(Handler);
CH->setExceptionVarIndex(EHObjIndexVal);
Actions.push_back(std::move(CH));
} else if (ActionKind == 0) {
Constant *Handler = cast<Constant>(II->getArgOperand(I + 1));
I += 2;
auto CH = make_unique<CleanupHandler>(/*BB=*/nullptr);
CH->setHandlerBlockOrFunc(Handler);
Actions.push_back(std::move(CH));
} else {
llvm_unreachable("Expected either a catch or cleanup handler!");
}
}
std::reverse(Actions.begin(), Actions.end());
}
namespace {
struct WinEHNumbering {
WinEHNumbering(WinEHFuncInfo &FuncInfo) : FuncInfo(FuncInfo),
CurrentBaseState(-1), NextState(0) {}
WinEHFuncInfo &FuncInfo;
int CurrentBaseState;
int NextState;
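// Actions that are live at the call site currently being processed; the
// innermost action is at the back of the stack.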
SmallVector<std::unique_ptr<ActionHandler>, 4> HandlerStack;
SmallPtrSet<const Function *, 4> VisitedHandlers;
int currentEHNumber() const {
return HandlerStack.empty() ? CurrentBaseState
                            : HandlerStack.back()->getEHState();
}
void createUnwindMapEntry(int ToState, ActionHandler *AH);
void createTryBlockMapEntry(int TryLow, int TryHigh,
ArrayRef<CatchHandler *> Handlers);
void processCallSite(MutableArrayRef<std::unique_ptr<ActionHandler>> Actions,
ImmutableCallSite CS);
void popUnmatchedActions(int FirstMismatch);
void calculateStateNumbers(const Function &F);
void findActionRootLPads(const Function &F);
};
}
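// Append an unwind map entry whose target is ToState; if AH is a cleanup
// action, the entry also records its outlined cleanup function.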
void WinEHNumbering::createUnwindMapEntry(int ToState, ActionHandler *AH) {
WinEHUnwindMapEntry UME;
UME.ToState = ToState;
if (auto *CH = dyn_cast_or_null<CleanupHandler>(AH))
UME.Cleanup = cast<Function>(CH->getHandlerBlockOrFunc());
else
UME.Cleanup = nullptr;
FuncInfo.UnwindMap.push_back(UME);
}
void WinEHNumbering::createTryBlockMapEntry(int TryLow, int TryHigh,
ArrayRef<CatchHandler *> Handlers) {
// See if we already have an entry for this set of handlers.
// This is using iterators rather than a range-based for loop because
// if we find the entry we're looking for we'll need the iterator to erase it.
int NumHandlers = Handlers.size();
auto I = FuncInfo.TryBlockMap.begin();
auto E = FuncInfo.TryBlockMap.end();
for ( ; I != E; ++I) {
auto &Entry = *I;
if (Entry.HandlerArray.size() != (size_t)NumHandlers)
continue;
int N;
for (N = 0; N < NumHandlers; ++N) {
if (Entry.HandlerArray[N].Handler != Handlers[N]->getHandlerBlockOrFunc())
break; // breaks out of inner loop
}
// If all the handlers match, this is what we were looking for.
if (N == NumHandlers) {
break;
}
}
// If we found an existing entry for this set of handlers, extend the range
// but move the entry to the end of the map vector. The order of entries
// in the map is critical to the way that the runtime finds handlers.
// FIXME: Depending on what has happened with block ordering, this may
// incorrectly combine entries that should remain separate.
if (I != E) {
// Copy the existing entry.
WinEHTryBlockMapEntry Entry = *I;
Entry.TryLow = std::min(TryLow, Entry.TryLow);
Entry.TryHigh = std::max(TryHigh, Entry.TryHigh);
assert(Entry.TryLow <= Entry.TryHigh);
// Erase the old entry and add this one to the back.
FuncInfo.TryBlockMap.erase(I);
FuncInfo.TryBlockMap.push_back(Entry);
return;
}
// If we didn't find an entry, create a new one.
WinEHTryBlockMapEntry TBME;
TBME.TryLow = TryLow;
TBME.TryHigh = TryHigh;
assert(TBME.TryLow <= TBME.TryHigh);
for (CatchHandler *CH : Handlers) {
WinEHHandlerType HT;
if (CH->getSelector()->isNullValue()) {
HT.Adjectives = 0x40;
HT.TypeDescriptor = nullptr;
} else {
auto *GV = cast<GlobalVariable>(CH->getSelector()->stripPointerCasts());
// Selectors are always pointers to GlobalVariables with 'struct' type.
// The struct has two fields, adjectives and a type descriptor.
auto *CS = cast<ConstantStruct>(GV->getInitializer());
HT.Adjectives =
cast<ConstantInt>(CS->getAggregateElement(0U))->getZExtValue();
HT.TypeDescriptor =
cast<GlobalVariable>(CS->getAggregateElement(1)->stripPointerCasts());
}
HT.Handler = cast<Function>(CH->getHandlerBlockOrFunc());
HT.CatchObjRecoverIdx = CH->getExceptionVarIndex();
TBME.HandlerArray.push_back(HT);
}
FuncInfo.TryBlockMap.push_back(TBME);
}
static void print_name(const Value *V) {
#ifndef NDEBUG
if (!V) {
DEBUG(dbgs() << "null");
return;
}
if (const auto *F = dyn_cast<Function>(V))
DEBUG(dbgs() << F->getName());
else
DEBUG(V->dump());
#endif
}
void WinEHNumbering::processCallSite(
MutableArrayRef<std::unique_ptr<ActionHandler>> Actions,
ImmutableCallSite CS) {
DEBUG(dbgs() << "processCallSite (EH state = " << currentEHNumber()
<< ") for: ");
print_name(CS ? CS.getCalledValue() : nullptr);
DEBUG(dbgs() << '\n');
DEBUG(dbgs() << "HandlerStack: \n");
for (int I = 0, E = HandlerStack.size(); I < E; ++I) {
DEBUG(dbgs() << " ");
print_name(HandlerStack[I]->getHandlerBlockOrFunc());
DEBUG(dbgs() << '\n');
}
DEBUG(dbgs() << "Actions: \n");
for (int I = 0, E = Actions.size(); I < E; ++I) {
DEBUG(dbgs() << " ");
print_name(Actions[I]->getHandlerBlockOrFunc());
DEBUG(dbgs() << '\n');
}
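// Find the length of the common prefix shared by the current handler stack
// and the new action list; anything past it must be popped and re-pushed.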
int FirstMismatch = 0;
for (int E = std::min(HandlerStack.size(), Actions.size()); FirstMismatch < E;
++FirstMismatch) {
if (HandlerStack[FirstMismatch]->getHandlerBlockOrFunc() !=
Actions[FirstMismatch]->getHandlerBlockOrFunc())
break;
}
// Remove unmatched actions from the stack and process their EH states.
popUnmatchedActions(FirstMismatch);
DEBUG(dbgs() << "Pushing actions for CallSite: ");
print_name(CS ? CS.getCalledValue() : nullptr);
DEBUG(dbgs() << '\n');
bool LastActionWasCatch = false;
const LandingPadInst *LastRootLPad = nullptr;
for (size_t I = FirstMismatch; I != Actions.size(); ++I) {
// We can reuse eh states when pushing two catches for the same invoke.
bool CurrActionIsCatch = isa<CatchHandler>(Actions[I].get());
auto *Handler = cast<Function>(Actions[I]->getHandlerBlockOrFunc());
// Various conditions can lead to a handler being popped from the
// stack and re-pushed later. That shouldn't create a new state.
// FIXME: Can code optimization lead to re-used handlers?
if (FuncInfo.HandlerEnclosedState.count(Handler)) {
// If we already assigned the state enclosed by this handler re-use it.
Actions[I]->setEHState(FuncInfo.HandlerEnclosedState[Handler]);
continue;
}
const LandingPadInst* RootLPad = FuncInfo.RootLPad[Handler];
if (CurrActionIsCatch && LastActionWasCatch && RootLPad == LastRootLPad) {
DEBUG(dbgs() << "setEHState for handler to " << currentEHNumber() << "\n");
Actions[I]->setEHState(currentEHNumber());
} else {
DEBUG(dbgs() << "createUnwindMapEntry(" << currentEHNumber() << ", ");
print_name(Actions[I]->getHandlerBlockOrFunc());
DEBUG(dbgs() << ") with EH state " << NextState << "\n");
createUnwindMapEntry(currentEHNumber(), Actions[I].get());
DEBUG(dbgs() << "setEHState for handler to " << NextState << "\n");
Actions[I]->setEHState(NextState);
NextState++;
}
HandlerStack.push_back(std::move(Actions[I]));
LastActionWasCatch = CurrActionIsCatch;
LastRootLPad = RootLPad;
}
// This is used to defer numbering states for a handler until after the
// last time it appears in an invoke action list.
if (CS.isInvoke()) {
for (int I = 0, E = HandlerStack.size(); I < E; ++I) {
auto *Handler = cast<Function>(HandlerStack[I]->getHandlerBlockOrFunc());
if (FuncInfo.LastInvoke[Handler] != cast<InvokeInst>(CS.getInstruction()))
continue;
FuncInfo.LastInvokeVisited[Handler] = true;
DEBUG(dbgs() << "Last invoke of ");
print_name(Handler);
DEBUG(dbgs() << " has been visited.\n");
}
}
DEBUG(dbgs() << "In EHState " << currentEHNumber() << " for CallSite: ");
print_name(CS ? CS.getCalledValue() : nullptr);
DEBUG(dbgs() << '\n');
}
void WinEHNumbering::popUnmatchedActions(int FirstMismatch) {
// Don't recurse while we are looping over the handler stack. Instead, defer
// the numbering of the catch handlers until we are done popping.
SmallVector<CatchHandler *, 4> PoppedCatches;
for (int I = HandlerStack.size() - 1; I >= FirstMismatch; --I) {
std::unique_ptr<ActionHandler> Handler = HandlerStack.pop_back_val();
if (isa<CatchHandler>(Handler.get()))
PoppedCatches.push_back(cast<CatchHandler>(Handler.release()));
}
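// Group consecutive popped catches that share an EH state and create one
// try-block map entry per group; every group uses TryHigh, the most
// recently allocated state.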
int TryHigh = NextState - 1;
int LastTryLowIdx = 0;
for (int I = 0, E = PoppedCatches.size(); I != E; ++I) {
CatchHandler *CH = PoppedCatches[I];
DEBUG(dbgs() << "Popped handler with state " << CH->getEHState() << "\n");
if (I + 1 == E || CH->getEHState() != PoppedCatches[I + 1]->getEHState()) {
int TryLow = CH->getEHState();
auto Handlers =
makeArrayRef(&PoppedCatches[LastTryLowIdx], I - LastTryLowIdx + 1);
DEBUG(dbgs() << "createTryBlockMapEntry(" << TryLow << ", " << TryHigh);
for (size_t J = 0; J < Handlers.size(); ++J) {
DEBUG(dbgs() << ", ");
print_name(Handlers[J]->getHandlerBlockOrFunc());
}
DEBUG(dbgs() << ")\n");
createTryBlockMapEntry(TryLow, TryHigh, Handlers);
LastTryLowIdx = I + 1;
}
}
for (CatchHandler *CH : PoppedCatches) {
if (auto *F = dyn_cast<Function>(CH->getHandlerBlockOrFunc())) {
if (FuncInfo.LastInvokeVisited[F]) {
DEBUG(dbgs() << "Assigning base state " << NextState << " to ");
print_name(F);
DEBUG(dbgs() << '\n');
FuncInfo.HandlerBaseState[F] = NextState;
DEBUG(dbgs() << "createUnwindMapEntry(" << currentEHNumber()
<< ", null)\n");
createUnwindMapEntry(currentEHNumber(), nullptr);
++NextState;
calculateStateNumbers(*F);
}
else {
DEBUG(dbgs() << "Deferring handling of ");
print_name(F);
DEBUG(dbgs() << " until last invoke visited.\n");
}
}
delete CH;
}
}
void WinEHNumbering::calculateStateNumbers(const Function &F) {
auto I = VisitedHandlers.insert(&F);
if (!I.second)
return; // We've already visited this handler, don't renumber it.
int OldBaseState = CurrentBaseState;
if (FuncInfo.HandlerBaseState.count(&F)) {
CurrentBaseState = FuncInfo.HandlerBaseState[&F];
}
size_t SavedHandlerStackSize = HandlerStack.size();
DEBUG(dbgs() << "Calculating state numbers for: " << F.getName() << '\n');
SmallVector<std::unique_ptr<ActionHandler>, 4> ActionList;
for (const BasicBlock &BB : F) {
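// Ordinary calls that may throw participate in state numbering with an
// empty action list; invokes are handled below using the action list
// parsed from their landing pad's eh.actions call.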
for (const Instruction &I : BB) {
const auto *CI = dyn_cast<CallInst>(&I);
if (!CI || CI->doesNotThrow())
continue;
processCallSite(None, CI);
}
const auto *II = dyn_cast<InvokeInst>(BB.getTerminator());
if (!II)
continue;
const LandingPadInst *LPI = II->getLandingPadInst();
auto *ActionsCall = dyn_cast<IntrinsicInst>(LPI->getNextNode());
if (!ActionsCall)
continue;
parseEHActions(ActionsCall, ActionList);
if (ActionList.empty())
continue;
processCallSite(ActionList, II);
ActionList.clear();
FuncInfo.LandingPadStateMap[LPI] = currentEHNumber();
DEBUG(dbgs() << "Assigning state " << currentEHNumber()
<< " to landing pad at " << LPI->getParent()->getName()
<< '\n');
}
// Pop any actions that were pushed on the stack for this function.
popUnmatchedActions(SavedHandlerStackSize);
DEBUG(dbgs() << "Assigning max state " << NextState - 1
<< " to " << F.getName() << '\n');
FuncInfo.CatchHandlerMaxState[&F] = NextState - 1;
CurrentBaseState = OldBaseState;
}
// This function follows the same basic traversal as calculateStateNumbers
// but it is necessary to identify the root landing pad associated
// with each action before we start assigning state numbers.
void WinEHNumbering::findActionRootLPads(const Function &F) {
auto I = VisitedHandlers.insert(&F);
if (!I.second)
return; // We've already visited this handler, don't revisit it.
SmallVector<std::unique_ptr<ActionHandler>, 4> ActionList;
for (const BasicBlock &BB : F) {
const auto *II = dyn_cast<InvokeInst>(BB.getTerminator());
if (!II)
continue;
const LandingPadInst *LPI = II->getLandingPadInst();
auto *ActionsCall = dyn_cast<IntrinsicInst>(LPI->getNextNode());
if (!ActionsCall)
continue;
assert(ActionsCall->getIntrinsicID() == Intrinsic::eh_actions);
parseEHActions(ActionsCall, ActionList);
if (ActionList.empty())
continue;
for (int I = 0, E = ActionList.size(); I < E; ++I) {
if (auto *Handler
= dyn_cast<Function>(ActionList[I]->getHandlerBlockOrFunc())) {
FuncInfo.LastInvoke[Handler] = II;
// Don't replace the root landing pad if we previously saw this
// handler in a different function.
if (FuncInfo.RootLPad.count(Handler) &&
FuncInfo.RootLPad[Handler]->getParent()->getParent() != &F)
continue;
DEBUG(dbgs() << "Setting root lpad for ");
print_name(Handler);
DEBUG(dbgs() << " to " << LPI->getParent()->getName() << '\n');
FuncInfo.RootLPad[Handler] = LPI;
}
}
// Walk the actions again and look for nested handlers. This has to
// happen after all of the actions have been processed in the current
// function.
for (int I = 0, E = ActionList.size(); I < E; ++I)
if (auto *Handler
= dyn_cast<Function>(ActionList[I]->getHandlerBlockOrFunc()))
findActionRootLPads(*Handler);
ActionList.clear();
}
}
void llvm::calculateWinCXXEHStateNumbers(const Function *ParentFn,
WinEHFuncInfo &FuncInfo) {
// Return if it's already been done.
if (!FuncInfo.LandingPadStateMap.empty())
return;
WinEHNumbering Num(FuncInfo);
Num.findActionRootLPads(*ParentFn);
// The VisitedHandlers list is used by both findActionRootLPads and
// calculateStateNumbers, but both functions need to visit all handlers.
Num.VisitedHandlers.clear();
Num.calculateStateNumbers(*ParentFn);
// Pop everything on the handler stack.
// It may be necessary to call this more than once because a handler can
// be pushed on the stack as a result of clearing the stack.
while (!Num.HandlerStack.empty())
Num.processCallSite(None, ImmutableCallSite());
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/StackSlotColoring.cpp | //===-- StackSlotColoring.cpp - Stack slot coloring pass. -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the stack slot coloring pass.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "stackslotcoloring"
static cl::opt<bool>
DisableSharing("no-stack-slot-sharing",
cl::init(false), cl::Hidden,
cl::desc("Suppress slot sharing during stack coloring"));
static cl::opt<int> DCELimit("ssc-dce-limit", cl::init(-1), cl::Hidden);
STATISTIC(NumEliminated, "Number of stack slots eliminated due to coloring");
STATISTIC(NumDead, "Number of trivially dead stack accesses eliminated");
namespace {
class StackSlotColoring : public MachineFunctionPass {
LiveStacks* LS;
MachineFrameInfo *MFI;
const TargetInstrInfo *TII;
const MachineBlockFrequencyInfo *MBFI;
// SSIntervals - Spill slot intervals.
std::vector<LiveInterval*> SSIntervals;
// SSRefs - Keep a list of MachineMemOperands for each spill slot.
// MachineMemOperands can be shared between instructions, so we need
// to be careful that renames like [FI0, FI1] -> [FI1, FI2] do not
// become FI0 -> FI1 -> FI2.
SmallVector<SmallVector<MachineMemOperand *, 8>, 16> SSRefs;
// OrigAlignments - Alignments of stack objects before coloring.
SmallVector<unsigned, 16> OrigAlignments;
// OrigSizes - Sizes of stack objects before coloring.
SmallVector<unsigned, 16> OrigSizes;
// AllColors - If index is set, it's a spill slot, i.e. color.
// FIXME: This assumes PEI locates spill slots with smaller indices
// closest to stack pointer / frame pointer. Therefore, smaller
// index == better color.
BitVector AllColors;
// NextColor - Next "color" that's not yet used.
int NextColor;
// UsedColors - "Colors" that have been assigned.
BitVector UsedColors;
// Assignments - Color to intervals mapping.
SmallVector<SmallVector<LiveInterval*,4>, 16> Assignments;
public:
static char ID; // Pass identification
StackSlotColoring() :
MachineFunctionPass(ID), NextColor(-1) {
initializeStackSlotColoringPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveStacks>();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addPreserved<MachineBlockFrequencyInfo>();
AU.addPreservedID(MachineDominatorsID);
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override;
private:
void InitializeSlots();
void ScanForSpillSlotRefs(MachineFunction &MF);
bool OverlapWithAssignments(LiveInterval *li, int Color) const;
int ColorSlot(LiveInterval *li);
bool ColorSlots(MachineFunction &MF);
void RewriteInstruction(MachineInstr *MI, SmallVectorImpl<int> &SlotMapping,
MachineFunction &MF);
bool RemoveDeadStores(MachineBasicBlock* MBB);
};
} // end anonymous namespace
char StackSlotColoring::ID = 0;
char &llvm::StackSlotColoringID = StackSlotColoring::ID;
INITIALIZE_PASS_BEGIN(StackSlotColoring, "stack-slot-coloring",
"Stack Slot Coloring", false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(StackSlotColoring, "stack-slot-coloring",
"Stack Slot Coloring", false, false)
namespace {
// IntervalSorter - Comparison predicate that sorts live intervals by
// decreasing weight.
struct IntervalSorter {
bool operator()(LiveInterval* LHS, LiveInterval* RHS) const {
return LHS->weight > RHS->weight;
}
};
}
/// ScanForSpillSlotRefs - Scan all the machine instructions for spill slot
/// references and update spill slot weights.
void StackSlotColoring::ScanForSpillSlotRefs(MachineFunction &MF) {
SSRefs.resize(MFI->getObjectIndexEnd());
// FIXME: Need the equivalent of MachineRegisterInfo for frameindex operands.
for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = &*MBBI;
for (MachineBasicBlock::iterator MII = MBB->begin(), EE = MBB->end();
MII != EE; ++MII) {
MachineInstr *MI = &*MII;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isFI())
continue;
int FI = MO.getIndex();
if (FI < 0)
continue;
if (!LS->hasInterval(FI))
continue;
LiveInterval &li = LS->getInterval(FI);
if (!MI->isDebugValue())
li.weight += LiveIntervals::getSpillWeight(false, true, MBFI, MI);
}
for (MachineInstr::mmo_iterator MMOI = MI->memoperands_begin(),
EE = MI->memoperands_end(); MMOI != EE; ++MMOI) {
MachineMemOperand *MMO = *MMOI;
if (const FixedStackPseudoSourceValue *FSV =
dyn_cast_or_null<FixedStackPseudoSourceValue>(
MMO->getPseudoValue())) {
int FI = FSV->getFrameIndex();
if (FI >= 0)
SSRefs[FI].push_back(MMO);
}
}
}
}
}
/// InitializeSlots - Process all spill stack slot liveintervals and add them
/// to a sorted (by weight) list.
void StackSlotColoring::InitializeSlots() {
int LastFI = MFI->getObjectIndexEnd();
OrigAlignments.resize(LastFI);
OrigSizes.resize(LastFI);
AllColors.resize(LastFI);
UsedColors.resize(LastFI);
Assignments.resize(LastFI);
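// Collect the spill slot intervals and sort them by frame index (the
// pair's first member) so they are visited in a deterministic order.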
typedef std::iterator_traits<LiveStacks::iterator>::value_type Pair;
SmallVector<Pair *, 16> Intervals;
Intervals.reserve(LS->getNumIntervals());
for (auto &I : *LS)
Intervals.push_back(&I);
std::sort(Intervals.begin(), Intervals.end(),
[](Pair *LHS, Pair *RHS) { return LHS->first < RHS->first; });
// Gather all spill slots into a list.
DEBUG(dbgs() << "Spill slot intervals:\n");
for (auto *I : Intervals) {
LiveInterval &li = I->second;
DEBUG(li.dump());
int FI = TargetRegisterInfo::stackSlot2Index(li.reg);
if (MFI->isDeadObjectIndex(FI))
continue;
SSIntervals.push_back(&li);
OrigAlignments[FI] = MFI->getObjectAlignment(FI);
OrigSizes[FI] = MFI->getObjectSize(FI);
AllColors.set(FI);
}
DEBUG(dbgs() << '\n');
// Sort them by weight.
std::stable_sort(SSIntervals.begin(), SSIntervals.end(), IntervalSorter());
// Get first "color".
NextColor = AllColors.find_first();
}
/// OverlapWithAssignments - Return true if LiveInterval overlaps with any
/// LiveIntervals that have already been assigned to the specified color.
bool
StackSlotColoring::OverlapWithAssignments(LiveInterval *li, int Color) const {
const SmallVectorImpl<LiveInterval *> &OtherLIs = Assignments[Color];
for (unsigned i = 0, e = OtherLIs.size(); i != e; ++i) {
LiveInterval *OtherLI = OtherLIs[i];
if (OtherLI->overlaps(*li))
return true;
}
return false;
}
/// ColorSlot - Assign a "color" (stack slot) to the specified stack slot.
///
int StackSlotColoring::ColorSlot(LiveInterval *li) {
int Color = -1;
bool Share = false;
if (!DisableSharing) {
// Check if it's possible to reuse any of the used colors.
Color = UsedColors.find_first();
while (Color != -1) {
if (!OverlapWithAssignments(li, Color)) {
Share = true;
++NumEliminated;
break;
}
Color = UsedColors.find_next(Color);
}
}
// Assign it to the first available color (assumed to be the best) if it's
// not possible to share a used color with other objects.
if (!Share) {
assert(NextColor != -1 && "No more spill slots?");
Color = NextColor;
UsedColors.set(Color);
NextColor = AllColors.find_next(NextColor);
}
// Record the assignment.
Assignments[Color].push_back(li);
int FI = TargetRegisterInfo::stackSlot2Index(li->reg);
DEBUG(dbgs() << "Assigning fi#" << FI << " to fi#" << Color << "\n");
// Change size and alignment of the allocated slot. If there are multiple
// objects sharing the same slot, then make sure the size and alignment
// are large enough for all.
unsigned Align = OrigAlignments[FI];
if (!Share || Align > MFI->getObjectAlignment(Color))
MFI->setObjectAlignment(Color, Align);
int64_t Size = OrigSizes[FI];
if (!Share || Size > MFI->getObjectSize(Color))
MFI->setObjectSize(Color, Size);
return Color;
}
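// Illustrative example (hand-constructed, not from any particular function):
// given spill slots with live intervals
//   fi#0: [0,40)   fi#1: [10,20)   fi#2: [50,60)
// and weights decreasing in that order, fi#0 is colored first and keeps
// color fi#0. fi#1 overlaps fi#0, so it cannot share and takes the next
// color, fi#1. fi#2 overlaps neither assignment, so it shares color fi#0,
// and the shared slot keeps the larger size and alignment of the two.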
/// ColorSlots - Color all spill stack slots and rewrite all frameindex machine
/// operands in the function.
bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
unsigned NumObjs = MFI->getObjectIndexEnd();
SmallVector<int, 16> SlotMapping(NumObjs, -1);
SmallVector<float, 16> SlotWeights(NumObjs, 0.0);
SmallVector<SmallVector<int, 4>, 16> RevMap(NumObjs);
BitVector UsedColors(NumObjs);
DEBUG(dbgs() << "Color spill slot intervals:\n");
bool Changed = false;
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
int NewSS = ColorSlot(li);
assert(NewSS >= 0 && "Stack coloring failed?");
SlotMapping[SS] = NewSS;
RevMap[NewSS].push_back(SS);
SlotWeights[NewSS] += li->weight;
UsedColors.set(NewSS);
Changed |= (SS != NewSS);
}
DEBUG(dbgs() << "\nSpill slots after coloring:\n");
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
li->weight = SlotWeights[SS];
}
// Sort them by new weight.
std::stable_sort(SSIntervals.begin(), SSIntervals.end(), IntervalSorter());
#ifndef NDEBUG
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i)
DEBUG(SSIntervals[i]->dump());
DEBUG(dbgs() << '\n');
#endif
if (!Changed)
return false;
// Rewrite all MachineMemOperands.
for (unsigned SS = 0, SE = SSRefs.size(); SS != SE; ++SS) {
int NewFI = SlotMapping[SS];
if (NewFI == -1 || (NewFI == (int)SS))
continue;
const PseudoSourceValue *NewSV = PseudoSourceValue::getFixedStack(NewFI);
SmallVectorImpl<MachineMemOperand *> &RefMMOs = SSRefs[SS];
for (unsigned i = 0, e = RefMMOs.size(); i != e; ++i)
RefMMOs[i]->setValue(NewSV);
}
// Rewrite all MO_FrameIndex operands. Look for dead stores.
for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = &*MBBI;
for (MachineBasicBlock::iterator MII = MBB->begin(), EE = MBB->end();
MII != EE; ++MII)
RewriteInstruction(MII, SlotMapping, MF);
RemoveDeadStores(MBB);
}
// Delete unused stack slots.
while (NextColor != -1) {
DEBUG(dbgs() << "Removing unused stack object fi#" << NextColor << "\n");
MFI->RemoveStackObject(NextColor);
NextColor = AllColors.find_next(NextColor);
}
return true;
}
/// RewriteInstruction - Rewrite specified instruction by replacing references
/// to old frame index with new one.
void StackSlotColoring::RewriteInstruction(MachineInstr *MI,
SmallVectorImpl<int> &SlotMapping,
MachineFunction &MF) {
// Update the operands.
for (unsigned i = 0, ee = MI->getNumOperands(); i != ee; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isFI())
continue;
int OldFI = MO.getIndex();
if (OldFI < 0)
continue;
int NewFI = SlotMapping[OldFI];
if (NewFI == -1 || NewFI == OldFI)
continue;
MO.setIndex(NewFI);
}
// The MachineMemOperands have already been updated.
}
/// RemoveDeadStores - Scan through a basic block and look for loads followed
/// by stores. If they're both using the same stack slot, then the store is
/// definitely dead. This could obviously be much more aggressive (consider
/// pairs with instructions between them), but such extensions might have a
/// considerable compile time impact.
bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
// FIXME: This could be much more aggressive, but we need to investigate
// the compile time impact of doing so.
bool changed = false;
SmallVector<MachineInstr*, 4> toErase;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
I != E; ++I) {
if (DCELimit != -1 && (int)NumDead >= DCELimit)
break;
int FirstSS, SecondSS;
if (TII->isStackSlotCopy(I, FirstSS, SecondSS) &&
FirstSS == SecondSS &&
FirstSS != -1) {
++NumDead;
changed = true;
toErase.push_back(I);
continue;
}
MachineBasicBlock::iterator NextMI = std::next(I);
if (NextMI == MBB->end()) continue;
unsigned LoadReg = 0;
unsigned StoreReg = 0;
if (!(LoadReg = TII->isLoadFromStackSlot(I, FirstSS))) continue;
if (!(StoreReg = TII->isStoreToStackSlot(NextMI, SecondSS))) continue;
if (FirstSS != SecondSS || LoadReg != StoreReg || FirstSS == -1) continue;
++NumDead;
changed = true;
if (NextMI->findRegisterUseOperandIdx(LoadReg, true, nullptr) != -1) {
++NumDead;
toErase.push_back(I);
}
toErase.push_back(NextMI);
++I;
}
for (SmallVectorImpl<MachineInstr *>::iterator I = toErase.begin(),
E = toErase.end(); I != E; ++I)
(*I)->eraseFromParent();
return changed;
}
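// Illustrative example (hand-written pseudo-machine-code): after coloring,
// a reload immediately followed by a store back to the same slot, e.g.
//   %r0 = LOAD fi#3
//   STORE fi#3, %r0
// stores back the value just loaded, so the store is dead; if the store also
// kills %r0, the load is dead as well and both instructions are erased.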
bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
DEBUG({
dbgs() << "********** Stack Slot Coloring **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
MFI = MF.getFrameInfo();
TII = MF.getSubtarget().getInstrInfo();
LS = &getAnalysis<LiveStacks>();
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
bool Changed = false;
unsigned NumSlots = LS->getNumIntervals();
if (NumSlots == 0)
// Nothing to do!
return false;
// If there are calls to setjmp or sigsetjmp, don't perform stack slot
// coloring. The stack could be modified before the longjmp is executed,
// resulting in the wrong value being used afterwards. (See
// <rdar://problem/8007500>.)
if (MF.exposesReturnsTwice())
return false;
// Gather spill slot references
ScanForSpillSlotRefs(MF);
InitializeSlots();
Changed = ColorSlots(MF);
NextColor = -1;
SSIntervals.clear();
for (unsigned i = 0, e = SSRefs.size(); i != e; ++i)
SSRefs[i].clear();
SSRefs.clear();
OrigAlignments.clear();
OrigSizes.clear();
AllColors.clear();
UsedColors.clear();
for (unsigned i = 0, e = Assignments.size(); i != e; ++i)
Assignments[i].clear();
Assignments.clear();
return Changed;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveIntervalUnion.cpp | //===-- LiveIntervalUnion.cpp - Live interval union data structure --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// LiveIntervalUnion represents a coalesced set of live intervals. This may be
// used during coalescing to represent a congruence class, or during register
// allocation to model liveness of a physical register.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
// Merge a LiveInterval's segments. Guarantee no overlaps.
void LiveIntervalUnion::unify(LiveInterval &VirtReg, const LiveRange &Range) {
if (Range.empty())
return;
++Tag;
// Insert each of the virtual register's live segments into the map.
LiveRange::const_iterator RegPos = Range.begin();
LiveRange::const_iterator RegEnd = Range.end();
SegmentIter SegPos = Segments.find(RegPos->start);
while (SegPos.valid()) {
SegPos.insert(RegPos->start, RegPos->end, &VirtReg);
if (++RegPos == RegEnd)
return;
SegPos.advanceTo(RegPos->start);
}
// We have reached the end of Segments, so it is no longer necessary to search
// for the insertion position.
// It is faster to insert the end first.
--RegEnd;
SegPos.insert(RegEnd->start, RegEnd->end, &VirtReg);
for (; RegPos != RegEnd; ++RegPos, ++SegPos)
SegPos.insert(RegPos->start, RegPos->end, &VirtReg);
}
// Remove a live virtual register's segments from this union.
void LiveIntervalUnion::extract(LiveInterval &VirtReg, const LiveRange &Range) {
if (Range.empty())
return;
++Tag;
// Remove each of the virtual register's live segments from the map.
LiveRange::const_iterator RegPos = Range.begin();
LiveRange::const_iterator RegEnd = Range.end();
SegmentIter SegPos = Segments.find(RegPos->start);
for (;;) {
assert(SegPos.value() == &VirtReg && "Inconsistent LiveInterval");
SegPos.erase();
if (!SegPos.valid())
return;
// Skip all segments that may have been coalesced.
RegPos = Range.advanceTo(RegPos, SegPos.start());
if (RegPos == RegEnd)
return;
SegPos.advanceTo(RegPos->start);
}
}
void
LiveIntervalUnion::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
if (empty()) {
OS << " empty\n";
return;
}
for (LiveSegments::const_iterator SI = Segments.begin(); SI.valid(); ++SI) {
OS << " [" << SI.start() << ' ' << SI.stop() << "):"
<< PrintReg(SI.value()->reg, TRI);
}
OS << '\n';
}
#ifndef NDEBUG
// Verify the live intervals in this union and add them to the visited set.
void LiveIntervalUnion::verify(LiveVirtRegBitSet& VisitedVRegs) {
for (SegmentIter SI = Segments.begin(); SI.valid(); ++SI)
VisitedVRegs.set(SI.value()->reg);
}
#endif //!NDEBUG
// Scan the vector of interfering virtual registers in this union. Assume it's
// quite small.
bool LiveIntervalUnion::Query::isSeenInterference(LiveInterval *VirtReg) const {
SmallVectorImpl<LiveInterval*>::const_iterator I =
std::find(InterferingVRegs.begin(), InterferingVRegs.end(), VirtReg);
return I != InterferingVRegs.end();
}
// Collect virtual registers in this union that interfere with this
// query's live virtual register.
//
// The query state is one of:
//
// 1. CheckedFirstInterference == false: Iterators are uninitialized.
// 2. SeenAllInterferences == true: InterferingVRegs complete, iterators unused.
// 3. Iterators left at the last seen intersection.
//
unsigned LiveIntervalUnion::Query::
collectInterferingVRegs(unsigned MaxInterferingRegs) {
// Fast path return if we already have the desired information.
if (SeenAllInterferences || InterferingVRegs.size() >= MaxInterferingRegs)
return InterferingVRegs.size();
// Set up iterators on the first call.
if (!CheckedFirstInterference) {
CheckedFirstInterference = true;
// Quickly skip interference check for empty sets.
if (VirtReg->empty() || LiveUnion->empty()) {
SeenAllInterferences = true;
return 0;
}
// In most cases, the union will start before VirtReg.
VirtRegI = VirtReg->begin();
LiveUnionI.setMap(LiveUnion->getMap());
LiveUnionI.find(VirtRegI->start);
}
LiveInterval::iterator VirtRegEnd = VirtReg->end();
LiveInterval *RecentReg = nullptr;
while (LiveUnionI.valid()) {
assert(VirtRegI != VirtRegEnd && "Reached end of VirtReg");
// Check for overlapping interference.
while (VirtRegI->start < LiveUnionI.stop() &&
VirtRegI->end > LiveUnionI.start()) {
// This is an overlap, record the interfering register.
LiveInterval *VReg = LiveUnionI.value();
if (VReg != RecentReg && !isSeenInterference(VReg)) {
RecentReg = VReg;
InterferingVRegs.push_back(VReg);
if (InterferingVRegs.size() >= MaxInterferingRegs)
return InterferingVRegs.size();
}
// This LiveUnion segment is no longer interesting.
if (!(++LiveUnionI).valid()) {
SeenAllInterferences = true;
return InterferingVRegs.size();
}
}
// The iterators are now not overlapping, LiveUnionI has been advanced
// beyond VirtRegI.
assert(VirtRegI->end <= LiveUnionI.start() && "Expected non-overlap");
// Advance the iterator that ends first.
VirtRegI = VirtReg->advanceTo(VirtRegI, LiveUnionI.start());
if (VirtRegI == VirtRegEnd)
break;
// Detect overlap, handle above.
if (VirtRegI->start < LiveUnionI.stop())
continue;
// Still not overlapping. Catch up LiveUnionI.
LiveUnionI.advanceTo(VirtRegI->start);
}
SeenAllInterferences = true;
return InterferingVRegs.size();
}
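// Usage sketch (hypothetical caller, for illustration only; real register
// allocators drive this through their own query caching):
//   LiveIntervalUnion::Query Q(VirtReg, &LIU);
//   unsigned Count = Q.collectInterferingVRegs(8);
// Passing a small MaxInterferingRegs bound lets a caller give up early on
// heavily contended physical registers instead of enumerating everything.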
void LiveIntervalUnion::Array::init(LiveIntervalUnion::Allocator &Alloc,
unsigned NSize) {
// Reuse existing allocation.
if (NSize == Size)
return;
clear();
Size = NSize;
LIUs = static_cast<LiveIntervalUnion*>(
    ::operator new(sizeof(LiveIntervalUnion)*NSize)); // HLSL Change: Use overridable operator new
for (unsigned i = 0; i != Size; ++i)
new(LIUs + i) LiveIntervalUnion(Alloc);
}
void LiveIntervalUnion::Array::clear() {
if (!LIUs)
return;
for (unsigned i = 0; i != Size; ++i)
LIUs[i].~LiveIntervalUnion();
::operator delete(LIUs); // HLSL Change: Use overridable operator delete
Size = 0;
LIUs = nullptr;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/OptimizePHIs.cpp | //===-- OptimizePHIs.cpp - Optimize machine instruction PHIs --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes machine instruction PHIs to take advantage of
// opportunities created during DAG legalization.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "phi-opt"
STATISTIC(NumPHICycles, "Number of PHI cycles replaced");
STATISTIC(NumDeadPHICycles, "Number of dead PHI cycles");
namespace {
class OptimizePHIs : public MachineFunctionPass {
MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
public:
static char ID; // Pass identification
OptimizePHIs() : MachineFunctionPass(ID) {
initializeOptimizePHIsPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
private:
typedef SmallPtrSet<MachineInstr*, 16> InstrSet;
typedef SmallPtrSetIterator<MachineInstr*> InstrSetIterator;
bool IsSingleValuePHICycle(MachineInstr *MI, unsigned &SingleValReg,
InstrSet &PHIsInCycle);
bool IsDeadPHICycle(MachineInstr *MI, InstrSet &PHIsInCycle);
bool OptimizeBB(MachineBasicBlock &MBB);
};
}
char OptimizePHIs::ID = 0;
char &llvm::OptimizePHIsID = OptimizePHIs::ID;
INITIALIZE_PASS(OptimizePHIs, "opt-phis",
"Optimize machine instruction PHIs", false, false)
bool OptimizePHIs::runOnMachineFunction(MachineFunction &Fn) {
if (skipOptnoneFunction(*Fn.getFunction()))
return false;
MRI = &Fn.getRegInfo();
TII = Fn.getSubtarget().getInstrInfo();
// Find dead PHI cycles and PHI cycles that can be replaced by a single
// value. InstCombine does these optimizations, but DAG legalization may
// introduce new opportunities, e.g., when i64 values are split up for
// 32-bit targets.
bool Changed = false;
for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
Changed |= OptimizeBB(*I);
return Changed;
}
/// IsSingleValuePHICycle - Check if MI is a PHI where all the source operands
/// are copies of SingleValReg, possibly via copies through other PHIs. If
/// SingleValReg is zero on entry, it is set to the register with the single
/// non-copy value. PHIsInCycle is a set used to keep track of the PHIs that
/// have been scanned.
bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
unsigned &SingleValReg,
InstrSet &PHIsInCycle) {
assert(MI->isPHI() && "IsSingleValuePHICycle expects a PHI instruction");
unsigned DstReg = MI->getOperand(0).getReg();
// See if we already saw this register.
if (!PHIsInCycle.insert(MI).second)
return true;
// Don't scan crazily complex things.
if (PHIsInCycle.size() == 16)
return false;
// Scan the PHI operands.
for (unsigned i = 1; i != MI->getNumOperands(); i += 2) {
unsigned SrcReg = MI->getOperand(i).getReg();
if (SrcReg == DstReg)
continue;
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
// Skip over register-to-register moves.
if (SrcMI && SrcMI->isCopy() &&
!SrcMI->getOperand(0).getSubReg() &&
!SrcMI->getOperand(1).getSubReg() &&
TargetRegisterInfo::isVirtualRegister(SrcMI->getOperand(1).getReg()))
SrcMI = MRI->getVRegDef(SrcMI->getOperand(1).getReg());
if (!SrcMI)
return false;
if (SrcMI->isPHI()) {
if (!IsSingleValuePHICycle(SrcMI, SingleValReg, PHIsInCycle))
return false;
} else {
// Fail if there is more than one non-phi/non-move register.
if (SingleValReg != 0)
return false;
SingleValReg = SrcReg;
}
}
return true;
}
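// Illustrative example (hand-written pseudo-MIR): in a cycle such as
//   %v1 = PHI %v0, <BB#0>, %v2, <BB#2>
//   %v2 = PHI %v1, <BB#1>, %v0, <BB#3>
// the only non-PHI, non-copy source is %v0, so SingleValReg is set to %v0
// and both PHIs can be replaced by it.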
/// IsDeadPHICycle - Check if the register defined by a PHI is only used by
/// other PHIs in a cycle.
bool OptimizePHIs::IsDeadPHICycle(MachineInstr *MI, InstrSet &PHIsInCycle) {
assert(MI->isPHI() && "IsDeadPHICycle expects a PHI instruction");
unsigned DstReg = MI->getOperand(0).getReg();
assert(TargetRegisterInfo::isVirtualRegister(DstReg) &&
"PHI destination is not a virtual register");
// See if we already saw this register.
if (!PHIsInCycle.insert(MI).second)
return true;
// Don't scan crazily complex things.
if (PHIsInCycle.size() == 16)
return false;
for (MachineInstr &UseMI : MRI->use_instructions(DstReg)) {
if (!UseMI.isPHI() || !IsDeadPHICycle(&UseMI, PHIsInCycle))
return false;
}
return true;
}
/// OptimizeBB - Remove dead PHI cycles and PHI cycles that can be replaced by
/// a single value.
bool OptimizePHIs::OptimizeBB(MachineBasicBlock &MBB) {
bool Changed = false;
for (MachineBasicBlock::iterator
MII = MBB.begin(), E = MBB.end(); MII != E; ) {
MachineInstr *MI = &*MII++;
if (!MI->isPHI())
break;
// Check for single-value PHI cycles.
unsigned SingleValReg = 0;
InstrSet PHIsInCycle;
if (IsSingleValuePHICycle(MI, SingleValReg, PHIsInCycle) &&
SingleValReg != 0) {
unsigned OldReg = MI->getOperand(0).getReg();
if (!MRI->constrainRegClass(SingleValReg, MRI->getRegClass(OldReg)))
continue;
MRI->replaceRegWith(OldReg, SingleValReg);
MI->eraseFromParent();
++NumPHICycles;
Changed = true;
continue;
}
// Check for dead PHI cycles.
PHIsInCycle.clear();
if (IsDeadPHICycle(MI, PHIsInCycle)) {
for (InstrSetIterator PI = PHIsInCycle.begin(), PE = PHIsInCycle.end();
PI != PE; ++PI) {
MachineInstr *PhiMI = *PI;
if (&*MII == PhiMI)
++MII;
PhiMI->eraseFromParent();
}
++NumDeadPHICycles;
Changed = true;
}
}
return Changed;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ScoreboardHazardRecognizer.cpp | //===----- ScoreboardHazardRecognizer.cpp - Scheduler Support -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScoreboardHazardRecognizer class, which
// encapsulates hazard-avoidance heuristics for scheduling, based on the
// scheduling itineraries specified for the target.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
#define DEBUG_TYPE ::llvm::ScoreboardHazardRecognizer::DebugType
#ifndef NDEBUG
const char *ScoreboardHazardRecognizer::DebugType = "";
#endif
ScoreboardHazardRecognizer::
ScoreboardHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *SchedDAG,
const char *ParentDebugType) :
ScheduleHazardRecognizer(), ItinData(II), DAG(SchedDAG), IssueWidth(0),
IssueCount(0) {
#ifndef NDEBUG
DebugType = ParentDebugType;
#endif
// Determine the maximum depth of any itinerary. This determines the depth of
// the scoreboard. We always make the scoreboard at least 1 cycle deep to
// avoid dealing with the boundary condition.
unsigned ScoreboardDepth = 1;
if (ItinData && !ItinData->isEmpty()) {
for (unsigned idx = 0; ; ++idx) {
if (ItinData->isEndMarker(idx))
break;
const InstrStage *IS = ItinData->beginStage(idx);
const InstrStage *E = ItinData->endStage(idx);
unsigned CurCycle = 0;
unsigned ItinDepth = 0;
for (; IS != E; ++IS) {
unsigned StageDepth = CurCycle + IS->getCycles();
if (ItinDepth < StageDepth) ItinDepth = StageDepth;
CurCycle += IS->getNextCycles();
}
// Find the next power-of-2 >= ItinDepth
while (ItinDepth > ScoreboardDepth) {
ScoreboardDepth *= 2;
// Don't set MaxLookAhead until we find at least one nonzero stage.
// This way, an itinerary with no stages has MaxLookAhead==0, which
// completely bypasses the scoreboard hazard logic.
MaxLookAhead = ScoreboardDepth;
}
}
}
ReservedScoreboard.reset(ScoreboardDepth);
RequiredScoreboard.reset(ScoreboardDepth);
// If MaxLookAhead is not set above, then we are not enabled.
if (!isEnabled())
DEBUG(dbgs() << "Disabled scoreboard hazard recognizer\n");
else {
// A nonempty itinerary must have a SchedModel.
IssueWidth = ItinData->SchedModel.IssueWidth;
DEBUG(dbgs() << "Using scoreboard hazard recognizer: Depth = "
<< ScoreboardDepth << '\n');
}
}
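// Worked example (illustrative): if the deepest itinerary class has a stage
// occupying cycles [0,2) followed by a stage occupying cycles [1,5), then
// ItinDepth is 5 and the loop above doubles the scoreboard depth through 2
// and 4 up to the next power of two, 8, which also becomes MaxLookAhead.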
void ScoreboardHazardRecognizer::Reset() {
IssueCount = 0;
RequiredScoreboard.reset();
ReservedScoreboard.reset();
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScoreboardHazardRecognizer::Scoreboard::dump() const {
dbgs() << "Scoreboard:\n";
unsigned last = Depth - 1;
while ((last > 0) && ((*this)[last] == 0))
last--;
for (unsigned i = 0; i <= last; i++) {
unsigned FUs = (*this)[i];
dbgs() << "\t";
for (int j = 31; j >= 0; j--)
dbgs() << ((FUs & (1 << j)) ? '1' : '0');
dbgs() << '\n';
}
}
#endif
bool ScoreboardHazardRecognizer::atIssueLimit() const {
if (IssueWidth == 0)
return false;
return IssueCount == IssueWidth;
}
ScheduleHazardRecognizer::HazardType
ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
if (!ItinData || ItinData->isEmpty())
return NoHazard;
// Note that stalls will be negative for bottom-up scheduling.
int cycle = Stalls;
// Use the itinerary for the underlying instruction to check for
// free FU's in the scoreboard at the appropriate future cycles.
const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
if (!MCID) {
// Don't check hazards for non-machineinstr Nodes.
return NoHazard;
}
unsigned idx = MCID->getSchedClass();
for (const InstrStage *IS = ItinData->beginStage(idx),
*E = ItinData->endStage(idx); IS != E; ++IS) {
// We must find one of the stage's units free for every cycle the
// stage is occupied. FIXME it would be more accurate to find the
// same unit free in all the cycles.
for (unsigned int i = 0; i < IS->getCycles(); ++i) {
int StageCycle = cycle + (int)i;
if (StageCycle < 0)
continue;
if (StageCycle >= (int)RequiredScoreboard.getDepth()) {
assert((StageCycle - Stalls) < (int)RequiredScoreboard.getDepth() &&
"Scoreboard depth exceeded!");
// This stage was stalled beyond pipeline depth, so cannot conflict.
break;
}
unsigned freeUnits = IS->getUnits();
switch (IS->getReservationKind()) {
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[StageCycle];
// FALLTHROUGH
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
freeUnits &= ~RequiredScoreboard[StageCycle];
break;
}
if (!freeUnits) {
DEBUG(dbgs() << "*** Hazard in cycle +" << StageCycle << ", ");
DEBUG(dbgs() << "SU(" << SU->NodeNum << "): ");
DEBUG(DAG->dumpNode(SU));
return Hazard;
}
}
// Advance the cycle to the next stage.
cycle += IS->getNextCycles();
}
return NoHazard;
}
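// To summarize the reservation semantics above: a Required stage conflicts
// with both Required and Reserved uses of a unit, while a Reserved stage
// conflicts only with Required uses, so two Reserved stages may share a unit.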
void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
if (!ItinData || ItinData->isEmpty())
return;
// Use the itinerary for the underlying instruction to reserve FU's
// in the scoreboard at the appropriate future cycles.
const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
assert(MCID && "The scheduler must filter non-machineinstrs");
if (DAG->TII->isZeroCost(MCID->Opcode))
return;
++IssueCount;
unsigned cycle = 0;
unsigned idx = MCID->getSchedClass();
for (const InstrStage *IS = ItinData->beginStage(idx),
*E = ItinData->endStage(idx); IS != E; ++IS) {
// We must reserve one of the stage's units for every cycle the
// stage is occupied. FIXME: it would be more accurate to reserve
// the same unit in all the cycles.
for (unsigned int i = 0; i < IS->getCycles(); ++i) {
assert(((cycle + i) < RequiredScoreboard.getDepth()) &&
"Scoreboard depth exceeded!");
unsigned freeUnits = IS->getUnits();
switch (IS->getReservationKind()) {
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[cycle + i];
// FALLTHROUGH
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
freeUnits &= ~RequiredScoreboard[cycle + i];
break;
}
// reduce to a single unit
unsigned freeUnit = 0;
do {
freeUnit = freeUnits;
freeUnits = freeUnit & (freeUnit - 1);
} while (freeUnits);
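// For example (illustrative): freeUnits == 0b0110 leaves freeUnit == 0b0100;
// each iteration clears the lowest set bit, and freeUnit keeps the last
// value that still had a bit set, i.e. the highest remaining unit.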
if (IS->getReservationKind() == InstrStage::Required)
RequiredScoreboard[cycle + i] |= freeUnit;
else
ReservedScoreboard[cycle + i] |= freeUnit;
}
// Advance the cycle to the next stage.
cycle += IS->getNextCycles();
}
DEBUG(ReservedScoreboard.dump());
DEBUG(RequiredScoreboard.dump());
}
void ScoreboardHazardRecognizer::AdvanceCycle() {
IssueCount = 0;
ReservedScoreboard[0] = 0; ReservedScoreboard.advance();
RequiredScoreboard[0] = 0; RequiredScoreboard.advance();
}
void ScoreboardHazardRecognizer::RecedeCycle() {
IssueCount = 0;
ReservedScoreboard[ReservedScoreboard.getDepth()-1] = 0;
ReservedScoreboard.recede();
RequiredScoreboard[RequiredScoreboard.getDepth()-1] = 0;
RequiredScoreboard.recede();
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ProcessImplicitDefs.cpp | //===---------------------- ProcessImplicitDefs.cpp -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "processimplicitdefs"
namespace {
/// Process IMPLICIT_DEF instructions and make sure there is one implicit_def
/// for each use. Add isUndef marker to implicit_def defs and their uses.
class ProcessImplicitDefs : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI;
SmallSetVector<MachineInstr*, 16> WorkList;
void processImplicitDef(MachineInstr *MI);
bool canTurnIntoImplicitDef(MachineInstr *MI);
public:
static char ID;
ProcessImplicitDefs() : MachineFunctionPass(ID) {
initializeProcessImplicitDefsPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &au) const override;
bool runOnMachineFunction(MachineFunction &fn) override;
};
} // end anonymous namespace
char ProcessImplicitDefs::ID = 0;
char &llvm::ProcessImplicitDefsID = ProcessImplicitDefs::ID;
INITIALIZE_PASS_BEGIN(ProcessImplicitDefs, "processimpdefs",
"Process Implicit Definitions", false, false)
INITIALIZE_PASS_END(ProcessImplicitDefs, "processimpdefs",
"Process Implicit Definitions", false, false)
void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addPreserved<AliasAnalysis>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool ProcessImplicitDefs::canTurnIntoImplicitDef(MachineInstr *MI) {
if (!MI->isCopyLike() &&
!MI->isInsertSubreg() &&
!MI->isRegSequence() &&
!MI->isPHI())
return false;
for (const MachineOperand &MO : MI->operands())
if (MO.isReg() && MO.isUse() && MO.readsReg())
return false;
return true;
}
void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
DEBUG(dbgs() << "Processing " << *MI);
unsigned Reg = MI->getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
// For virtual registers, mark all uses as <undef>, and convert users to
// implicit-def when possible.
for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
MO.setIsUndef();
MachineInstr *UserMI = MO.getParent();
if (!canTurnIntoImplicitDef(UserMI))
continue;
DEBUG(dbgs() << "Converting to IMPLICIT_DEF: " << *UserMI);
UserMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
WorkList.insert(UserMI);
}
MI->eraseFromParent();
return;
}
// This is a physreg implicit-def.
// Look for the first instruction to use or define an alias.
MachineBasicBlock::instr_iterator UserMI = MI;
MachineBasicBlock::instr_iterator UserE = MI->getParent()->instr_end();
bool Found = false;
for (++UserMI; UserMI != UserE; ++UserMI) {
for (MachineOperand &MO : UserMI->operands()) {
if (!MO.isReg())
continue;
unsigned UserReg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(UserReg) ||
!TRI->regsOverlap(Reg, UserReg))
continue;
// UserMI uses or redefines Reg. Set <undef> flags on all uses.
Found = true;
if (MO.isUse())
MO.setIsUndef();
}
if (Found)
break;
}
// If we found the using MI, we can erase the IMPLICIT_DEF.
if (Found) {
DEBUG(dbgs() << "Physreg user: " << *UserMI);
MI->eraseFromParent();
return;
}
// The using instruction wasn't found; it could be in another block.
// Leave the physreg IMPLICIT_DEF, but trim any extra operands.
for (unsigned i = MI->getNumOperands() - 1; i; --i)
MI->RemoveOperand(i);
DEBUG(dbgs() << "Keeping physreg: " << *MI);
}
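// Illustrative example (hand-written pseudo-MIR): given
//   %v0 = IMPLICIT_DEF
//   %v1 = COPY %v0
// the use of %v0 is marked <undef>, which makes the COPY read no register at
// all; the COPY is therefore converted to %v1 = IMPLICIT_DEF and re-queued on
// the worklist, and the original IMPLICIT_DEF is erased.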
/// processImplicitDefs - Process IMPLICIT_DEF instructions and turn them into
/// <undef> operands.
bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
<< "********** Function: " << MF.getName() << '\n');
bool Changed = false;
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
assert(MRI->isSSA() && "ProcessImplicitDefs only works on SSA form.");
assert(WorkList.empty() && "Inconsistent worklist state");
for (MachineFunction::iterator MFI = MF.begin(), MFE = MF.end();
MFI != MFE; ++MFI) {
// Scan the basic block for implicit defs.
for (MachineBasicBlock::instr_iterator MBBI = MFI->instr_begin(),
MBBE = MFI->instr_end(); MBBI != MBBE; ++MBBI)
if (MBBI->isImplicitDef())
WorkList.insert(MBBI);
if (WorkList.empty())
continue;
DEBUG(dbgs() << "BB#" << MFI->getNumber() << " has " << WorkList.size()
<< " implicit defs.\n");
Changed = true;
// Drain the WorkList to recursively process any new implicit defs.
do processImplicitDef(WorkList.pop_back_val());
while (!WorkList.empty());
}
return Changed;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/VirtRegMap.cpp | //===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/VirtRegMap.h"
#include "LiveDebugVariables.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumSpillSlots, "Number of spill slots allocated");
STATISTIC(NumIdCopies, "Number of identity moves eliminated after rewriting");
//===----------------------------------------------------------------------===//
// VirtRegMap implementation
//===----------------------------------------------------------------------===//
char VirtRegMap::ID = 0;
INITIALIZE_PASS(VirtRegMap, "virtregmap", "Virtual Register Map", false, false)
bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
MRI = &mf.getRegInfo();
TII = mf.getSubtarget().getInstrInfo();
TRI = mf.getSubtarget().getRegisterInfo();
MF = &mf;
Virt2PhysMap.clear();
Virt2StackSlotMap.clear();
Virt2SplitMap.clear();
grow();
return false;
}
void VirtRegMap::grow() {
unsigned NumRegs = MF->getRegInfo().getNumVirtRegs();
Virt2PhysMap.resize(NumRegs);
Virt2StackSlotMap.resize(NumRegs);
Virt2SplitMap.resize(NumRegs);
}
unsigned VirtRegMap::createSpillSlot(const TargetRegisterClass *RC) {
int SS = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
RC->getAlignment());
++NumSpillSlots;
return SS;
}
bool VirtRegMap::hasPreferredPhys(unsigned VirtReg) {
unsigned Hint = MRI->getSimpleHint(VirtReg);
if (!Hint)
return false;
if (TargetRegisterInfo::isVirtualRegister(Hint))
Hint = getPhys(Hint);
return getPhys(VirtReg) == Hint;
}
bool VirtRegMap::hasKnownPreference(unsigned VirtReg) {
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(VirtReg);
if (TargetRegisterInfo::isPhysicalRegister(Hint.second))
return true;
if (TargetRegisterInfo::isVirtualRegister(Hint.second))
return hasPhys(Hint.second);
return false;
}
int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
"attempt to assign stack slot to already spilled register");
const TargetRegisterClass* RC = MF->getRegInfo().getRegClass(virtReg);
return Virt2StackSlotMap[virtReg] = createSpillSlot(RC);
}
void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
"attempt to assign stack slot to already spilled register");
assert((SS >= 0 ||
(SS >= MF->getFrameInfo()->getObjectIndexBegin())) &&
"illegal fixed frame index");
Virt2StackSlotMap[virtReg] = SS;
}
void VirtRegMap::print(raw_ostream &OS, const Module*) const {
OS << "********** REGISTER MAP **********\n";
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (Virt2PhysMap[Reg] != (unsigned)VirtRegMap::NO_PHYS_REG) {
OS << '[' << PrintReg(Reg, TRI) << " -> "
<< PrintReg(Virt2PhysMap[Reg], TRI) << "] "
<< TRI->getRegClassName(MRI->getRegClass(Reg)) << "\n";
}
}
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (Virt2StackSlotMap[Reg] != VirtRegMap::NO_STACK_SLOT) {
OS << '[' << PrintReg(Reg, TRI) << " -> fi#" << Virt2StackSlotMap[Reg]
<< "] " << TRI->getRegClassName(MRI->getRegClass(Reg)) << "\n";
}
}
OS << '\n';
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VirtRegMap::dump() const {
print(dbgs());
}
#endif
//===----------------------------------------------------------------------===//
// VirtRegRewriter
//===----------------------------------------------------------------------===//
//
// The VirtRegRewriter is the last of the register allocator passes.
// It rewrites virtual registers to physical registers as specified in the
// VirtRegMap analysis. It also updates live-in information on basic blocks
// according to LiveIntervals.
//
namespace {
class VirtRegRewriter : public MachineFunctionPass {
MachineFunction *MF;
const TargetMachine *TM;
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
MachineRegisterInfo *MRI;
SlotIndexes *Indexes;
LiveIntervals *LIS;
VirtRegMap *VRM;
SparseSet<unsigned> PhysRegs;
void rewrite();
void addMBBLiveIns();
bool readsUndefSubreg(const MachineOperand &MO) const;
public:
static char ID;
VirtRegRewriter() : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction&) override;
};
} // end anonymous namespace
char &llvm::VirtRegRewriterID = VirtRegRewriter::ID;
INITIALIZE_PASS_BEGIN(VirtRegRewriter, "virtregrewriter",
"Virtual Register Rewriter", false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_END(VirtRegRewriter, "virtregrewriter",
"Virtual Register Rewriter", false, false)
char VirtRegRewriter::ID = 0;
void VirtRegRewriter::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<LiveIntervals>();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addRequired<LiveStacks>();
AU.addPreserved<LiveStacks>();
AU.addRequired<VirtRegMap>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool VirtRegRewriter::runOnMachineFunction(MachineFunction &fn) {
MF = &fn;
TM = &MF->getTarget();
TRI = MF->getSubtarget().getRegisterInfo();
TII = MF->getSubtarget().getInstrInfo();
MRI = &MF->getRegInfo();
Indexes = &getAnalysis<SlotIndexes>();
LIS = &getAnalysis<LiveIntervals>();
VRM = &getAnalysis<VirtRegMap>();
DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
<< "********** Function: "
<< MF->getName() << '\n');
DEBUG(VRM->dump());
// Add kill flags while we still have virtual registers.
LIS->addKillFlags(VRM);
// Live-in lists on basic blocks are required for physregs.
addMBBLiveIns();
// Rewrite virtual registers.
rewrite();
// Write out new DBG_VALUE instructions.
getAnalysis<LiveDebugVariables>().emitDebugValues(VRM);
// All machine operands and other references to virtual registers have been
// replaced. Remove the virtual registers and release all the transient data.
VRM->clearAllVirt();
MRI->clearVirtRegs();
return true;
}
// Compute MBB live-in lists from virtual register live ranges and their
// assignments.
void VirtRegRewriter::addMBBLiveIns() {
SmallVector<MachineBasicBlock*, 16> LiveIn;
for (unsigned Idx = 0, IdxE = MRI->getNumVirtRegs(); Idx != IdxE; ++Idx) {
unsigned VirtReg = TargetRegisterInfo::index2VirtReg(Idx);
if (MRI->reg_nodbg_empty(VirtReg))
continue;
LiveInterval &LI = LIS->getInterval(VirtReg);
if (LI.empty() || LIS->intervalIsInOneMBB(LI))
continue;
// This is a virtual register that is live across basic blocks. Its
// assigned PhysReg must be marked as live-in to those blocks.
unsigned PhysReg = VRM->getPhys(VirtReg);
assert(PhysReg != VirtRegMap::NO_PHYS_REG && "Unmapped virtual register.");
if (LI.hasSubRanges()) {
for (LiveInterval::SubRange &S : LI.subranges()) {
for (const auto &Seg : S.segments) {
if (!Indexes->findLiveInMBBs(Seg.start, Seg.end, LiveIn))
continue;
for (MCSubRegIndexIterator SR(PhysReg, TRI); SR.isValid(); ++SR) {
unsigned SubReg = SR.getSubReg();
unsigned SubRegIndex = SR.getSubRegIndex();
unsigned SubRegLaneMask = TRI->getSubRegIndexLaneMask(SubRegIndex);
if ((SubRegLaneMask & S.LaneMask) == 0)
continue;
for (unsigned i = 0, e = LiveIn.size(); i != e; ++i) {
LiveIn[i]->addLiveIn(SubReg);
}
}
LiveIn.clear();
}
}
} else {
// Scan the segments of LI.
for (const auto &Seg : LI.segments) {
if (!Indexes->findLiveInMBBs(Seg.start, Seg.end, LiveIn))
continue;
for (unsigned i = 0, e = LiveIn.size(); i != e; ++i)
LiveIn[i]->addLiveIn(PhysReg);
LiveIn.clear();
}
}
}
// Sort and unique MBB LiveIns as we've not checked if SubReg/PhysReg were in
// each MBB's LiveIns set before calling addLiveIn on them.
for (MachineBasicBlock &MBB : *MF)
MBB.sortUniqueLiveIns();
}
/// Returns true if the given machine operand \p MO only reads undefined lanes.
/// The function only works for use operands with a subregister set.
bool VirtRegRewriter::readsUndefSubreg(const MachineOperand &MO) const {
// Shortcut if the operand is already marked undef.
if (MO.isUndef())
return true;
unsigned Reg = MO.getReg();
const LiveInterval &LI = LIS->getInterval(Reg);
const MachineInstr &MI = *MO.getParent();
SlotIndex BaseIndex = LIS->getInstructionIndex(&MI);
// This code is only meant to handle reading undefined subregisters which
// we couldn't properly detect before.
assert(LI.liveAt(BaseIndex) &&
"Reads of completely dead register should be marked undef already");
unsigned SubRegIdx = MO.getSubReg();
unsigned UseMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
// See if any of the relevant subregister liveranges is defined at this point.
for (const LiveInterval::SubRange &SR : LI.subranges()) {
if ((SR.LaneMask & UseMask) != 0 && SR.liveAt(BaseIndex))
return false;
}
return true;
}
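// Illustrative example (hypothetical lane masks): if the operand reads lanes
// 0x3 of the virtual register but the only subrange live at this index
// covers lanes 0xC, no lane the operand reads is defined here, so the read
// is undef.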
void VirtRegRewriter::rewrite() {
bool NoSubRegLiveness = !MRI->subRegLivenessEnabled();
SmallVector<unsigned, 8> SuperDeads;
SmallVector<unsigned, 8> SuperDefs;
SmallVector<unsigned, 8> SuperKills;
SmallPtrSet<const MachineInstr *, 4> NoReturnInsts;
// Here we have a SparseSet to hold which PhysRegs are actually encountered
// in the MF we are about to iterate over so that later when we call
// setPhysRegUsed, we are only doing it for physRegs that were actually found
// in the program and not for all of the possible physRegs for the given
// target architecture. If the target has a lot of physRegs, then for a small
// program there will be a significant compile time reduction here.
PhysRegs.clear();
PhysRegs.setUniverse(TRI->getNumRegs());
// A function with the uwtable attribute must guarantee that the stack
// unwinder can unwind the stack to the previous frame. Thus, we can't apply
// the noreturn optimization if the caller function has the uwtable attribute.
bool HasUWTable = MF->getFunction()->hasFnAttribute(Attribute::UWTable);
for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
MBBI != MBBE; ++MBBI) {
DEBUG(MBBI->print(dbgs(), Indexes));
bool IsExitBB = MBBI->succ_empty();
for (MachineBasicBlock::instr_iterator
MII = MBBI->instr_begin(), MIE = MBBI->instr_end(); MII != MIE;) {
MachineInstr *MI = MII;
++MII;
// Check if this instruction is a call to a noreturn function. If this
// is a call to a noreturn function and we don't need the stack unwinding
// functionality (i.e. this function does not have the uwtable attribute
// and the callee function has the nounwind attribute), then we can
// ignore the definitions set by this instruction.
if (!HasUWTable && IsExitBB && MI->isCall()) {
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
MachineOperand &MO = *MOI;
if (!MO.isGlobal())
continue;
const Function *Func = dyn_cast<Function>(MO.getGlobal());
if (!Func || !Func->hasFnAttribute(Attribute::NoReturn) ||
// We need to keep correct unwind information
// even if the function will not return, since the
// runtime may need it.
!Func->hasFnAttribute(Attribute::NoUnwind))
continue;
NoReturnInsts.insert(MI);
break;
}
}
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
MachineOperand &MO = *MOI;
// Make sure MRI knows about registers clobbered by regmasks.
if (MO.isRegMask())
MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
// If we encounter a VirtReg or PhysReg then get at the PhysReg and add
// it to the physreg bitset. Later we use only the PhysRegs that were
// actually encountered in the MF to populate the MRI's used physregs.
if (MO.isReg() && MO.getReg())
PhysRegs.insert(
TargetRegisterInfo::isVirtualRegister(MO.getReg()) ?
VRM->getPhys(MO.getReg()) :
MO.getReg());
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue;
unsigned VirtReg = MO.getReg();
unsigned PhysReg = VRM->getPhys(VirtReg);
assert(PhysReg != VirtRegMap::NO_PHYS_REG &&
"Instruction uses unmapped VirtReg");
assert(!MRI->isReserved(PhysReg) && "Reserved register assignment");
// Preserve semantics of sub-register operands.
unsigned SubReg = MO.getSubReg();
if (SubReg != 0) {
if (NoSubRegLiveness) {
// A virtual register kill refers to the whole register, so we may
// have to add <imp-use,kill> operands for the super-register. A
// partial redef always kills and redefines the super-register.
if (MO.readsReg() && (MO.isDef() || MO.isKill()))
SuperKills.push_back(PhysReg);
if (MO.isDef()) {
// Also add implicit defs for the super-register.
if (MO.isDead())
SuperDeads.push_back(PhysReg);
else
SuperDefs.push_back(PhysReg);
}
} else {
if (MO.isUse()) {
if (readsUndefSubreg(MO))
// We need to add an <undef> flag if the subregister is
// completely undefined (and we are not adding super-register
// defs).
MO.setIsUndef(true);
} else if (!MO.isDead()) {
assert(MO.isDef());
// Things get tricky when we ran out of lane mask bits and
// merged multiple lanes into the overflow bit: In this case
// our subregister liveness tracking isn't precise and we can't
// know what subregister parts are undefined, fall back to the
// implicit super-register def then.
unsigned LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
if (TargetRegisterInfo::isImpreciseLaneMask(LaneMask))
SuperDefs.push_back(PhysReg);
}
}
// The <def,undef> flag only makes sense for sub-register defs, and
// we are substituting a full physreg. An <imp-use,kill> operand
// from the SuperKills list will represent the partial read of the
// super-register.
if (MO.isDef())
MO.setIsUndef(false);
// PhysReg operands cannot have subregister indexes.
PhysReg = TRI->getSubReg(PhysReg, SubReg);
assert(PhysReg && "Invalid SubReg for physical register");
MO.setSubReg(0);
}
// Rewrite. Note we could have used MachineOperand::substPhysReg(), but
// we need the inlining here.
MO.setReg(PhysReg);
}
// Add any missing super-register kills after rewriting the whole
// instruction.
while (!SuperKills.empty())
MI->addRegisterKilled(SuperKills.pop_back_val(), TRI, true);
while (!SuperDeads.empty())
MI->addRegisterDead(SuperDeads.pop_back_val(), TRI, true);
while (!SuperDefs.empty())
MI->addRegisterDefined(SuperDefs.pop_back_val(), TRI);
DEBUG(dbgs() << "> " << *MI);
// Finally, remove any identity copies.
if (MI->isIdentityCopy()) {
++NumIdCopies;
DEBUG(dbgs() << "Deleting identity copy.\n");
if (Indexes)
Indexes->removeMachineInstrFromMaps(MI);
// It's safe to erase MI because MII has already been incremented.
MI->eraseFromParent();
}
}
}
// Tell MRI about physical registers in use.
if (NoReturnInsts.empty()) {
for (SparseSet<unsigned>::iterator
RegI = PhysRegs.begin(), E = PhysRegs.end(); RegI != E; ++RegI)
if (!MRI->reg_nodbg_empty(*RegI))
MRI->setPhysRegUsed(*RegI);
} else {
for (SparseSet<unsigned>::iterator
I = PhysRegs.begin(), E = PhysRegs.end(); I != E; ++I) {
unsigned Reg = *I;
if (MRI->reg_nodbg_empty(Reg))
continue;
// Check if this register has a use that will impact the rest of the
// code. Uses in debug and noreturn instructions do not impact the
// generated code.
for (MachineInstr &It : MRI->reg_nodbg_instructions(Reg)) {
if (!NoReturnInsts.count(&It)) {
MRI->setPhysRegUsed(Reg);
break;
}
}
}
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/SpillPlacement.cpp | //===-- SpillPlacement.cpp - Optimal Spill Code Placement -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the node
// bias.
//
// Transparent basic blocks have the variable live through, but don't care if it
// is spilled or in a register. These blocks become connections in the Hopfield
// network, again weighted by block frequency.
//
// The Hopfield network minimizes (possibly locally) its energy function:
//
// E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
//
//===----------------------------------------------------------------------===//
#include "SpillPlacement.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/ManagedStatic.h"
using namespace llvm;
#define DEBUG_TYPE "spillplacement"
char SpillPlacement::ID = 0;
INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
"Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
"Spill Code Placement Analysis", true, true)
char &llvm::SpillPlacementID = SpillPlacement::ID;
void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addRequiredTransitive<EdgeBundles>();
AU.addRequiredTransitive<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
/// Node - Each edge bundle corresponds to a Hopfield node.
///
/// The node contains precomputed frequency data that only depends on the CFG,
/// but Bias and Links are computed each time placeSpills is called.
///
/// The node Value is positive when the variable should be in a register. The
/// value can change when linked nodes change, but convergence is very fast
/// because all weights are positive.
///
struct SpillPlacement::Node {
/// BiasN - Sum of blocks that prefer a spill.
BlockFrequency BiasN;
/// BiasP - Sum of blocks that prefer a register.
BlockFrequency BiasP;
/// Value - Output value of this node computed from the Bias and links.
/// This is always one of the values {-1, 0, 1}. A positive number means the
/// variable should go in a register through this bundle.
int Value;
typedef SmallVector<std::pair<BlockFrequency, unsigned>, 4> LinkVector;
/// Links - (Weight, BundleNo) for all transparent blocks connecting to other
/// bundles. The weights are all positive block frequencies.
LinkVector Links;
/// SumLinkWeights - Cached sum of the weights of all links + Threshold.
BlockFrequency SumLinkWeights;
/// preferReg - Return true when this node prefers to be in a register.
bool preferReg() const {
// Undecided nodes (Value==0) go on the stack.
return Value > 0;
}
/// mustSpill - Return True if this node is so biased that it must spill.
bool mustSpill() const {
// We must spill if Bias < -sum(weights) or the MustSpill flag was set.
// BiasN is saturated when MustSpill is set, make sure this still returns
// true when the RHS saturates. Note that SumLinkWeights includes Threshold.
return BiasN >= BiasP + SumLinkWeights;
}
/// clear - Reset per-query data, but preserve frequencies that only depend
/// on the CFG.
void clear(const BlockFrequency &Threshold) {
BiasN = BiasP = Value = 0;
SumLinkWeights = Threshold;
Links.clear();
}
/// addLink - Add a link to bundle b with weight w.
void addLink(unsigned b, BlockFrequency w) {
// Update cached sum.
SumLinkWeights += w;
// There can be multiple links to the same bundle, add them up.
for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
if (I->second == b) {
I->first += w;
return;
}
// This must be the first link to b.
Links.push_back(std::make_pair(w, b));
}
/// addBias - Bias this node.
void addBias(BlockFrequency freq, BorderConstraint direction) {
switch (direction) {
default:
break;
case PrefReg:
BiasP += freq;
break;
case PrefSpill:
BiasN += freq;
break;
case MustSpill:
BiasN = BlockFrequency::getMaxFrequency();
break;
}
}
/// update - Recompute Value from Bias and Links. Return true when node
/// preference changes.
bool update(const Node nodes[], const BlockFrequency &Threshold) {
// Compute the weighted sum of inputs.
BlockFrequency SumN = BiasN;
BlockFrequency SumP = BiasP;
for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I) {
if (nodes[I->second].Value == -1)
SumN += I->first;
else if (nodes[I->second].Value == 1)
SumP += I->first;
}
// Each weighted sum is going to be less than the total frequency of the
// bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
// will add a dead zone around 0 for two reasons:
//
// 1. It avoids arbitrary bias when all links are 0 as is possible during
// initial iterations.
// 2. It helps tame rounding errors when the links nominally sum to 0.
//
bool Before = preferReg();
if (SumN >= SumP + Threshold)
Value = -1;
else if (SumP >= SumN + Threshold)
Value = 1;
else
Value = 0;
return Before != preferReg();
}
};
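// Worked example (illustrative): with Threshold == 2, a node seeing SumP == 6
// and SumN == 3 satisfies SumP >= SumN + Threshold and settles at Value == 1
// (prefer register). With SumP == 4 instead, neither inequality holds, so the
// node stays in the dead zone at Value == 0 and the variable goes on the
// stack through this bundle.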
bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
bundles = &getAnalysis<EdgeBundles>();
loops = &getAnalysis<MachineLoopInfo>();
assert(!nodes && "Leaking node array");
nodes = new Node[bundles->getNumBundles()];
// Compute total ingoing and outgoing block frequencies for all bundles.
BlockFrequencies.resize(mf.getNumBlockIDs());
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
setThreshold(MBFI->getEntryFreq());
for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
unsigned Num = I->getNumber();
BlockFrequencies[Num] = MBFI->getBlockFreq(I);
}
// We never change the function.
return false;
}
void SpillPlacement::releaseMemory() {
delete[] nodes;
nodes = nullptr;
}
/// activate - mark node n as active if it wasn't already.
void SpillPlacement::activate(unsigned n) {
if (ActiveNodes->test(n))
return;
ActiveNodes->set(n);
nodes[n].clear(Threshold);
// Very large bundles usually come from big switches, indirect branches,
// landing pads, or loops with many 'continue' statements. It is difficult to
// allocate registers when so many different blocks are involved.
//
// Give a small negative bias to large bundles such that a substantial
// fraction of the connected blocks need to be interested before we consider
// expanding the region through the bundle. This helps compile time by
// limiting the number of blocks visited and the number of links in the
// Hopfield network.
if (bundles->getBlocks(n).size() > 100) {
nodes[n].BiasP = 0;
nodes[n].BiasN = (MBFI->getEntryFreq() / 16);
}
}
/// \brief Set the threshold for a given entry frequency.
///
/// Set the threshold relative to \c Entry. Since the threshold is used as a
/// bound on the open interval (-Threshold, Threshold), 1 is the minimum
/// threshold.
void SpillPlacement::setThreshold(const BlockFrequency &Entry) {
// Apparently 2 is a good threshold when Entry==2^14, but we need to scale
// it. Divide by 2^13, rounding as appropriate.
uint64_t Freq = Entry.getFrequency();
uint64_t Scaled = (Freq >> 13) + bool(Freq & (1 << 12));
Threshold = std::max(UINT64_C(1), Scaled);
}
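// Worked example (illustrative): for Entry == 2^14, Freq >> 13 == 2 and bit
// 12 of Freq is clear, so Scaled == 2, matching the "good" threshold noted
// above. A very small entry frequency scales to 0 and is clamped to the
// minimum threshold of 1.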
/// addConstraints - Compute node biases and weights from a set of constraints.
/// Set a bit in NodeMask for each active node.
void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
E = LiveBlocks.end(); I != E; ++I) {
BlockFrequency Freq = BlockFrequencies[I->Number];
// Live-in to block?
if (I->Entry != DontCare) {
unsigned ib = bundles->getBundle(I->Number, 0);
activate(ib);
nodes[ib].addBias(Freq, I->Entry);
}
// Live-out from block?
if (I->Exit != DontCare) {
unsigned ob = bundles->getBundle(I->Number, 1);
activate(ob);
nodes[ob].addBias(Freq, I->Exit);
}
}
}
/// addPrefSpill - Same as addConstraints(PrefSpill)
void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
I != E; ++I) {
BlockFrequency Freq = BlockFrequencies[*I];
if (Strong)
Freq += Freq;
unsigned ib = bundles->getBundle(*I, 0);
unsigned ob = bundles->getBundle(*I, 1);
activate(ib);
activate(ob);
nodes[ib].addBias(Freq, PrefSpill);
nodes[ob].addBias(Freq, PrefSpill);
}
}
void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
++I) {
unsigned Number = *I;
unsigned ib = bundles->getBundle(Number, 0);
unsigned ob = bundles->getBundle(Number, 1);
// Ignore self-loops.
if (ib == ob)
continue;
activate(ib);
activate(ob);
if (nodes[ib].Links.empty() && !nodes[ib].mustSpill())
Linked.push_back(ib);
if (nodes[ob].Links.empty() && !nodes[ob].mustSpill())
Linked.push_back(ob);
BlockFrequency Freq = BlockFrequencies[Number];
nodes[ib].addLink(ob, Freq);
nodes[ob].addLink(ib, Freq);
}
}
bool SpillPlacement::scanActiveBundles() {
Linked.clear();
RecentPositive.clear();
for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
nodes[n].update(nodes, Threshold);
    // A node that must spill, or a node without any links, is never going to
    // change its value again, so exclude it from further iterations.
if (nodes[n].mustSpill())
continue;
if (!nodes[n].Links.empty())
Linked.push_back(n);
if (nodes[n].preferReg())
RecentPositive.push_back(n);
}
return !RecentPositive.empty();
}
/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached.
/// Uses the Linked member list for the nodes that need updating.
void SpillPlacement::iterate() {
// First update the recently positive nodes. They have likely received new
// negative bias that will turn them off.
while (!RecentPositive.empty())
nodes[RecentPositive.pop_back_val()].update(nodes, Threshold);
if (Linked.empty())
return;
// Run up to 10 iterations. The edge bundle numbering is closely related to
// basic block numbering, so there is a strong tendency towards chains of
// linked nodes with sequential numbers. By scanning the linked nodes
// backwards and forwards, we make it very likely that a single node can
// affect the entire network in a single iteration. That means very fast
// convergence, usually in a single iteration.
for (unsigned iteration = 0; iteration != 10; ++iteration) {
// Scan backwards, skipping the last node when iteration is not zero. When
// iteration is not zero, the last node was just updated.
bool Changed = false;
for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
iteration == 0 ? Linked.rbegin() : std::next(Linked.rbegin()),
E = Linked.rend(); I != E; ++I) {
unsigned n = *I;
if (nodes[n].update(nodes, Threshold)) {
Changed = true;
if (nodes[n].preferReg())
RecentPositive.push_back(n);
}
}
if (!Changed || !RecentPositive.empty())
return;
// Scan forwards, skipping the first node which was just updated.
Changed = false;
for (SmallVectorImpl<unsigned>::const_iterator I =
std::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
unsigned n = *I;
if (nodes[n].update(nodes, Threshold)) {
Changed = true;
if (nodes[n].preferReg())
RecentPositive.push_back(n);
}
}
if (!Changed || !RecentPositive.empty())
return;
}
}
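// Convergence sketch: because edge bundle numbers roughly follow block
// layout, a chain of linked nodes n1 - n2 - n3 - n4 typically settles after
// one backward plus one forward sweep; each pass propagates a changed
// preference across the whole chain, so the 10-iteration cap above is rarely
// reached in practice.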
void SpillPlacement::prepare(BitVector &RegBundles) {
Linked.clear();
RecentPositive.clear();
// Reuse RegBundles as our ActiveNodes vector.
ActiveNodes = &RegBundles;
ActiveNodes->clear();
ActiveNodes->resize(bundles->getNumBundles());
}
bool
SpillPlacement::finish() {
assert(ActiveNodes && "Call prepare() first");
// Write preferences back to ActiveNodes.
bool Perfect = true;
for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n))
if (!nodes[n].preferReg()) {
ActiveNodes->reset(n);
Perfect = false;
}
ActiveNodes = nullptr;
return Perfect;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineFunction.cpp | //===-- MachineFunction.cpp -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code information for a function. This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionInitializer.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "codegen"
void MachineFunctionInitializer::anchor() {}
//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//
// Out-of-line virtual method.
MachineFunctionInfo::~MachineFunctionInfo() {}
void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
MBB->getParent()->DeleteMachineBasicBlock(MBB);
}
MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
unsigned FunctionNum, MachineModuleInfo &mmi)
: Fn(F), Target(TM), STI(TM.getSubtargetImpl(*F)), Ctx(mmi.getContext()),
MMI(mmi) {
if (STI->getRegisterInfo())
RegInfo = new (Allocator) MachineRegisterInfo(this);
else
RegInfo = nullptr;
MFInfo = nullptr;
FrameInfo = new (Allocator)
MachineFrameInfo(STI->getFrameLowering()->getStackAlignment(),
STI->getFrameLowering()->isStackRealignable(),
!F->hasFnAttribute("no-realign-stack"));
if (Fn->hasFnAttribute(Attribute::StackAlignment))
FrameInfo->ensureMaxAlignment(Fn->getFnStackAlignment());
ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
Alignment = std::max(Alignment,
STI->getTargetLowering()->getPrefFunctionAlignment());
FunctionNumber = FunctionNum;
JumpTableInfo = nullptr;
}
MachineFunction::~MachineFunction() {
// Don't call destructors on MachineInstr and MachineOperand. All of their
// memory comes from the BumpPtrAllocator which is about to be purged.
//
  // Do call MachineBasicBlock destructors; they contain std::vectors.
for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
I->Insts.clearAndLeakNodesUnsafely();
InstructionRecycler.clear(Allocator);
OperandRecycler.clear(Allocator);
BasicBlockRecycler.clear(Allocator);
if (RegInfo) {
RegInfo->~MachineRegisterInfo();
Allocator.Deallocate(RegInfo);
}
if (MFInfo) {
MFInfo->~MachineFunctionInfo();
Allocator.Deallocate(MFInfo);
}
FrameInfo->~MachineFrameInfo();
Allocator.Deallocate(FrameInfo);
ConstantPool->~MachineConstantPool();
Allocator.Deallocate(ConstantPool);
if (JumpTableInfo) {
JumpTableInfo->~MachineJumpTableInfo();
Allocator.Deallocate(JumpTableInfo);
}
}
const DataLayout &MachineFunction::getDataLayout() const {
return Fn->getParent()->getDataLayout();
}
/// Get the JumpTableInfo for this function.
/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
if (JumpTableInfo) return JumpTableInfo;
JumpTableInfo = new (Allocator)
MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
return JumpTableInfo;
}
/// Should we be emitting segmented stack code for this function?
bool MachineFunction::shouldSplitStack() {
return getFunction()->hasFnAttribute("split-stack");
}
/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function. If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
if (empty()) { MBBNumbering.clear(); return; }
MachineFunction::iterator MBBI, E = end();
if (MBB == nullptr)
MBBI = begin();
else
MBBI = MBB;
// Figure out the block number this should have.
unsigned BlockNo = 0;
if (MBBI != begin())
BlockNo = std::prev(MBBI)->getNumber() + 1;
for (; MBBI != E; ++MBBI, ++BlockNo) {
if (MBBI->getNumber() != (int)BlockNo) {
// Remove use of the old number.
if (MBBI->getNumber() != -1) {
assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
"MBB number mismatch!");
MBBNumbering[MBBI->getNumber()] = nullptr;
}
// If BlockNo is already taken, set that block's number to -1.
if (MBBNumbering[BlockNo])
MBBNumbering[BlockNo]->setNumber(-1);
MBBNumbering[BlockNo] = MBBI;
MBBI->setNumber(BlockNo);
}
}
// Okay, all the blocks are renumbered. If we have compactified the block
// numbering, shrink MBBNumbering now.
assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
MBBNumbering.resize(BlockNo);
}
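// Example (illustration): if the function's layout order is blocks numbered
// #0, #3, #1, RenumberBlocks(nullptr) reassigns them to #0, #1, #2, clears
// the stale MBBNumbering entry for #3, and shrinks MBBNumbering to three
// entries.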
/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *
MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
DebugLoc DL, bool NoImp) {
return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
MachineInstr(*this, MCID, DL, NoImp);
}
/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
MachineInstr(*this, *Orig);
}
/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void
MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
// Strip it for parts. The operand array and the MI object itself are
// independently recyclable.
if (MI->Operands)
deallocateOperandArray(MI->CapOperands, MI->Operands);
// Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
// destructors.
InstructionRecycler.Deallocate(Allocator, MI);
}
/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
MachineBasicBlock(*this, bb);
}
/// Delete the given MachineBasicBlock.
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
assert(MBB->getParent() == this && "MBB parent mismatch!");
MBB->~MachineBasicBlock();
BasicBlockRecycler.Deallocate(Allocator, MBB);
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
uint64_t s, unsigned base_alignment,
const AAMDNodes &AAInfo,
const MDNode *Ranges) {
return new (Allocator) MachineMemOperand(PtrInfo, f, s, base_alignment,
AAInfo, Ranges);
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size) {
if (MMO->getValue())
return new (Allocator)
MachineMemOperand(MachinePointerInfo(MMO->getValue(),
MMO->getOffset()+Offset),
MMO->getFlags(), Size,
MMO->getBaseAlignment());
return new (Allocator)
MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
MMO->getOffset()+Offset),
MMO->getFlags(), Size,
MMO->getBaseAlignment());
}
MachineInstr::mmo_iterator
MachineFunction::allocateMemRefsArray(unsigned long Num) {
return Allocator.Allocate<MachineMemOperand *>(Num);
}
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
MachineInstr::mmo_iterator End) {
// Count the number of load mem refs.
unsigned Num = 0;
for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
if ((*I)->isLoad())
++Num;
// Allocate a new array and populate it with the load information.
MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
unsigned Index = 0;
for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
if ((*I)->isLoad()) {
if (!(*I)->isStore())
// Reuse the MMO.
Result[Index] = *I;
else {
// Clone the MMO and unset the store flag.
MachineMemOperand *JustLoad =
getMachineMemOperand((*I)->getPointerInfo(),
(*I)->getFlags() & ~MachineMemOperand::MOStore,
(*I)->getSize(), (*I)->getBaseAlignment(),
(*I)->getAAInfo());
Result[Index] = JustLoad;
}
++Index;
}
}
return std::make_pair(Result, Result + Num);
}
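// Sketch of the intent (hypothetical memref list): given the memrefs
// { load @a, load+store @b, store @c }, extractLoadMemRefs() returns
// { load @a, load @b } -- the combined load/store operand is cloned with
// MOStore cleared, pure loads are reused as-is, and pure stores are dropped.
// extractStoreMemRefs() below is the mirror image for stores.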
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
MachineInstr::mmo_iterator End) {
  // Count the number of store mem refs.
unsigned Num = 0;
for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
if ((*I)->isStore())
++Num;
// Allocate a new array and populate it with the store information.
MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
unsigned Index = 0;
for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
if ((*I)->isStore()) {
if (!(*I)->isLoad())
// Reuse the MMO.
Result[Index] = *I;
else {
// Clone the MMO and unset the load flag.
MachineMemOperand *JustStore =
getMachineMemOperand((*I)->getPointerInfo(),
(*I)->getFlags() & ~MachineMemOperand::MOLoad,
(*I)->getSize(), (*I)->getBaseAlignment(),
(*I)->getAAInfo());
Result[Index] = JustStore;
}
++Index;
}
}
return std::make_pair(Result, Result + Num);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineFunction::dump() const {
print(dbgs());
}
#endif
StringRef MachineFunction::getName() const {
assert(getFunction() && "No function!");
return getFunction()->getName();
}
void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
OS << "# Machine code for function " << getName() << ": ";
if (RegInfo) {
OS << (RegInfo->isSSA() ? "SSA" : "Post SSA");
if (!RegInfo->tracksLiveness())
OS << ", not tracking liveness";
}
OS << '\n';
// Print Frame Information
FrameInfo->print(*this, OS);
// Print JumpTable Information
if (JumpTableInfo)
JumpTableInfo->print(OS);
// Print Constant Pool
ConstantPool->print(OS);
const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
if (RegInfo && !RegInfo->livein_empty()) {
OS << "Function Live Ins: ";
for (MachineRegisterInfo::livein_iterator
I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
OS << PrintReg(I->first, TRI);
if (I->second)
OS << " in " << PrintReg(I->second, TRI);
if (std::next(I) != E)
OS << ", ";
}
OS << '\n';
}
ModuleSlotTracker MST(getFunction()->getParent());
MST.incorporateFunction(*getFunction());
for (const auto &BB : *this) {
OS << '\n';
BB.print(OS, MST, Indexes);
}
OS << "\n# End machine code for function " << getName() << ".\n\n";
}
namespace llvm {
template<>
struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const MachineFunction *F) {
return ("CFG for '" + F->getName() + "' function").str();
}
std::string getNodeLabel(const MachineBasicBlock *Node,
const MachineFunction *Graph) {
std::string OutStr;
{
raw_string_ostream OSS(OutStr);
if (isSimple()) {
OSS << "BB#" << Node->getNumber();
if (const BasicBlock *BB = Node->getBasicBlock())
OSS << ": " << BB->getName();
} else
Node->print(OSS);
}
if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
// Process string output to make it nicer...
for (unsigned i = 0; i != OutStr.length(); ++i)
if (OutStr[i] == '\n') { // Left justify
OutStr[i] = '\\';
OutStr.insert(OutStr.begin()+i+1, 'l');
}
return OutStr;
}
};
}
void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
ViewGraph(this, "mf" + getName());
#else
errs() << "MachineFunction::viewCFG is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
ViewGraph(this, "mf" + getName(), true);
#else
errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
unsigned MachineFunction::addLiveIn(unsigned PReg,
const TargetRegisterClass *RC) {
MachineRegisterInfo &MRI = getRegInfo();
unsigned VReg = MRI.getLiveInVirtReg(PReg);
if (VReg) {
const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
(void)VRegRC;
// A physical register can be added several times.
// Between two calls, the register class of the related virtual register
// may have been constrained to match some operation constraints.
// In that case, check that the current register class includes the
// physical register and is a sub class of the specified RC.
assert((VRegRC == RC || (VRegRC->contains(PReg) &&
RC->hasSubClassEq(VRegRC))) &&
"Register class mismatch!");
return VReg;
}
VReg = MRI.createVirtualRegister(RC);
MRI.addLiveIn(PReg, VReg);
return VReg;
}
/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate) const {
const DataLayout &DL = getDataLayout();
assert(JumpTableInfo && "No jump tables");
assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
const char *Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
: DL.getPrivateGlobalPrefix();
SmallString<60> Name;
raw_svector_ostream(Name)
<< Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
return Ctx.getOrCreateSymbol(Name);
}
/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
const DataLayout &DL = getDataLayout();
return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
Twine(getFunctionNumber()) + "$pb");
}
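// Example (the prefix is target-dependent; shown for illustration only):
// with a private global prefix of "L" and function number 7, this returns
// the symbol "L7$pb".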
//===----------------------------------------------------------------------===//
// MachineFrameInfo implementation
//===----------------------------------------------------------------------===//
/// Make sure the stack frame is at least Align bytes aligned.
void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
if (!StackRealignable || !RealignOption)
assert(Align <= StackAlignment &&
"For targets without stack realignment, Align is out of limit!");
if (MaxAlignment < Align) MaxAlignment = Align;
}
/// Clamp the alignment if requested and emit a warning.
static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
unsigned StackAlign) {
if (!ShouldClamp || Align <= StackAlign)
return Align;
DEBUG(dbgs() << "Warning: requested alignment " << Align
<< " exceeds the stack alignment " << StackAlign
<< " when stack realignment is off" << '\n');
return StackAlign;
}
/// Create a new statically sized stack object, returning a nonnegative
/// identifier to represent it.
int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
bool isSS, const AllocaInst *Alloca) {
assert(Size != 0 && "Cannot allocate zero size stack objects!");
Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
Alignment, StackAlignment);
Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca,
!isSS));
int Index = (int)Objects.size() - NumFixedObjects - 1;
assert(Index >= 0 && "Bad frame index!");
ensureMaxAlignment(Alignment);
return Index;
}
/// Create a new statically sized stack object that represents a spill slot,
/// returning a nonnegative identifier to represent it.
int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
unsigned Alignment) {
Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
Alignment, StackAlignment);
CreateStackObject(Size, Alignment, true);
int Index = (int)Objects.size() - NumFixedObjects - 1;
ensureMaxAlignment(Alignment);
return Index;
}
/// Notify the MachineFrameInfo object that a variable sized object has been
/// created. This must be called whenever a variable sized object is created,
/// whether or not the index returned is actually used.
int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
const AllocaInst *Alloca) {
HasVarSizedObjects = true;
Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
Alignment, StackAlignment);
Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca, true));
ensureMaxAlignment(Alignment);
return (int)Objects.size()-NumFixedObjects-1;
}
/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
bool Immutable, bool isAliased) {
assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
// The alignment of the frame index can be determined from its offset from
// the incoming frame position. If the frame object is at offset 32 and
// the stack is guaranteed to be 16-byte aligned, then we know that the
// object is 16-byte aligned.
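  // For instance (sketch, assuming the usual llvm::MinAlign semantics of
  // taking the largest power of two dividing both values):
  // MinAlign(32, 16) == 16, but MinAlign(20, 16) == 4.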
unsigned Align = MinAlign(SPOffset, StackAlignment);
Align = clampStackAlignment(!StackRealignable || !RealignOption, Align,
StackAlignment);
Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
/*isSS*/ false,
/*Alloca*/ nullptr, isAliased));
return -++NumFixedObjects;
}
/// Create a spill slot at a fixed location on the stack.
/// Returns an index with a negative value.
int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
int64_t SPOffset) {
unsigned Align = MinAlign(SPOffset, StackAlignment);
Align = clampStackAlignment(!StackRealignable || !RealignOption, Align,
StackAlignment);
Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset,
/*Immutable*/ true,
/*isSS*/ true,
/*Alloca*/ nullptr,
/*isAliased*/ false));
return -++NumFixedObjects;
}
BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
BitVector BV(TRI->getNumRegs());
// Before CSI is calculated, no registers are considered pristine. They can be
// freely used and PEI will make sure they are saved.
if (!isCalleeSavedInfoValid())
return BV;
for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
BV.set(*CSR);
// Saved CSRs are not pristine.
const std::vector<CalleeSavedInfo> &CSI = getCalleeSavedInfo();
for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I)
BV.reset(I->getReg());
return BV;
}
unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
unsigned MaxAlign = getMaxAlignment();
int Offset = 0;
// This code is very, very similar to PEI::calculateFrameObjectOffsets().
// It really should be refactored to share code. Until then, changes
// should keep in mind that there's tight coupling between the two.
for (int i = getObjectIndexBegin(); i != 0; ++i) {
int FixedOff = -getObjectOffset(i);
if (FixedOff > Offset) Offset = FixedOff;
}
for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
if (isDeadObjectIndex(i))
continue;
Offset += getObjectSize(i);
unsigned Align = getObjectAlignment(i);
// Adjust to alignment boundary
Offset = (Offset+Align-1)/Align*Align;
MaxAlign = std::max(Align, MaxAlign);
}
if (adjustsStack() && TFI->hasReservedCallFrame(MF))
Offset += getMaxCallFrameSize();
// Round up the size to a multiple of the alignment. If the function has
// any calls or alloca's, align to the target's StackAlignment value to
// ensure that the callee's frame or the alloca data is suitably aligned;
// otherwise, for leaf functions, align to the TransientStackAlignment
// value.
unsigned StackAlign;
if (adjustsStack() || hasVarSizedObjects() ||
(RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
StackAlign = TFI->getStackAlignment();
else
StackAlign = TFI->getTransientStackAlignment();
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
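  // E.g. (sketch): Offset == 40 with StackAlign == 16 gives AlignMask == 15
  // and (40 + 15) & ~15 == 48.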
unsigned AlignMask = StackAlign - 1;
Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
return (unsigned)Offset;
}
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
if (Objects.empty()) return;
const TargetFrameLowering *FI = MF.getSubtarget().getFrameLowering();
int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);
OS << "Frame Objects:\n";
for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
const StackObject &SO = Objects[i];
OS << " fi#" << (int)(i-NumFixedObjects) << ": ";
if (SO.Size == ~0ULL) {
OS << "dead\n";
continue;
}
if (SO.Size == 0)
OS << "variable sized";
else
OS << "size=" << SO.Size;
OS << ", align=" << SO.Alignment;
if (i < NumFixedObjects)
OS << ", fixed";
if (i < NumFixedObjects || SO.SPOffset != -1) {
int64_t Off = SO.SPOffset - ValOffset;
OS << ", at location [SP";
if (Off > 0)
OS << "+" << Off;
else if (Off < 0)
OS << Off;
OS << "]";
}
OS << "\n";
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineFrameInfo::dump(const MachineFunction &MF) const {
print(MF, dbgs());
}
#endif
//===----------------------------------------------------------------------===//
// MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//
/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
// The size of a jump table entry is 4 bytes unless the entry is just the
// address of a block, in which case it is the pointer size.
switch (getEntryKind()) {
case MachineJumpTableInfo::EK_BlockAddress:
return TD.getPointerSize();
case MachineJumpTableInfo::EK_GPRel64BlockAddress:
return 8;
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32:
case MachineJumpTableInfo::EK_Custom32:
return 4;
case MachineJumpTableInfo::EK_Inline:
return 0;
}
llvm_unreachable("Unknown jump table encoding!");
}
/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
// The alignment of a jump table entry is the alignment of int32 unless the
// entry is just the address of a block, in which case it is the pointer
// alignment.
switch (getEntryKind()) {
case MachineJumpTableInfo::EK_BlockAddress:
return TD.getPointerABIAlignment();
case MachineJumpTableInfo::EK_GPRel64BlockAddress:
return TD.getABIIntegerTypeAlignment(64);
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32:
case MachineJumpTableInfo::EK_Custom32:
return TD.getABIIntegerTypeAlignment(32);
case MachineJumpTableInfo::EK_Inline:
return 1;
}
llvm_unreachable("Unknown jump table encoding!");
}
/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
const std::vector<MachineBasicBlock*> &DestBBs) {
assert(!DestBBs.empty() && "Cannot create an empty jump table!");
JumpTables.push_back(MachineJumpTableEntry(DestBBs));
return JumpTables.size()-1;
}
/// If Old is the target of any jump tables, update the jump tables to branch
/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
MachineBasicBlock *New) {
assert(Old != New && "Not making a change?");
bool MadeChange = false;
for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
    MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
return MadeChange;
}
/// If Old is a target of the jump tables, update the jump table to branch to
/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
MachineBasicBlock *Old,
MachineBasicBlock *New) {
assert(Old != New && "Not making a change?");
bool MadeChange = false;
MachineJumpTableEntry &JTE = JumpTables[Idx];
for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
if (JTE.MBBs[j] == Old) {
JTE.MBBs[j] = New;
MadeChange = true;
}
return MadeChange;
}
void MachineJumpTableInfo::print(raw_ostream &OS) const {
if (JumpTables.empty()) return;
OS << "Jump Tables:\n";
for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
OS << " jt#" << i << ": ";
for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
}
OS << '\n';
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif
//===----------------------------------------------------------------------===//
// MachineConstantPool implementation
//===----------------------------------------------------------------------===//
void MachineConstantPoolValue::anchor() { }
Type *MachineConstantPoolEntry::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
}
unsigned MachineConstantPoolEntry::getRelocationInfo() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getRelocationInfo();
return Val.ConstVal->getRelocationInfo();
}
SectionKind
MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
SectionKind Kind;
switch (getRelocationInfo()) {
default:
llvm_unreachable("Unknown section kind");
case Constant::GlobalRelocations:
Kind = SectionKind::getReadOnlyWithRel();
break;
case Constant::LocalRelocation:
Kind = SectionKind::getReadOnlyWithRelLocal();
break;
case Constant::NoRelocation:
switch (DL->getTypeAllocSize(getType())) {
case 4:
Kind = SectionKind::getMergeableConst4();
break;
case 8:
Kind = SectionKind::getMergeableConst8();
break;
case 16:
Kind = SectionKind::getMergeableConst16();
break;
default:
Kind = SectionKind::getReadOnly();
break;
}
}
return Kind;
}
MachineConstantPool::~MachineConstantPool() {
for (unsigned i = 0, e = Constants.size(); i != e; ++i)
if (Constants[i].isMachineConstantPoolEntry())
delete Constants[i].Val.MachineCPVal;
for (DenseSet<MachineConstantPoolValue*>::iterator I =
MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
I != E; ++I)
delete *I;
}
/// Test whether the given two constants can be allocated the same constant pool
/// entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
const DataLayout &DL) {
// Handle the trivial case quickly.
if (A == B) return true;
// If they have the same type but weren't the same constant, quickly
// reject them.
if (A->getType() == B->getType()) return false;
// We can't handle structs or arrays.
if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
return false;
// For now, only support constants with the same size.
uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
return false;
Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
// Try constant folding a bitcast of both instructions to an integer. If we
// get two identical ConstantInt's, then we are good to share them. We use
// the constant folding APIs to do this so that we get the benefit of
// DataLayout.
if (isa<PointerType>(A->getType()))
A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
const_cast<Constant *>(A), DL);
else if (A->getType() != IntTy)
A = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
const_cast<Constant *>(A), DL);
if (isa<PointerType>(B->getType()))
B = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
const_cast<Constant *>(B), DL);
else if (B->getType() != IntTy)
B = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
const_cast<Constant *>(B), DL);
return A == B;
}
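// Illustration (assumed example): float 1.0f and i32 0x3f800000 both have a
// 4-byte store size; bitcasting each to i32 constant-folds to the same
// ConstantInt, so the two constants can share one pool entry even though
// their IR types differ.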
/// Create a new entry in the constant pool or return an existing one.
/// The user must specify the minimum required alignment, in bytes, for the object.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
if (Alignment > PoolAlignment) PoolAlignment = Alignment;
// Check to see if we already have this constant.
//
// FIXME, this could be made much more efficient for large constant pools.
for (unsigned i = 0, e = Constants.size(); i != e; ++i)
if (!Constants[i].isMachineConstantPoolEntry() &&
CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
if ((unsigned)Constants[i].getAlignment() < Alignment)
Constants[i].Alignment = Alignment;
return i;
}
Constants.push_back(MachineConstantPoolEntry(C, Alignment));
return Constants.size()-1;
}
unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
if (Alignment > PoolAlignment) PoolAlignment = Alignment;
// Check to see if we already have this constant.
//
// FIXME, this could be made much more efficient for large constant pools.
int Idx = V->getExistingMachineCPValue(this, Alignment);
if (Idx != -1) {
MachineCPVsSharingEntries.insert(V);
return (unsigned)Idx;
}
Constants.push_back(MachineConstantPoolEntry(V, Alignment));
return Constants.size()-1;
}
void MachineConstantPool::print(raw_ostream &OS) const {
if (Constants.empty()) return;
OS << "Constant Pool:\n";
for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
OS << " cp#" << i << ": ";
if (Constants[i].isMachineConstantPoolEntry())
Constants[i].Val.MachineCPVal->print(OS);
else
Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
OS << ", align=" << Constants[i].getAlignment();
OS << "\n";
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineConstantPool::dump() const { print(dbgs()); }
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineLICM.cpp | //===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion on machine instructions. We
// attempt to remove as much code from the body of a loop as possible.
//
// This pass is not intended to be a replacement or a complete alternative
// for the LLVM-IR-level LICM pass. It is only designed to hoist simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "machine-licm"
static cl::opt<bool>
AvoidSpeculation("avoid-speculation",
cl::desc("MachineLICM should avoid speculation"),
cl::init(true), cl::Hidden);
static cl::opt<bool>
HoistCheapInsts("hoist-cheap-insts",
cl::desc("MachineLICM should hoist even cheap instructions"),
cl::init(false), cl::Hidden);
static cl::opt<bool>
SinkInstsToAvoidSpills("sink-insts-to-avoid-spills",
cl::desc("MachineLICM should sink instructions into "
"loops to avoid register spills"),
cl::init(false), cl::Hidden);
STATISTIC(NumHoisted,
"Number of machine instructions hoisted out of loops");
STATISTIC(NumLowRP,
"Number of instructions hoisted in low reg pressure situation");
STATISTIC(NumHighLatency,
"Number of high latency instructions hoisted");
STATISTIC(NumCSEed,
"Number of hoisted machine instructions CSEed");
STATISTIC(NumPostRAHoisted,
"Number of machine instructions hoisted out of loops post regalloc");
namespace {
class MachineLICM : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetLoweringBase *TLI;
const TargetRegisterInfo *TRI;
const MachineFrameInfo *MFI;
MachineRegisterInfo *MRI;
TargetSchedModel SchedModel;
bool PreRegAlloc;
// Various analyses that we use...
AliasAnalysis *AA; // Alias analysis info.
MachineLoopInfo *MLI; // Current MachineLoopInfo
MachineDominatorTree *DT; // Machine dominator tree for the cur loop
// State that is updated as we process loops
bool Changed; // True if a loop is changed.
bool FirstInLoop; // True if it's the first LICM in the loop.
MachineLoop *CurLoop; // The current loop we are working on.
MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
// Exit blocks for CurLoop.
SmallVector<MachineBasicBlock*, 8> ExitBlocks;
bool isExitBlock(const MachineBasicBlock *MBB) const {
return std::find(ExitBlocks.begin(), ExitBlocks.end(), MBB) !=
ExitBlocks.end();
}
// Track 'estimated' register pressure.
SmallSet<unsigned, 32> RegSeen;
SmallVector<unsigned, 8> RegPressure;
// Register pressure "limit" per register pressure set. If the pressure
// is higher than the limit, then it's considered high.
SmallVector<unsigned, 8> RegLimit;
// Register pressure on path leading from loop preheader to current BB.
SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
// For each opcode, keep a list of potential CSE instructions.
DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
enum {
SpeculateFalse = 0,
SpeculateTrue = 1,
SpeculateUnknown = 2
};
    // If an MBB does not dominate the loop's exiting blocks, then it may not
    // be safe to hoist loads from this block.
// Tri-state: 0 - false, 1 - true, 2 - unknown
unsigned SpeculationState;
public:
static char ID; // Pass identification, replacement for typeid
MachineLICM() :
MachineFunctionPass(ID), PreRegAlloc(true) {
initializeMachineLICMPass(*PassRegistry::getPassRegistry());
}
explicit MachineLICM(bool PreRA) :
MachineFunctionPass(ID), PreRegAlloc(PreRA) {
initializeMachineLICMPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineLoopInfo>();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<MachineLoopInfo>();
AU.addPreserved<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
void releaseMemory() override {
RegSeen.clear();
RegPressure.clear();
RegLimit.clear();
BackTrace.clear();
CSEMap.clear();
}
private:
/// CandidateInfo - Keep track of information about hoisting candidates.
struct CandidateInfo {
MachineInstr *MI;
unsigned Def;
int FI;
CandidateInfo(MachineInstr *mi, unsigned def, int fi)
: MI(mi), Def(def), FI(fi) {}
};
/// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
/// invariants out to the preheader.
void HoistRegionPostRA();
    /// HoistPostRA - When an instruction is found to use only loop-invariant
    /// operands and is safe to hoist, this function is called to do the
    /// dirty work.
void HoistPostRA(MachineInstr *MI, unsigned Def);
    /// ProcessMI - Examine the instruction as a potential LICM candidate. Also
/// gather register def and frame object update information.
void ProcessMI(MachineInstr *MI,
BitVector &PhysRegDefs,
BitVector &PhysRegClobbers,
SmallSet<int, 32> &StoredFIs,
SmallVectorImpl<CandidateInfo> &Candidates);
/// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the
/// current loop.
void AddToLiveIns(unsigned Reg);
/// IsLICMCandidate - Returns true if the instruction may be a suitable
/// candidate for LICM. e.g. If the instruction is a call, then it's
/// obviously not safe to hoist it.
bool IsLICMCandidate(MachineInstr &I);
/// IsLoopInvariantInst - Returns true if the instruction is loop
/// invariant. I.e., all virtual register operands are defined outside of
/// the loop, physical registers aren't accessed (explicitly or implicitly),
/// and the instruction is hoistable.
///
bool IsLoopInvariantInst(MachineInstr &I);
/// HasLoopPHIUse - Return true if the specified instruction is used by any
/// phi node in the current loop.
bool HasLoopPHIUse(const MachineInstr *MI) const;
/// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
    /// and a use in the current loop; return true if the target considers
    /// it 'high'.
bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
unsigned Reg) const;
bool IsCheapInstruction(MachineInstr &MI) const;
/// CanCauseHighRegPressure - Visit BBs from header to current BB,
    /// check if hoisting an instruction with the given per-pressure-set cost
    /// can cause high register pressure.
bool CanCauseHighRegPressure(const DenseMap<unsigned, int> &Cost,
bool Cheap);
/// UpdateBackTraceRegPressure - Traverse the back trace from header to
/// the current block and update their register pressures to reflect the
/// effect of hoisting MI from the current block to the preheader.
void UpdateBackTraceRegPressure(const MachineInstr *MI);
/// IsProfitableToHoist - Return true if it is potentially profitable to
/// hoist the given loop invariant.
bool IsProfitableToHoist(MachineInstr &MI);
/// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
/// If not then a load from this mbb may not be safe to hoist.
bool IsGuaranteedToExecute(MachineBasicBlock *BB);
void EnterScope(MachineBasicBlock *MBB);
void ExitScope(MachineBasicBlock *MBB);
/// ExitScopeIfDone - Destroy scope for the MBB that corresponds to given
    /// dominator tree node if it's a leaf or all of its children are done. Walk
/// up the dominator tree to destroy ancestors which are now done.
void ExitScopeIfDone(MachineDomTreeNode *Node,
DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);
/// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
/// blocks dominated by the specified header block, and that are in the
/// current loop) in depth first order w.r.t the DominatorTree. This allows
/// us to visit definitions before uses, allowing us to hoist a loop body in
/// one pass without iteration.
///
void HoistOutOfLoop(MachineDomTreeNode *LoopHeaderNode);
void HoistRegion(MachineDomTreeNode *N, bool IsHeader);
/// SinkIntoLoop - Sink instructions into loops if profitable. This
/// especially tries to prevent register spills caused by register pressure
/// if there is little to no overhead moving instructions into loops.
void SinkIntoLoop();
/// InitRegPressure - Find all virtual register references that are liveout
/// of the preheader to initialize the starting "register pressure". Note
/// this does not count live through (livein but not used) registers.
void InitRegPressure(MachineBasicBlock *BB);
/// calcRegisterCost - Calculate the additional register pressure that the
/// registers used in MI cause.
///
/// If 'ConsiderSeen' is true, updates 'RegSeen' and uses the information to
/// figure out which usages are live-ins.
/// FIXME: Figure out a way to consider 'RegSeen' from all code paths.
DenseMap<unsigned, int> calcRegisterCost(const MachineInstr *MI,
bool ConsiderSeen,
bool ConsiderUnseenAsDef);
/// UpdateRegPressure - Update estimate of register pressure after the
/// specified instruction.
void UpdateRegPressure(const MachineInstr *MI,
bool ConsiderUnseenAsDef = false);
    /// ExtractHoistableLoad - Unfold a load from the given MachineInstr if
/// the load itself could be hoisted. Return the unfolded and hoistable
/// load, or null if the load couldn't be unfolded or if it wouldn't
/// be hoistable.
MachineInstr *ExtractHoistableLoad(MachineInstr *MI);
    /// LookForDuplicate - Find an instruction among PrevMIs that is a
/// duplicate of MI. Return this instruction if it's found.
const MachineInstr *LookForDuplicate(const MachineInstr *MI,
std::vector<const MachineInstr*> &PrevMIs);
    /// EliminateCSE - Given a LICM'ed instruction, look for an instruction in
    /// the preheader that computes the same value. If one is found, replace
    /// all uses with the definition of the existing instruction rather than
    /// hoisting the instruction to the preheader.
bool EliminateCSE(MachineInstr *MI,
DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);
/// MayCSE - Return true if the given instruction will be CSE'd if it's
/// hoisted out of the loop.
bool MayCSE(MachineInstr *MI);
    /// Hoist - When an instruction is found to use only loop-invariant
    /// operands and is safe to hoist, this function is called to do the
    /// dirty work. It returns true if the instruction is hoisted.
bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
/// InitCSEMap - Initialize the CSE map with instructions that are in the
/// current loop preheader that may become duplicates of instructions that
/// are hoisted out of the loop.
void InitCSEMap(MachineBasicBlock *BB);
/// getCurPreheader - Get the preheader for the current loop, splitting
/// a critical edge if needed.
MachineBasicBlock *getCurPreheader();
};
} // end anonymous namespace
char MachineLICM::ID = 0;
char &llvm::MachineLICMID = MachineLICM::ID;
INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
"Machine Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineLICM, "machinelicm",
"Machine Loop Invariant Code Motion", false, false)
/// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
/// loop that has a unique predecessor.
static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
// Check whether this loop even has a unique predecessor.
if (!CurLoop->getLoopPredecessor())
return false;
// Ok, now check to see if any of its outer loops do.
for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
if (L->getLoopPredecessor())
return false;
// None of them did, so this is the outermost with a unique predecessor.
return true;
}
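// Illustration (sketch): for nested loops L1 containing L2, where both have
// unique predecessors, only L1 qualifies -- pre-regalloc LICM starts at the
// outermost loop with a predecessor and lets HoistOutOfLoop handle the
// nested bodies from there.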
bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
Changed = FirstInLoop = false;
const TargetSubtargetInfo &ST = MF.getSubtarget();
TII = ST.getInstrInfo();
TLI = ST.getTargetLowering();
TRI = ST.getRegisterInfo();
MFI = MF.getFrameInfo();
MRI = &MF.getRegInfo();
SchedModel.init(ST.getSchedModel(), &ST, TII);
PreRegAlloc = MRI->isSSA();
if (PreRegAlloc)
DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
else
DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
DEBUG(dbgs() << MF.getName() << " ********\n");
if (PreRegAlloc) {
// Estimate register pressure during pre-regalloc pass.
unsigned NumRPS = TRI->getNumRegPressureSets();
RegPressure.resize(NumRPS);
std::fill(RegPressure.begin(), RegPressure.end(), 0);
RegLimit.resize(NumRPS);
for (unsigned i = 0, e = NumRPS; i != e; ++i)
RegLimit[i] = TRI->getRegPressureSetLimit(MF, i);
}
// Get our Loop information...
MLI = &getAnalysis<MachineLoopInfo>();
DT = &getAnalysis<MachineDominatorTree>();
AA = &getAnalysis<AliasAnalysis>();
SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
while (!Worklist.empty()) {
CurLoop = Worklist.pop_back_val();
CurPreheader = nullptr;
ExitBlocks.clear();
// If this is done before regalloc, only visit outer-most preheader-sporting
// loops.
if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
Worklist.append(CurLoop->begin(), CurLoop->end());
continue;
}
CurLoop->getExitBlocks(ExitBlocks);
if (!PreRegAlloc)
HoistRegionPostRA();
else {
// CSEMap is initialized for loop header when the first instruction is
// being hoisted.
MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
FirstInLoop = true;
HoistOutOfLoop(N);
CSEMap.clear();
if (SinkInstsToAvoidSpills)
SinkIntoLoop();
}
}
return Changed;
}
/// InstructionStoresToFI - Return true if instruction stores to the
/// specified frame index.
static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
oe = MI->memoperands_end(); o != oe; ++o) {
if (!(*o)->isStore() || !(*o)->getPseudoValue())
continue;
if (const FixedStackPseudoSourceValue *Value =
dyn_cast<FixedStackPseudoSourceValue>((*o)->getPseudoValue())) {
if (Value->getFrameIndex() == FI)
return true;
}
}
return false;
}
/// ProcessMI - Examine the instruction as a potential LICM candidate. Also
/// gather register def and frame object update information.
void MachineLICM::ProcessMI(MachineInstr *MI,
BitVector &PhysRegDefs,
BitVector &PhysRegClobbers,
SmallSet<int, 32> &StoredFIs,
SmallVectorImpl<CandidateInfo> &Candidates) {
bool RuledOut = false;
bool HasNonInvariantUse = false;
unsigned Def = 0;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isFI()) {
// Remember if the instruction stores to the frame index.
int FI = MO.getIndex();
if (!StoredFIs.count(FI) &&
MFI->isSpillSlotObjectIndex(FI) &&
InstructionStoresToFI(MI, FI))
StoredFIs.insert(FI);
HasNonInvariantUse = true;
continue;
}
// We can't hoist an instruction defining a physreg that is clobbered in
// the loop.
if (MO.isRegMask()) {
PhysRegClobbers.setBitsNotInMask(MO.getRegMask());
continue;
}
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
"Not expecting virtual register!");
if (!MO.isDef()) {
if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
// If it's using a non-loop-invariant register, then it's obviously not
// safe to hoist.
HasNonInvariantUse = true;
continue;
}
if (MO.isImplicit()) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
PhysRegClobbers.set(*AI);
if (!MO.isDead())
// Non-dead implicit def? This cannot be hoisted.
RuledOut = true;
// No need to check if a dead implicit def is also defined by
// another instruction.
continue;
}
// FIXME: For now, avoid instructions with multiple defs, unless
// it's a dead implicit def.
if (Def)
RuledOut = true;
else
Def = Reg;
// If we have already seen another instruction that defines the same
// register, then this is not safe. Two defs is indicated by setting a
// PhysRegClobbers bit.
for (MCRegAliasIterator AS(Reg, TRI, true); AS.isValid(); ++AS) {
if (PhysRegDefs.test(*AS))
PhysRegClobbers.set(*AS);
PhysRegDefs.set(*AS);
}
if (PhysRegClobbers.test(Reg))
      // The register defined by MI is also defined by another instruction in
      // the loop, so MI cannot be a LICM candidate.
RuledOut = true;
}
// Only consider reloads for now and remats which do not have register
// operands. FIXME: Consider unfold load folding instructions.
if (Def && !RuledOut) {
int FI = INT_MIN;
if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
(TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
Candidates.push_back(CandidateInfo(MI, Def, FI));
}
}
/// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
/// invariants out to the preheader.
void MachineLICM::HoistRegionPostRA() {
MachineBasicBlock *Preheader = getCurPreheader();
if (!Preheader)
return;
unsigned NumRegs = TRI->getNumRegs();
BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.
SmallVector<CandidateInfo, 32> Candidates;
SmallSet<int, 32> StoredFIs;
// Walk the entire region, count number of defs for each register, and
// collect potential LICM candidates.
const std::vector<MachineBasicBlock *> &Blocks = CurLoop->getBlocks();
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
MachineBasicBlock *BB = Blocks[i];
// If the header of the loop containing this basic block is a landing pad,
// then don't try to hoist instructions out of this loop.
const MachineLoop *ML = MLI->getLoopFor(BB);
if (ML && ML->getHeader()->isLandingPad()) continue;
// Conservatively treat live-in's as an external def.
    // FIXME: That means a reload that's reused in successor block(s) will not
// be LICM'ed.
for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
E = BB->livein_end(); I != E; ++I) {
unsigned Reg = *I;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
PhysRegDefs.set(*AI);
}
SpeculationState = SpeculateUnknown;
for (MachineBasicBlock::iterator
MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
MachineInstr *MI = &*MII;
ProcessMI(MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
}
}
// Gather the registers read / clobbered by the terminator.
BitVector TermRegs(NumRegs);
MachineBasicBlock::iterator TI = Preheader->getFirstTerminator();
if (TI != Preheader->end()) {
for (unsigned i = 0, e = TI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = TI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
TermRegs.set(*AI);
}
}
// Now evaluate whether the potential candidates qualify.
// 1. Check if the candidate defined register is defined by another
// instruction in the loop.
// 2. If the candidate is a load from stack slot (always true for now),
// check if the slot is stored anywhere in the loop.
  // 3. Make sure the candidate's def does not clobber registers read by the
  //    terminator. Similarly, its def should not be clobbered by the
  //    terminator.
for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
if (Candidates[i].FI != INT_MIN &&
StoredFIs.count(Candidates[i].FI))
continue;
unsigned Def = Candidates[i].Def;
if (!PhysRegClobbers.test(Def) && !TermRegs.test(Def)) {
bool Safe = true;
MachineInstr *MI = Candidates[i].MI;
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
const MachineOperand &MO = MI->getOperand(j);
if (!MO.isReg() || MO.isDef() || !MO.getReg())
continue;
unsigned Reg = MO.getReg();
if (PhysRegDefs.test(Reg) ||
PhysRegClobbers.test(Reg)) {
// If it's using a non-loop-invariant register, then it's obviously
// not safe to hoist.
Safe = false;
break;
}
}
if (Safe)
HoistPostRA(MI, Candidates[i].Def);
}
}
}
/// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
/// loop, and make sure it is not killed by any instructions in the loop.
void MachineLICM::AddToLiveIns(unsigned Reg) {
const std::vector<MachineBasicBlock *> &Blocks = CurLoop->getBlocks();
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
MachineBasicBlock *BB = Blocks[i];
if (!BB->isLiveIn(Reg))
BB->addLiveIn(Reg);
for (MachineBasicBlock::iterator
MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
MachineInstr *MI = &*MII;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
MO.setIsKill(false);
}
}
}
}
/// HoistPostRA - When an instruction is found to use only loop-invariant
/// operands and is safe to hoist, this function is called to do the
/// dirty work.
void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
MachineBasicBlock *Preheader = getCurPreheader();
// Now move the instructions to the predecessor, inserting it before any
// terminator instructions.
DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
<< MI->getParent()->getNumber() << ": " << *MI);
// Splice the instruction to the preheader.
MachineBasicBlock *MBB = MI->getParent();
Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
// Add register to livein list to all the BBs in the current loop since a
// loop invariant must be kept live throughout the whole loop. This is
// important to ensure later passes do not scavenge the def register.
AddToLiveIns(Def);
++NumPostRAHoisted;
Changed = true;
}
// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
// If not then a load from this mbb may not be safe to hoist.
bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
if (SpeculationState != SpeculateUnknown)
return SpeculationState == SpeculateFalse;
if (BB != CurLoop->getHeader()) {
// Check loop exiting blocks.
SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
for (unsigned i = 0, e = CurrentLoopExitingBlocks.size(); i != e; ++i)
if (!DT->dominates(BB, CurrentLoopExitingBlocks[i])) {
SpeculationState = SpeculateTrue;
return false;
}
}
SpeculationState = SpeculateFalse;
return true;
}
void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
// Remember livein register pressure.
BackTrace.push_back(RegPressure);
}
void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
BackTrace.pop_back();
}
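// Note: BackTrace holds one register pressure snapshot per block on the
// current dominator tree path from the loop header; the hoisting heuristics
// below consult it to estimate pressure at every block a hoisted value would
// be live through.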
/// ExitScopeIfDone - Destroy the scope for the MBB that corresponds to the
/// given dominator tree node if it's a leaf or all of its children are done.
/// Walk up the dominator tree to destroy ancestors which are now done.
void MachineLICM::ExitScopeIfDone(MachineDomTreeNode *Node,
DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
if (OpenChildren[Node])
return;
// Pop scope.
ExitScope(Node->getBlock());
// Now traverse upwards to pop ancestors whose children are all done.
while (MachineDomTreeNode *Parent = ParentMap[Node]) {
unsigned Left = --OpenChildren[Parent];
if (Left != 0)
break;
ExitScope(Parent->getBlock());
Node = Parent;
}
}
/// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
/// blocks dominated by the specified header block, and that are in the
/// current loop) in depth-first order w.r.t. the DominatorTree. This allows
/// us to visit definitions before uses, allowing us to hoist a loop body in
/// one pass without iteration.
///
void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
MachineBasicBlock *Preheader = getCurPreheader();
if (!Preheader)
return;
SmallVector<MachineDomTreeNode*, 32> Scopes;
SmallVector<MachineDomTreeNode*, 8> WorkList;
DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
// Perform a DFS walk to determine the order of visit.
WorkList.push_back(HeaderN);
while (!WorkList.empty()) {
MachineDomTreeNode *Node = WorkList.pop_back_val();
assert(Node && "Null dominator tree node?");
MachineBasicBlock *BB = Node->getBlock();
// If the header of the loop containing this basic block is a landing pad,
// then don't try to hoist instructions out of this loop.
const MachineLoop *ML = MLI->getLoopFor(BB);
if (ML && ML->getHeader()->isLandingPad())
continue;
// If this subregion is not in the top level loop at all, skip it.
if (!CurLoop->contains(BB))
continue;
Scopes.push_back(Node);
const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
unsigned NumChildren = Children.size();
// Don't hoist things out of a large switch statement. This often causes
// code to be hoisted that wasn't going to be executed, and increases
// register pressure in a situation where it's likely to matter.
if (BB->succ_size() >= 25)
NumChildren = 0;
OpenChildren[Node] = NumChildren;
// Add children in reverse order so that the next popped worklist node is
// the first child of this node. This means we ultimately traverse the
// DOM tree in exactly the same order as if we'd recursed.
for (int i = (int)NumChildren-1; i >= 0; --i) {
MachineDomTreeNode *Child = Children[i];
ParentMap[Child] = Node;
WorkList.push_back(Child);
}
}
if (Scopes.empty())
return;
// Compute the starting register pressure from the registers live into the
// loop header.
RegSeen.clear();
BackTrace.clear();
InitRegPressure(Preheader);
// Now perform LICM.
for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
MachineDomTreeNode *Node = Scopes[i];
MachineBasicBlock *MBB = Node->getBlock();
EnterScope(MBB);
// Process the block
SpeculationState = SpeculateUnknown;
for (MachineBasicBlock::iterator
MII = MBB->begin(), E = MBB->end(); MII != E; ) {
MachineBasicBlock::iterator NextMII = MII; ++NextMII;
MachineInstr *MI = &*MII;
if (!Hoist(MI, Preheader))
UpdateRegPressure(MI);
MII = NextMII;
}
// If it's a leaf node, it's done. Traverse upwards to pop ancestors.
ExitScopeIfDone(Node, OpenChildren, ParentMap);
}
}
void MachineLICM::SinkIntoLoop() {
MachineBasicBlock *Preheader = getCurPreheader();
if (!Preheader)
return;
SmallVector<MachineInstr *, 8> Candidates;
for (MachineBasicBlock::instr_iterator I = Preheader->instr_begin();
I != Preheader->instr_end(); ++I) {
// We need to ensure that we can safely move this instruction into the loop.
// As such, it must not have side effects, e.g. as a call would.
if (IsLoopInvariantInst(*I) && !HasLoopPHIUse(I))
Candidates.push_back(I);
}
for (MachineInstr *I : Candidates) {
const MachineOperand &MO = I->getOperand(0);
if (!MO.isDef() || !MO.isReg() || !MO.getReg())
continue;
if (!MRI->hasOneDef(MO.getReg()))
continue;
bool CanSink = true;
MachineBasicBlock *B = nullptr;
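// B accumulates the nearest common dominator of all uses; if every use is a
// copy, the candidate is sunk to B below.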
for (MachineInstr &MI : MRI->use_instructions(MO.getReg())) {
// FIXME: Come up with a proper cost model that estimates whether sinking
// the instruction (and thus possibly executing it on every loop
// iteration) is more expensive than a register.
// For now, assume that copies are cheap and thus almost always worth it.
if (!MI.isCopy()) {
CanSink = false;
break;
}
if (!B) {
B = MI.getParent();
continue;
}
B = DT->findNearestCommonDominator(B, MI.getParent());
if (!B) {
CanSink = false;
break;
}
}
if (!CanSink || !B || B == Preheader)
continue;
B->splice(B->getFirstNonPHI(), Preheader, I);
}
}
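/// isOperandKill - Return true if this use operand ends the live range of its
/// register: either it carries a kill flag, or it is the register's only
/// non-debug use.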
static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
}
/// InitRegPressure - Find all virtual register references that are live out of
/// the preheader to initialize the starting "register pressure". Note this
/// does not count live-through (live-in but not used) registers.
void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
std::fill(RegPressure.begin(), RegPressure.end(), 0);
// If the preheader has only a single predecessor and it ends with a
// fallthrough or an unconditional branch, then scan its predecessor for live
// defs as well. This happens whenever the preheader is created by splitting
// the critical edge from the loop predecessor to the loop header.
if (BB->pred_size() == 1) {
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
InitRegPressure(*BB->pred_begin());
}
for (const MachineInstr &MI : *BB)
UpdateRegPressure(&MI, /*ConsiderUnseenAsDef=*/true);
}
/// UpdateRegPressure - Update estimate of register pressure after the
/// specified instruction.
void MachineLICM::UpdateRegPressure(const MachineInstr *MI,
bool ConsiderUnseenAsDef) {
auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/true, ConsiderUnseenAsDef);
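// Apply each pressure-set delta, clamping at zero: a negative running total
// means an earlier def or live-in was never counted, so treat it as no
// pressure rather than letting the counter wrap.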
for (const auto &RPIdAndCost : Cost) {
unsigned Class = RPIdAndCost.first;
if (static_cast<int>(RegPressure[Class]) < -RPIdAndCost.second)
RegPressure[Class] = 0;
else
RegPressure[Class] += RPIdAndCost.second;
}
}
DenseMap<unsigned, int>
MachineLICM::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
bool ConsiderUnseenAsDef) {
DenseMap<unsigned, int> Cost;
if (MI->isImplicitDef())
return Cost;
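// Sign convention for the per-pressure-set costs computed below: a def, or a
// previously unseen use that is not killed here (a live-in, counted only when
// ConsiderUnseenAsDef is set), adds the register class weight; the final use
// (a kill) of an already seen register subtracts it.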
for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || MO.isImplicit())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
// FIXME: It seems bad to use RegSeen only for some of these calculations.
bool isNew = ConsiderSeen ? RegSeen.insert(Reg).second : false;
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
RegClassWeight W = TRI->getRegClassWeight(RC);
int RCCost = 0;
if (MO.isDef())
RCCost = W.RegWeight;
else {
bool isKill = isOperandKill(MO, MRI);
if (isNew && !isKill && ConsiderUnseenAsDef)
// Haven't seen this, it must be a livein.
RCCost = W.RegWeight;
else if (!isNew && isKill)
RCCost = -W.RegWeight;
}
if (RCCost == 0)
continue;
const int *PS = TRI->getRegClassPressureSets(RC);
for (; *PS != -1; ++PS) {
if (Cost.find(*PS) == Cost.end())
Cost[*PS] = RCCost;
else
Cost[*PS] += RCCost;
}
}
return Cost;
}
/// isLoadFromGOTOrConstantPool - Return true if this machine instruction
/// loads from the global offset table or the constant pool.
static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
assert(MI.mayLoad() && "Expected MI that loads!");
for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
E = MI.memoperands_end(); I != E; ++I) {
if (const PseudoSourceValue *PSV = (*I)->getPseudoValue()) {
if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
return true;
}
}
return false;
}
/// IsLICMCandidate - Returns true if the instruction may be a suitable
/// candidate for LICM. For example, if the instruction is a call, then it's
/// obviously not safe to hoist it.
bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
// Check if it's safe to move the instruction.
bool DontMoveAcrossStore = true;
if (!I.isSafeToMove(AA, DontMoveAcrossStore))
return false;
// If it is a load, then check that it is guaranteed to execute by making sure
// that it dominates all exiting blocks. If it doesn't, then there is a path
// out of the loop which does not execute this load, so we can't hoist it.
// Loads from constant memory are not always safe to speculate, for example
// an indexed load from a jump table.
// Stores and side effects are already checked by isSafeToMove.
if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
!IsGuaranteedToExecute(I.getParent()))
return false;
return true;
}
/// IsLoopInvariantInst - Returns true if the instruction is loop
/// invariant. I.e., all virtual register operands are defined outside of the
/// loop, physical registers aren't accessed explicitly, and there are no side
/// effects that aren't captured by the operands or other flags.
///
bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
if (!IsLICMCandidate(I))
return false;
// The instruction is loop invariant if all of its operands are.
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = I.getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
// Don't hoist an instruction that uses or defines a physical register.
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
if (!MRI->isConstantPhysReg(Reg, *I.getParent()->getParent()))
return false;
// Otherwise it's safe to move.
continue;
} else if (!MO.isDead()) {
// A def that isn't dead. We can't move it.
return false;
} else if (CurLoop->getHeader()->isLiveIn(Reg)) {
// If the reg is live into the loop, we can't hoist an instruction
// which would clobber it.
return false;
}
}
if (!MO.isUse())
continue;
assert(MRI->getVRegDef(Reg) &&
"Machine instr not mapped for this vreg?!");
// If the loop contains the definition of an operand, then the instruction
// isn't loop invariant.
if (CurLoop->contains(MRI->getVRegDef(Reg)))
return false;
}
// If we got this far, the instruction is loop invariant!
return true;
}
/// HasLoopPHIUse - Return true if the specified instruction is used by a
/// phi node and hoisting it could cause a copy to be inserted.
bool MachineLICM::HasLoopPHIUse(const MachineInstr *MI) const {
SmallVector<const MachineInstr*, 8> Work(1, MI);
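// Worklist-based walk over the transitive users of MI's defs, looking
// through copies that stay inside the loop.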
do {
MI = Work.pop_back_val();
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
// A PHI may cause a copy to be inserted.
if (UseMI.isPHI()) {
// A PHI inside the loop causes a copy because the live range of Reg is
// extended across the PHI.
if (CurLoop->contains(&UseMI))
return true;
// A PHI in an exit block can cause a copy to be inserted if the PHI
// has multiple predecessors in the loop with different values.
// For now, approximate by rejecting all exit blocks.
if (isExitBlock(UseMI.getParent()))
return true;
continue;
}
// Look past copies as well.
if (UseMI.isCopy() && CurLoop->contains(&UseMI))
Work.push_back(&UseMI);
}
}
} while (!Work.empty());
return false;
}
/// HasHighOperandLatency - Compute the operand latency between a def of 'Reg'
/// and a use in the current loop, and return true if the target considers
/// it 'high'.
bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
unsigned DefIdx, unsigned Reg) const {
if (MRI->use_nodbg_empty(Reg))
return false;
for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
if (UseMI.isCopyLike())
continue;
if (!CurLoop->contains(UseMI.getParent()))
continue;
for (unsigned i = 0, e = UseMI.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = UseMI.getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned MOReg = MO.getReg();
if (MOReg != Reg)
continue;
if (TII->hasHighOperandLatency(SchedModel, MRI, &MI, DefIdx, &UseMI, i))
return true;
}
// Only look at the first in-loop use.
break;
}
return false;
}
/// IsCheapInstruction - Return true if the instruction is marked "cheap" or
/// the operand latency between its def and a use is one or less.
bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
if (TII->isAsCheapAsAMove(&MI) || MI.isCopyLike())
return true;
bool isCheap = false;
unsigned NumDefs = MI.getDesc().getNumDefs();
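// NumDefs counts down as defs are examined; it doubles as a loop bound so we
// stop as soon as every def has been checked.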
for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
MachineOperand &DefMO = MI.getOperand(i);
if (!DefMO.isReg() || !DefMO.isDef())
continue;
--NumDefs;
unsigned Reg = DefMO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
if (!TII->hasLowDefLatency(SchedModel, &MI, i))
return false;
isCheap = true;
}
return isCheap;
}
/// CanCauseHighRegPressure - Visit BBs from the header to the current BB and
/// check whether hoisting an instruction with the given per-pressure-set cost
/// can cause high register pressure.
bool MachineLICM::CanCauseHighRegPressure(const DenseMap<unsigned, int>& Cost,
bool CheapInstr) {
for (const auto &RPIdAndCost : Cost) {
if (RPIdAndCost.second <= 0)
continue;
unsigned Class = RPIdAndCost.first;
int Limit = RegLimit[Class];
// Don't hoist cheap instructions if they would increase register pressure,
// even if we're under the limit.
if (CheapInstr && !HoistCheapInsts)
return true;
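// Otherwise, check the projected pressure in every block on the current
// dominator tree path from the header; exceeding the limit anywhere on that
// path means hoisting could cause high register pressure.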
for (const auto &RP : BackTrace)
if (static_cast<int>(RP[Class]) + RPIdAndCost.second >= Limit)
return true;
}
return false;
}
/// UpdateBackTraceRegPressure - Traverse the back trace from header to the
/// current block and update their register pressures to reflect the effect
/// of hoisting MI from the current block to the preheader.
void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
// First compute the 'cost' of the instruction, i.e. its contribution
// to register pressure.
auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/false,
/*ConsiderUnseenAsDef=*/false);
// Update register pressure of blocks from loop header to current block.
for (auto &RP : BackTrace)
for (const auto &RPIdAndCost : Cost)
RP[RPIdAndCost.first] += RPIdAndCost.second;
}
/// IsProfitableToHoist - Return true if it is potentially profitable to hoist
/// the given loop invariant.
bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
if (MI.isImplicitDef())
return true;
// Besides removing computation from the loop, hoisting an instruction has
// these effects:
//
// - The value defined by the instruction becomes live across the entire
// loop. This increases register pressure in the loop.
//
// - If the value is used by a PHI in the loop, a copy will be required for
// lowering the PHI after extending the live range.
//
// - When hoisting the last use of a value in the loop, that value no longer
// needs to be live in the loop. This lowers register pressure in the loop.
bool CheapInstr = IsCheapInstruction(MI);
bool CreatesCopy = HasLoopPHIUse(&MI);
// Don't hoist a cheap instruction if it would create a copy in the loop.
if (CheapInstr && CreatesCopy) {
DEBUG(dbgs() << "Won't hoist cheap instr with loop PHI use: " << MI);
return false;
}
// Rematerializable instructions should always be hoisted since the register
// allocator can just pull them down again when needed.
if (TII->isTriviallyReMaterializable(&MI, AA))
return true;
// FIXME: If there are long latency loop-invariant instructions inside the
// loop at this point, why didn't the optimizer's LICM hoist them?
for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.isImplicit())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) {
DEBUG(dbgs() << "Hoist High Latency: " << MI);
++NumHighLatency;
return true;
}
}
// Estimate register pressure to determine whether to LICM the instruction.
// In a low register pressure situation, we can be more aggressive about
// hoisting. Also, favor hoisting long latency instructions even in a
// moderately high pressure situation.
// Cheap instructions will only be hoisted if they don't increase register
// pressure at all.
auto Cost = calcRegisterCost(&MI, /*ConsiderSeen=*/false,
/*ConsiderUnseenAsDef=*/false);
// Visit BBs from the header to the current BB; if hoisting this doesn't
// cause high register pressure, then it's safe to proceed.
if (!CanCauseHighRegPressure(Cost, CheapInstr)) {
DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI);
++NumLowRP;
return true;
}
// Don't risk increasing register pressure if it would create copies.
if (CreatesCopy) {
DEBUG(dbgs() << "Won't hoist instr with loop PHI use: " << MI);
return false;
}
// Do not "speculate" in high register pressure situation. If an
// instruction is not guaranteed to be executed in the loop, it's best to be
// conservative.
if (AvoidSpeculation &&
(!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI))) {
DEBUG(dbgs() << "Won't speculate: " << MI);
return false;
}
// In a high register pressure situation, only hoist if the instruction is
// going to be rematerialized.
if (!TII->isTriviallyReMaterializable(&MI, AA) &&
!MI.isInvariantLoad(AA)) {
DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
return false;
}
return true;
}
MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
// Don't unfold simple loads.
if (MI->canFoldAsLoad())
return nullptr;
// If not, we may be able to unfold a load and hoist that.
// First test whether the instruction is loading from an amenable
// memory location.
if (!MI->isInvariantLoad(AA))
return nullptr;
// Next determine the register class for a temporary register.
unsigned LoadRegIndex;
unsigned NewOpc =
TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
/*UnfoldLoad=*/true,
/*UnfoldStore=*/false,
&LoadRegIndex);
if (NewOpc == 0) return nullptr;
const MCInstrDesc &MID = TII->get(NewOpc);
if (MID.getNumDefs() != 1) return nullptr;
MachineFunction &MF = *MI->getParent()->getParent();
const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF);
// Ok, we're unfolding. Create a temporary register and do the unfold.
unsigned Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
bool Success =
TII->unfoldMemoryOperand(MF, MI, Reg,
/*UnfoldLoad=*/true, /*UnfoldStore=*/false,
NewMIs);
(void)Success;
assert(Success &&
"unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
"succeeded!");
assert(NewMIs.size() == 2 &&
"Unfolded a load into multiple instructions!");
MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock::iterator Pos = MI;
MBB->insert(Pos, NewMIs[0]);
MBB->insert(Pos, NewMIs[1]);
// If unfolding produced a load that wasn't loop-invariant or profitable to
// hoist, discard the new instructions and bail.
if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
NewMIs[0]->eraseFromParent();
NewMIs[1]->eraseFromParent();
return nullptr;
}
// Update register pressure for the unfolded instruction.
UpdateRegPressure(NewMIs[1]);
// Otherwise we successfully unfolded a load that we can hoist.
MI->eraseFromParent();
return NewMIs[0];
}
void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
const MachineInstr *MI = &*I;
unsigned Opcode = MI->getOpcode();
CSEMap[Opcode].push_back(MI);
}
}
const MachineInstr*
MachineLICM::LookForDuplicate(const MachineInstr *MI,
std::vector<const MachineInstr*> &PrevMIs) {
for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
const MachineInstr *PrevMI = PrevMIs[i];
if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : nullptr)))
return PrevMI;
}
return nullptr;
}
bool MachineLICM::EliminateCSE(MachineInstr *MI,
DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
// Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
// the undef property onto uses.
if (CI == CSEMap.end() || MI->isImplicitDef())
return false;
if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
// Replace virtual registers defined by MI with their counterparts defined
// by Dup.
SmallVector<unsigned, 2> Defs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
// Physical registers may not differ here.
assert((!MO.isReg() || MO.getReg() == 0 ||
!TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
MO.getReg() == Dup->getOperand(i).getReg()) &&
"Instructions with different phys regs are not identical!");
if (MO.isReg() && MO.isDef() &&
!TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
Defs.push_back(i);
}
SmallVector<const TargetRegisterClass*, 2> OrigRCs;
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
unsigned Idx = Defs[i];
unsigned Reg = MI->getOperand(Idx).getReg();
unsigned DupReg = Dup->getOperand(Idx).getReg();
OrigRCs.push_back(MRI->getRegClass(DupReg));
if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
// Restore the old register classes if there was more than one def.
for (unsigned j = 0; j != i; ++j)
MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
return false;
}
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
unsigned Idx = Defs[i];
unsigned Reg = MI->getOperand(Idx).getReg();
unsigned DupReg = Dup->getOperand(Idx).getReg();
MRI->replaceRegWith(Reg, DupReg);
MRI->clearKillFlags(DupReg);
}
MI->eraseFromParent();
++NumCSEed;
return true;
}
return false;
}
/// MayCSE - Return true if the given instruction will be CSE'd if it's
/// hoisted out of the loop.
bool MachineLICM::MayCSE(MachineInstr *MI) {
unsigned Opcode = MI->getOpcode();
DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
CI = CSEMap.find(Opcode);
// Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
// the undef property onto uses.
if (CI == CSEMap.end() || MI->isImplicitDef())
return false;
return LookForDuplicate(MI, CI->second) != nullptr;
}
/// Hoist - When an instruction is found to use only loop-invariant operands
/// that are safe to hoist, this function is called to do the dirty work.
///
bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
// First check whether we should hoist this instruction.
if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
// If not, try unfolding a hoistable load.
MI = ExtractHoistableLoad(MI);
if (!MI) return false;
}
// Now move the instruction to the predecessor, inserting it before any
// terminator instructions.
DEBUG({
dbgs() << "Hoisting " << *MI;
if (Preheader->getBasicBlock())
dbgs() << " to MachineBasicBlock "
<< Preheader->getName();
if (MI->getParent()->getBasicBlock())
dbgs() << " from MachineBasicBlock "
<< MI->getParent()->getName();
dbgs() << "\n";
});
// If this is the first instruction being hoisted to the preheader,
// initialize the CSE map with potential common expressions.
if (FirstInLoop) {
InitCSEMap(Preheader);
FirstInLoop = false;
}
// Look for opportunity to CSE the hoisted instruction.
unsigned Opcode = MI->getOpcode();
DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
CI = CSEMap.find(Opcode);
if (!EliminateCSE(MI, CI)) {
// Otherwise, splice the instruction to the preheader.
Preheader->splice(Preheader->getFirstTerminator(), MI->getParent(), MI);
// Update register pressure for BBs from header to this block.
UpdateBackTraceRegPressure(MI);
// Clear the kill flags of any register this instruction defines,
// since they may need to be live throughout the entire loop
// rather than just live for part of it.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef() && !MO.isDead())
MRI->clearKillFlags(MO.getReg());
}
// Add to the CSE map.
if (CI != CSEMap.end())
CI->second.push_back(MI);
else
CSEMap[Opcode].push_back(MI);
}
++NumHoisted;
Changed = true;
return true;
}
MachineBasicBlock *MachineLICM::getCurPreheader() {
// Determine the block to which to hoist instructions. If we can't find a
// suitable loop predecessor, we can't do any hoisting.
// If we've tried to get a preheader and failed, don't try again.
if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
return nullptr;
if (!CurPreheader) {
CurPreheader = CurLoop->getLoopPreheader();
if (!CurPreheader) {
MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
if (!Pred) {
CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
return nullptr;
}
CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
if (!CurPreheader) {
CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
return nullptr;
}
}
}
return CurPreheader;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/Passes.cpp | //===-- Passes.cpp - Target independent code generation passes ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines interfaces to access the target independent code
// generation passes provided by the LLVM backend.
//
//===---------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
using namespace llvm;
static cl::opt<bool> DisablePostRA("disable-post-ra", cl::Hidden,
cl::desc("Disable Post Regalloc"));
static cl::opt<bool> DisableBranchFold("disable-branch-fold", cl::Hidden,
cl::desc("Disable branch folding"));
static cl::opt<bool> DisableTailDuplicate("disable-tail-duplicate", cl::Hidden,
cl::desc("Disable tail duplication"));
static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden,
cl::desc("Disable pre-register allocation tail duplication"));
static cl::opt<bool> DisableBlockPlacement("disable-block-placement",
cl::Hidden, cl::desc("Disable probability-driven block placement"));
static cl::opt<bool> EnableBlockPlacementStats("enable-block-placement-stats",
cl::Hidden, cl::desc("Collect probability-driven block placement stats"));
static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
cl::desc("Disable Stack Slot Coloring"));
static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden,
cl::desc("Disable Machine Dead Code Elimination"));
static cl::opt<bool> DisableEarlyIfConversion("disable-early-ifcvt", cl::Hidden,
cl::desc("Disable Early If-conversion"));
static cl::opt<bool> DisableMachineLICM("disable-machine-licm", cl::Hidden,
cl::desc("Disable Machine LICM"));
static cl::opt<bool> DisableMachineCSE("disable-machine-cse", cl::Hidden,
cl::desc("Disable Machine Common Subexpression Elimination"));
static cl::opt<cl::boolOrDefault>
EnableShrinkWrapOpt("enable-shrink-wrap", cl::Hidden,
cl::desc("enable the shrink-wrapping pass"));
static cl::opt<cl::boolOrDefault> OptimizeRegAlloc(
"optimize-regalloc", cl::Hidden,
cl::desc("Enable optimized register allocation compilation path."));
static cl::opt<bool> DisablePostRAMachineLICM("disable-postra-machine-licm",
cl::Hidden,
cl::desc("Disable Machine LICM"));
static cl::opt<bool> DisableMachineSink("disable-machine-sink", cl::Hidden,
cl::desc("Disable Machine Sinking"));
static cl::opt<bool> DisableLSR("disable-lsr", cl::Hidden,
cl::desc("Disable Loop Strength Reduction Pass"));
static cl::opt<bool> DisableConstantHoisting("disable-constant-hoisting",
cl::Hidden, cl::desc("Disable ConstantHoisting"));
static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
cl::desc("Disable Codegen Prepare"));
static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
cl::desc("Disable Copy Propagation pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
static cl::opt<bool> EnableImplicitNullChecks(
"enable-implicit-null-checks",
cl::desc("Fold null checks into faulting memory operations"),
cl::init(false));
static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
cl::desc("Print LLVM IR produced by the loop-reduce pass"));
static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
cl::desc("Print LLVM IR input to isel pass"));
static cl::opt<bool> PrintGCInfo("print-gc", cl::Hidden,
cl::desc("Dump garbage collector data"));
static cl::opt<bool> VerifyMachineCode("verify-machineinstrs", cl::Hidden,
cl::desc("Verify generated machine code"),
cl::init(false),
cl::ZeroOrMore);
static cl::opt<std::string>
PrintMachineInstrs("print-machineinstrs", cl::ValueOptional,
cl::desc("Print machine instrs"),
cl::value_desc("pass-name"), cl::init("option-unspecified"));
// Temporary option to allow experimenting with MachineScheduler as a post-RA
// scheduler. Targets can "properly" enable this with
// substitutePass(&PostRASchedulerID, &PostMachineSchedulerID); Ideally it
// wouldn't be part of the standard pass pipeline, and the target would just add
// a PostRA scheduling pass wherever it wants.
static cl::opt<bool> MISchedPostRA("misched-postra", cl::Hidden,
cl::desc("Run MachineScheduler post regalloc (independent of preRA sched)"));
// Experimental option to run live interval analysis early.
static cl::opt<bool> EarlyLiveIntervals("early-live-intervals", cl::Hidden,
cl::desc("Run live interval analysis earlier in the pipeline"));
static cl::opt<bool> UseCFLAA("use-cfl-aa-in-codegen",
cl::init(false), cl::Hidden,
cl::desc("Enable the new, experimental CFL alias analysis in CodeGen"));
/// Allow standard passes to be disabled by command line options. This supports
/// simple binary flags that either suppress the pass or do nothing.
/// That is, -disable-mypass=false has no effect.
/// These should be converted to boolOrDefault in order to use applyOverride.
static IdentifyingPassPtr applyDisable(IdentifyingPassPtr PassID,
bool Override) {
if (Override)
return IdentifyingPassPtr();
return PassID;
}
/// Allow standard passes to be disabled by the command line, regardless of who
/// is adding the pass.
///
/// StandardID is the pass identified in the standard pass pipeline and provided
/// to addPass(). It may be a target-specific ID in the case that the target
/// directly adds its own pass, but in that case we harmlessly fall through.
///
/// TargetID is the pass that the target has configured to override StandardID.
///
/// StandardID may be a pseudo ID. In that case TargetID is the name of the real
/// pass to run. This allows multiple options to control a single pass depending
/// on where in the pipeline that pass is added.
static IdentifyingPassPtr overridePass(AnalysisID StandardID,
IdentifyingPassPtr TargetID) {
if (StandardID == &PostRASchedulerID)
return applyDisable(TargetID, DisablePostRA);
if (StandardID == &BranchFolderPassID)
return applyDisable(TargetID, DisableBranchFold);
if (StandardID == &TailDuplicateID)
return applyDisable(TargetID, DisableTailDuplicate);
if (StandardID == &TargetPassConfig::EarlyTailDuplicateID)
return applyDisable(TargetID, DisableEarlyTailDup);
if (StandardID == &MachineBlockPlacementID)
return applyDisable(TargetID, DisableBlockPlacement);
if (StandardID == &StackSlotColoringID)
return applyDisable(TargetID, DisableSSC);
if (StandardID == &DeadMachineInstructionElimID)
return applyDisable(TargetID, DisableMachineDCE);
if (StandardID == &EarlyIfConverterID)
return applyDisable(TargetID, DisableEarlyIfConversion);
if (StandardID == &MachineLICMID)
return applyDisable(TargetID, DisableMachineLICM);
if (StandardID == &MachineCSEID)
return applyDisable(TargetID, DisableMachineCSE);
if (StandardID == &TargetPassConfig::PostRAMachineLICMID)
return applyDisable(TargetID, DisablePostRAMachineLICM);
if (StandardID == &MachineSinkingID)
return applyDisable(TargetID, DisableMachineSink);
if (StandardID == &MachineCopyPropagationID)
return applyDisable(TargetID, DisableCopyProp);
return TargetID;
}
//===---------------------------------------------------------------------===//
/// TargetPassConfig
//===---------------------------------------------------------------------===//
INITIALIZE_PASS(TargetPassConfig, "targetpassconfig",
"Target Pass Configuration", false, false)
char TargetPassConfig::ID = 0;
// Pseudo Pass IDs.
char TargetPassConfig::EarlyTailDuplicateID = 0;
char TargetPassConfig::PostRAMachineLICMID = 0;
namespace llvm {
class PassConfigImpl {
public:
// List of passes explicitly substituted by this target. Normally this is
// empty, but it is a convenient way to suppress or replace specific passes
// that are part of a standard pass pipeline without overriding the entire
// pipeline. This mechanism allows target options to inherit a standard pass's
// user interface. For example, a target may disable a standard pass by
// default by substituting a pass ID of zero, and the user may still enable
// that standard pass with an explicit command line option.
DenseMap<AnalysisID,IdentifyingPassPtr> TargetPasses;
/// Store the pairs of <AnalysisID, IdentifyingPassPtr> of which the second
/// pass is inserted after each instance of the first one.
SmallVector<std::pair<AnalysisID, IdentifyingPassPtr>, 4> InsertedPasses;
};
} // namespace llvm
// Out of line virtual method.
TargetPassConfig::~TargetPassConfig() {
delete Impl;
}
// Out of line constructor provides default values for pass options and
// registers all common codegen passes.
TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
: ImmutablePass(ID), PM(&pm), StartBefore(nullptr), StartAfter(nullptr),
StopAfter(nullptr), Started(true), Stopped(false),
AddingMachinePasses(false), TM(tm), Impl(nullptr), Initialized(false),
DisableVerify(false), EnableTailMerge(true), EnableShrinkWrap(false) {
Impl = new PassConfigImpl();
// Register all target independent codegen passes to activate their PassIDs,
// including this pass itself.
initializeCodeGen(*PassRegistry::getPassRegistry());
// Substitute Pseudo Pass IDs for real ones.
substitutePass(&EarlyTailDuplicateID, &TailDuplicateID);
substitutePass(&PostRAMachineLICMID, &MachineLICMID);
}
/// Insert InsertedPassID pass after TargetPassID.
void TargetPassConfig::insertPass(AnalysisID TargetPassID,
IdentifyingPassPtr InsertedPassID) {
assert(((!InsertedPassID.isInstance() &&
TargetPassID != InsertedPassID.getID()) ||
(InsertedPassID.isInstance() &&
TargetPassID != InsertedPassID.getInstance()->getPassID())) &&
"Insert a pass after itself!");
std::pair<AnalysisID, IdentifyingPassPtr> P(TargetPassID, InsertedPassID);
Impl->InsertedPasses.push_back(P);
}
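// Illustrative usage from a hypothetical target (MyTargetCleanupID is made
// up): run a target-specific cleanup pass after every MachineLICM instance.
//
//   PassConfig->insertPass(&MachineLICMID, &MyTargetCleanupID);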
/// createPassConfig - Create a pass configuration object to be used by
/// addPassToEmitX methods for generating a pipeline of CodeGen passes.
///
/// Targets may override this to extend TargetPassConfig.
TargetPassConfig *LLVMTargetMachine::createPassConfig(PassManagerBase &PM) {
return new TargetPassConfig(this, PM);
}
TargetPassConfig::TargetPassConfig()
: ImmutablePass(ID), PM(nullptr) {
llvm_unreachable("TargetPassConfig should not be constructed on-the-fly");
}
// Helper to verify the analysis is really immutable.
void TargetPassConfig::setOpt(bool &Opt, bool Val) {
assert(!Initialized && "PassConfig is immutable");
Opt = Val;
}
void TargetPassConfig::substitutePass(AnalysisID StandardID,
IdentifyingPassPtr TargetID) {
Impl->TargetPasses[StandardID] = TargetID;
}
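// For example, a target can replace the standard post-RA scheduler via
// substitutePass(&PostRASchedulerID, &PostMachineSchedulerID), as noted for
// -misched-postra above, or suppress a standard pass by default by
// substituting a pass ID of zero.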
IdentifyingPassPtr TargetPassConfig::getPassSubstitution(AnalysisID ID) const {
DenseMap<AnalysisID, IdentifyingPassPtr>::const_iterator
I = Impl->TargetPasses.find(ID);
if (I == Impl->TargetPasses.end())
return ID;
return I->second;
}
/// Add a pass to the PassManager if that pass is supposed to be run. If the
/// Started/Stopped flags indicate either that the compilation should start at
/// a later pass or that it should stop after an earlier pass, then do not add
/// the pass. Finally, compare the current pass against the StartAfter
/// and StopAfter options and change the Started/Stopped flags accordingly.
void TargetPassConfig::addPass(Pass *P, bool verifyAfter, bool printAfter) {
assert(!Initialized && "PassConfig is immutable");
// Cache the Pass ID here in case the pass manager finds this pass is
// redundant with ones already scheduled / available, and deletes it.
// Fundamentally, once we add the pass to the manager, we no longer own it
// and shouldn't reference it.
AnalysisID PassID = P->getPassID();
if (StartBefore == PassID)
Started = true;
if (Started && !Stopped) {
std::string Banner;
// Construct banner message before PM->add() as that may delete the pass.
if (AddingMachinePasses && (printAfter || verifyAfter))
Banner = std::string("After ") + std::string(P->getPassName());
PM->add(P);
if (AddingMachinePasses) {
if (printAfter)
addPrintPass(Banner);
if (verifyAfter)
addVerifyPass(Banner);
}
// Add the passes after the pass P if there are any.
for (SmallVectorImpl<std::pair<AnalysisID, IdentifyingPassPtr> >::iterator
I = Impl->InsertedPasses.begin(),
E = Impl->InsertedPasses.end();
I != E; ++I) {
if ((*I).first == PassID) {
assert((*I).second.isValid() && "Illegal Pass ID!");
Pass *NP;
if ((*I).second.isInstance())
NP = (*I).second.getInstance();
else {
NP = Pass::createPass((*I).second.getID());
assert(NP && "Pass ID not registered");
}
addPass(NP, false, false);
}
}
} else {
delete P;
}
if (StopAfter == PassID)
Stopped = true;
if (StartAfter == PassID)
Started = true;
if (Stopped && !Started)
report_fatal_error("Cannot stop compilation after pass that is not run");
}
/// Add a CodeGen pass at this point in the pipeline after checking for target
/// and command line overrides.
///
/// addPass cannot return a pointer to the pass instance because it is internal
/// to the PassManager, and the instance we create here may already be freed.
AnalysisID TargetPassConfig::addPass(AnalysisID PassID, bool verifyAfter,
bool printAfter) {
IdentifyingPassPtr TargetID = getPassSubstitution(PassID);
IdentifyingPassPtr FinalPtr = overridePass(PassID, TargetID);
if (!FinalPtr.isValid())
return nullptr;
Pass *P;
if (FinalPtr.isInstance())
P = FinalPtr.getInstance();
else {
P = Pass::createPass(FinalPtr.getID());
if (!P)
llvm_unreachable("Pass ID not registered");
}
AnalysisID FinalID = P->getPassID();
addPass(P, verifyAfter, printAfter); // Ends the lifetime of P.
return FinalID;
}
void TargetPassConfig::printAndVerify(const std::string &Banner) {
addPrintPass(Banner);
addVerifyPass(Banner);
}
void TargetPassConfig::addPrintPass(const std::string &Banner) {
if (TM->shouldPrintMachineCode())
PM->add(createMachineFunctionPrinterPass(dbgs(), Banner));
}
void TargetPassConfig::addVerifyPass(const std::string &Banner) {
if (VerifyMachineCode)
PM->add(createMachineVerifierPass(Banner));
}
/// Add common target configurable passes that perform LLVM IR to IR transforms
/// following machine independent optimization.
void TargetPassConfig::addIRPasses() {
// Basic AliasAnalysis support.
// Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
// BasicAliasAnalysis wins if they disagree. This is intended to help
// support "obvious" type-punning idioms.
if (UseCFLAA)
addPass(createCFLAliasAnalysisPass());
addPass(createTypeBasedAliasAnalysisPass());
addPass(createScopedNoAliasAAPass());
addPass(createBasicAliasAnalysisPass());
// Before running any passes, run the verifier to determine if the input
// coming from the front-end and/or optimizer is valid.
if (!DisableVerify)
addPass(createVerifierPass());
// Run loop strength reduction before anything else.
if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
addPass(createLoopStrengthReducePass());
if (PrintLSR)
addPass(createPrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
}
// Run GC lowering passes for builtin collectors
// TODO: add a pass insertion point here
addPass(createGCLoweringPass());
addPass(createShadowStackGCLoweringPass());
// Make sure that no unreachable blocks are instruction selected.
addPass(createUnreachableBlockEliminationPass());
// Prepare expensive constants for SelectionDAG.
if (getOptLevel() != CodeGenOpt::None && !DisableConstantHoisting)
addPass(createConstantHoistingPass());
if (getOptLevel() != CodeGenOpt::None && !DisablePartialLibcallInlining)
addPass(createPartiallyInlineLibCallsPass());
}
/// Turn exception handling constructs into something the code generators can
/// handle.
void TargetPassConfig::addPassesToHandleExceptions() {
switch (TM->getMCAsmInfo()->getExceptionHandlingType()) {
case ExceptionHandling::SjLj:
// SjLj piggy-backs on dwarf for this bit; the cleanups done here apply to
// both. Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
// catch info can get misplaced when a selector ends up more than one block
// removed from the parent invoke(s). This could happen when a landing
// pad is shared by multiple invokes and is also a target of a normal
// edge from elsewhere.
addPass(createSjLjEHPreparePass());
// FALLTHROUGH
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
addPass(createDwarfEHPass(TM));
break;
case ExceptionHandling::WinEH:
// We support using both GCC-style and MSVC-style exceptions on Windows, so
// add both preparation passes. Each pass will only actually run if it
// recognizes the personality function.
addPass(createWinEHPass(TM));
addPass(createDwarfEHPass(TM));
break;
case ExceptionHandling::None:
addPass(createLowerInvokePass());
// The lower invoke pass may create unreachable code. Remove it.
addPass(createUnreachableBlockEliminationPass());
break;
}
}
/// Add pass to prepare the LLVM IR for code generation. This should be done
/// before exception handling preparation passes.
void TargetPassConfig::addCodeGenPrepare() {
if (getOptLevel() != CodeGenOpt::None && !DisableCGP)
addPass(createCodeGenPreparePass(TM));
addPass(createRewriteSymbolsPass());
}
/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
void TargetPassConfig::addISelPrepare() {
addPreISel();
// Add both the safe stack and the stack protection passes: each of them will
// only protect functions that have corresponding attributes.
addPass(createSafeStackPass());
addPass(createStackProtectorPass(TM));
if (PrintISelInput)
addPass(createPrintFunctionPass(
dbgs(), "\n\n*** Final LLVM Code input to ISel ***\n"));
// All passes which modify the LLVM IR are now complete; run the verifier
// to ensure that the IR is valid.
if (!DisableVerify)
addPass(createVerifierPass());
}
/// Add the complete set of target-independent postISel code generator passes.
///
/// This can be read as the standard order of major LLVM CodeGen stages. Stages
/// with nontrivial configuration or multiple passes are broken out below in
/// add%Stage routines.
///
/// Any TargetPassConfig::addXX routine may be overridden by the Target. The
/// addPre/Post methods with empty header implementations allow injecting
/// target-specific fixups just before or after major stages. Additionally,
/// targets have the flexibility to change pass order within a stage by
/// overriding default implementation of add%Stage routines below. Each
/// technique has maintainability tradeoffs because alternate pass orders are
/// not well supported. addPre/Post works better if the target pass is easily
/// tied to a common pass. But if it has subtle dependencies on multiple passes,
/// the target should override the stage instead.
///
/// TODO: We could use a single addPre/Post(ID) hook to allow pass injection
/// before/after any target-independent pass. But it's currently overkill.
void TargetPassConfig::addMachinePasses() {
AddingMachinePasses = true;
// Insert a machine instr printer pass after the specified pass.
// If -print-machineinstrs specified, print machineinstrs after all passes.
if (StringRef(PrintMachineInstrs.getValue()).equals(""))
TM->Options.PrintMachineCode = true;
else if (!StringRef(PrintMachineInstrs.getValue())
.equals("option-unspecified")) {
const PassRegistry *PR = PassRegistry::getPassRegistry();
const PassInfo *TPI = PR->getPassInfo(PrintMachineInstrs.getValue());
const PassInfo *IPI = PR->getPassInfo(StringRef("machineinstr-printer"));
assert(TPI && IPI && "Pass ID not registered!");
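// An AnalysisID is the address of the pass's static ID member; the pass
// registry exposes it as the type info pointer, recovered by these casts.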
const char *TID = (const char *)(TPI->getTypeInfo());
const char *IID = (const char *)(IPI->getTypeInfo());
insertPass(TID, IID);
}
// Print the instruction selected machine code...
printAndVerify("After Instruction Selection");
// Expand pseudo-instructions emitted by ISel.
addPass(&ExpandISelPseudosID);
// Add passes that optimize machine instructions in SSA form.
if (getOptLevel() != CodeGenOpt::None) {
addMachineSSAOptimization();
} else {
// If the target requests it, assign local variables to stack slots relative
// to one another and simplify frame index references where possible.
addPass(&LocalStackSlotAllocationID, false);
}
// Run pre-ra passes.
addPreRegAlloc();
// Run register allocation and passes that are tightly coupled with it,
// including phi elimination and scheduling.
if (getOptimizeRegAlloc())
addOptimizedRegAlloc(createRegAllocPass(true));
else
addFastRegAlloc(createRegAllocPass(false));
// Run post-ra passes.
addPostRegAlloc();
// Insert prolog/epilog code. Eliminate abstract frame index references...
if (getEnableShrinkWrap())
addPass(&ShrinkWrapID);
addPass(&PrologEpilogCodeInserterID);
// Add passes that optimize machine instructions after register allocation.
if (getOptLevel() != CodeGenOpt::None)
addMachineLateOptimization();
// Expand pseudo instructions before second scheduling pass.
addPass(&ExpandPostRAPseudosID);
// Run pre-sched2 passes.
addPreSched2();
if (EnableImplicitNullChecks)
addPass(&ImplicitNullChecksID);
// Second pass scheduler.
if (getOptLevel() != CodeGenOpt::None) {
if (MISchedPostRA)
addPass(&PostMachineSchedulerID);
else
addPass(&PostRASchedulerID);
}
// GC
if (addGCPasses()) {
if (PrintGCInfo)
addPass(createGCInfoPrinter(dbgs()), false, false);
}
// Basic block placement.
if (getOptLevel() != CodeGenOpt::None)
addBlockPlacement();
addPreEmitPass();
addPass(&StackMapLivenessID, false);
AddingMachinePasses = false;
}
/// Add passes that optimize machine instructions in SSA form.
void TargetPassConfig::addMachineSSAOptimization() {
// Pre-ra tail duplication.
addPass(&EarlyTailDuplicateID);
// Optimize PHIs before DCE: removing dead PHI cycles may make more
// instructions dead.
addPass(&OptimizePHIsID, false);
// This pass merges large allocas. StackSlotColoring is a different pass
// which merges spill slots.
addPass(&StackColoringID, false);
// If the target requests it, assign local variables to stack slots relative
// to one another and simplify frame index references where possible.
addPass(&LocalStackSlotAllocationID, false);
// With optimization, dead code should already be eliminated. However
// there is one known exception: lowered code for arguments that are only
// used by tail calls, where the tail calls reuse the incoming stack
// arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
addPass(&DeadMachineInstructionElimID);
// Allow targets to insert passes that improve instruction level parallelism,
// like if-conversion. Such passes will typically need dominator trees and
// loop info, just like LICM and CSE below.
addILPOpts();
addPass(&MachineLICMID, false);
addPass(&MachineCSEID, false);
addPass(&MachineSinkingID);
addPass(&PeepholeOptimizerID, false);
// Clean-up the dead code that may have been generated by peephole
// rewriting.
addPass(&DeadMachineInstructionElimID);
}
bool TargetPassConfig::getEnableShrinkWrap() const {
switch (EnableShrinkWrapOpt) {
case cl::BOU_UNSET:
return EnableShrinkWrap && getOptLevel() != CodeGenOpt::None;
// If EnableShrinkWrap is set, it takes precedence over whatever the
// target sets. The rationale is that we assume we want to test
// something related to shrink-wrapping.
case cl::BOU_TRUE:
return true;
case cl::BOU_FALSE:
return false;
}
llvm_unreachable("Invalid shrink-wrapping state");
}
//===---------------------------------------------------------------------===//
/// Register Allocation Pass Configuration
//===---------------------------------------------------------------------===//
bool TargetPassConfig::getOptimizeRegAlloc() const {
switch (OptimizeRegAlloc) {
case cl::BOU_UNSET: return getOptLevel() != CodeGenOpt::None;
case cl::BOU_TRUE: return true;
case cl::BOU_FALSE: return false;
}
llvm_unreachable("Invalid optimize-regalloc state");
}
/// RegisterRegAlloc's global Registry tracks allocator registration.
MachinePassRegistry RegisterRegAlloc::Registry;
/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
static RegisterRegAlloc
defaultRegAlloc("default",
"pick register allocator based on -O option",
useDefaultRegisterAllocator);
/// -regalloc=... command line option.
static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
RegisterPassParser<RegisterRegAlloc> >
RegAlloc("regalloc",
cl::init(&useDefaultRegisterAllocator),
cl::desc("Register allocator to use"));
/// Instantiate the default register allocator pass for this target for either
/// the optimized or unoptimized allocation path. This will be added to the pass
/// manager by addFastRegAlloc in the unoptimized case or addOptimizedRegAlloc
/// in the optimized case.
///
/// A target that uses the standard regalloc pass order for fast or optimized
/// allocation may still override this for per-target regalloc
/// selection. But -regalloc=... always takes precedence.
FunctionPass *TargetPassConfig::createTargetRegisterAllocator(bool Optimized) {
if (Optimized)
return createGreedyRegisterAllocator();
else
return createFastRegisterAllocator();
}
/// Find and instantiate the register allocation pass requested by this target
/// at the current optimization level. Different register allocators are
/// defined as separate passes because they may require different analyses.
///
/// This helper ensures that the regalloc= option is always available,
/// even for targets that override the default allocator.
///
/// FIXME: When MachinePassRegistry registers pass IDs instead of function ptrs,
/// this can be folded into addPass.
FunctionPass *TargetPassConfig::createRegAllocPass(bool Optimized) {
RegisterRegAlloc::FunctionPassCtor Ctor = RegisterRegAlloc::getDefault();
// Initialize the global default.
if (!Ctor) {
Ctor = RegAlloc;
RegisterRegAlloc::setDefault(RegAlloc);
}
if (Ctor != useDefaultRegisterAllocator)
return Ctor();
// With no -regalloc= override, ask the target for a regalloc pass.
return createTargetRegisterAllocator(Optimized);
}
/// Return true if the default global register allocator is in use and
/// has not been overridden on the command line with '-regalloc=...'.
bool TargetPassConfig::usingDefaultRegAlloc() const {
return RegAlloc.getNumOccurrences() == 0;
}
/// Add the minimum set of target-independent passes that are required for
/// register allocation. No coalescing or scheduling.
void TargetPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
addPass(&PHIEliminationID, false);
addPass(&TwoAddressInstructionPassID, false);
addPass(RegAllocPass);
}
/// Add standard target-independent passes that are tightly coupled with
/// optimized register allocation, including coalescing, machine instruction
/// scheduling, and register allocation itself.
void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
addPass(&ProcessImplicitDefsID, false);
// LiveVariables currently requires pure SSA form.
//
// FIXME: Once TwoAddressInstruction pass no longer uses kill flags,
// LiveVariables can be removed completely, and LiveIntervals can be directly
// computed. (We still either need to regenerate kill flags after regalloc, or
// preferably fix the scavenger to not depend on them).
addPass(&LiveVariablesID, false);
// Edge splitting is smarter with machine loop info.
addPass(&MachineLoopInfoID, false);
addPass(&PHIEliminationID, false);
// Eventually, we want to run LiveIntervals before PHI elimination.
if (EarlyLiveIntervals)
addPass(&LiveIntervalsID, false);
addPass(&TwoAddressInstructionPassID, false);
addPass(&RegisterCoalescerID);
// PreRA instruction scheduling.
addPass(&MachineSchedulerID);
// Add the selected register allocation pass.
addPass(RegAllocPass);
// Allow targets to change the register assignments before rewriting.
addPreRewrite();
// Finally rewrite virtual registers.
addPass(&VirtRegRewriterID);
// Perform stack slot coloring and post-ra machine LICM.
//
// FIXME: Re-enable coloring with registers when it's capable of adding
// kill markers.
addPass(&StackSlotColoringID);
// Run post-ra machine LICM to hoist reloads / remats.
//
// FIXME: can this move into MachineLateOptimization?
addPass(&PostRAMachineLICMID);
}
//===---------------------------------------------------------------------===//
/// Post RegAlloc Pass Configuration
//===---------------------------------------------------------------------===//
/// Add passes that optimize machine instructions after register allocation.
void TargetPassConfig::addMachineLateOptimization() {
// Branch folding must be run after regalloc and prolog/epilog insertion.
addPass(&BranchFolderPassID);
// Tail duplication.
// Note that duplicating the tail just increases code size and degrades
// performance for targets that require Structured Control Flow.
// In addition, it can also make the CFG irreducible. Thus we disable it.
if (!TM->requiresStructuredCFG())
addPass(&TailDuplicateID);
// Copy propagation.
addPass(&MachineCopyPropagationID);
}
/// Add standard GC passes.
bool TargetPassConfig::addGCPasses() {
addPass(&GCMachineCodeAnalysisID, false);
return true;
}
/// Add standard basic block placement passes.
void TargetPassConfig::addBlockPlacement() {
if (addPass(&MachineBlockPlacementID, false)) {
// Run a separate pass to collect block placement statistics.
if (EnableBlockPlacementStats)
addPass(&MachineBlockPlacementStatsID);
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LexicalScopes.cpp | //===- LexicalScopes.cpp - Collecting lexical scope info ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements LexicalScopes analysis.
//
// This pass collects lexical scope information and maps machine instructions
// to respective lexical scopes.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
using namespace llvm;
#define DEBUG_TYPE "lexicalscopes"
/// reset - Reset the instance so that it's prepared for another function.
void LexicalScopes::reset() {
MF = nullptr;
CurrentFnLexicalScope = nullptr;
LexicalScopeMap.clear();
AbstractScopeMap.clear();
InlinedLexicalScopeMap.clear();
AbstractScopesList.clear();
}
/// initialize - Scan the machine function and construct the lexical scope nest.
void LexicalScopes::initialize(const MachineFunction &Fn) {
reset();
MF = &Fn;
SmallVector<InsnRange, 4> MIRanges;
DenseMap<const MachineInstr *, LexicalScope *> MI2ScopeMap;
extractLexicalScopes(MIRanges, MI2ScopeMap);
if (CurrentFnLexicalScope) {
constructScopeNest(CurrentFnLexicalScope);
assignInstructionRanges(MIRanges, MI2ScopeMap);
}
}
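// The analysis thus runs in two phases: extractLexicalScopes() builds the
// scope tree and a flat list of instruction ranges, after which
// constructScopeNest() and assignInstructionRanges() turn that into a
// DFS-numbered tree with per-scope ranges attached.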
/// extractLexicalScopes - Extract instruction ranges for each lexical scope
/// for the given machine function.
void LexicalScopes::extractLexicalScopes(
SmallVectorImpl<InsnRange> &MIRanges,
DenseMap<const MachineInstr *, LexicalScope *> &MI2ScopeMap) {
// Scan each instruction and create scopes. First build working set of scopes.
for (const auto &MBB : *MF) {
const MachineInstr *RangeBeginMI = nullptr;
const MachineInstr *PrevMI = nullptr;
const DILocation *PrevDL = nullptr;
for (const auto &MInsn : MBB) {
// Check if instruction has valid location information.
const DILocation *MIDL = MInsn.getDebugLoc();
if (!MIDL) {
PrevMI = &MInsn;
continue;
}
// If scope has not changed then skip this instruction.
if (MIDL == PrevDL) {
PrevMI = &MInsn;
continue;
}
// Ignore DBG_VALUE. It does not contribute to any instruction in output.
if (MInsn.isDebugValue())
continue;
if (RangeBeginMI) {
// If we have already seen a beginning of an instruction range and
// current instruction scope does not match scope of first instruction
// in this range then create a new instruction range.
InsnRange R(RangeBeginMI, PrevMI);
MI2ScopeMap[RangeBeginMI] = getOrCreateLexicalScope(PrevDL);
MIRanges.push_back(R);
}
// This is a beginning of a new instruction range.
RangeBeginMI = &MInsn;
// Reset previous markers.
PrevMI = &MInsn;
PrevDL = MIDL;
}
// Create last instruction range.
if (RangeBeginMI && PrevMI && PrevDL) {
InsnRange R(RangeBeginMI, PrevMI);
MIRanges.push_back(R);
MI2ScopeMap[RangeBeginMI] = getOrCreateLexicalScope(PrevDL);
}
}
}
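// A small worked example of the logic above: for a block containing I1 and I2
// in scope S1 followed by I3 in scope S2, the loop emits the range [I1, I2]
// (keyed to S1 via MI2ScopeMap) when it reaches I3, and the trailing range
// [I3, I3] for S2 is emitted by the code after the loop.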
/// findLexicalScope - Find lexical scope, either regular or inlined, for the
/// given DebugLoc. Return NULL if not found.
LexicalScope *LexicalScopes::findLexicalScope(const DILocation *DL) {
DILocalScope *Scope = DL->getScope();
if (!Scope)
return nullptr;
// The scope that we were created with could have an extra file - which
// isn't what we care about in this case.
if (auto *File = dyn_cast<DILexicalBlockFile>(Scope))
Scope = File->getScope();
if (auto *IA = DL->getInlinedAt()) {
auto I = InlinedLexicalScopeMap.find(std::make_pair(Scope, IA));
return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
}
return findLexicalScope(Scope);
}
/// getOrCreateLexicalScope - Find lexical scope for the given DebugLoc. If
/// not available then create new lexical scope.
LexicalScope *LexicalScopes::getOrCreateLexicalScope(const DILocalScope *Scope,
const DILocation *IA) {
if (IA) {
// Create an abstract scope for inlined function.
getOrCreateAbstractScope(Scope);
// Create an inlined scope for inlined function.
return getOrCreateInlinedScope(Scope, IA);
}
return getOrCreateRegularScope(Scope);
}
/// getOrCreateRegularScope - Find or create a regular lexical scope.
LexicalScope *
LexicalScopes::getOrCreateRegularScope(const DILocalScope *Scope) {
if (auto *File = dyn_cast<DILexicalBlockFile>(Scope))
Scope = File->getScope();
auto I = LexicalScopeMap.find(Scope);
if (I != LexicalScopeMap.end())
return &I->second;
// FIXME: Should the following dyn_cast be DILexicalBlock?
LexicalScope *Parent = nullptr;
if (auto *Block = dyn_cast<DILexicalBlockBase>(Scope))
Parent = getOrCreateLexicalScope(Block->getScope());
I = LexicalScopeMap.emplace(std::piecewise_construct,
std::forward_as_tuple(Scope),
std::forward_as_tuple(Parent, Scope, nullptr,
false)).first;
if (!Parent) {
assert(cast<DISubprogram>(Scope)->describes(MF->getFunction()));
assert(!CurrentFnLexicalScope);
CurrentFnLexicalScope = &I->second;
}
return &I->second;
}
/// getOrCreateInlinedScope - Find or create an inlined lexical scope.
LexicalScope *
LexicalScopes::getOrCreateInlinedScope(const DILocalScope *Scope,
const DILocation *InlinedAt) {
std::pair<const DILocalScope *, const DILocation *> P(Scope, InlinedAt);
auto I = InlinedLexicalScopeMap.find(P);
if (I != InlinedLexicalScopeMap.end())
return &I->second;
LexicalScope *Parent;
if (auto *Block = dyn_cast<DILexicalBlockBase>(Scope))
Parent = getOrCreateInlinedScope(Block->getScope(), InlinedAt);
else
Parent = getOrCreateLexicalScope(InlinedAt);
I = InlinedLexicalScopeMap.emplace(std::piecewise_construct,
std::forward_as_tuple(P),
std::forward_as_tuple(Parent, Scope,
InlinedAt, false))
.first;
return &I->second;
}
/// getOrCreateAbstractScope - Find or create an abstract lexical scope.
LexicalScope *
LexicalScopes::getOrCreateAbstractScope(const DILocalScope *Scope) {
assert(Scope && "Invalid Scope encoding!");
if (auto *File = dyn_cast<DILexicalBlockFile>(Scope))
Scope = File->getScope();
auto I = AbstractScopeMap.find(Scope);
if (I != AbstractScopeMap.end())
return &I->second;
// FIXME: Should the following isa be DILexicalBlock?
LexicalScope *Parent = nullptr;
if (auto *Block = dyn_cast<DILexicalBlockBase>(Scope))
Parent = getOrCreateAbstractScope(Block->getScope());
I = AbstractScopeMap.emplace(std::piecewise_construct,
std::forward_as_tuple(Scope),
std::forward_as_tuple(Parent, Scope,
nullptr, true)).first;
if (isa<DISubprogram>(Scope))
AbstractScopesList.push_back(&I->second);
return &I->second;
}
/// constructScopeNest
void LexicalScopes::constructScopeNest(LexicalScope *Scope) {
assert(Scope && "Unable to calculate scope dominance graph!");
SmallVector<LexicalScope *, 4> WorkStack;
WorkStack.push_back(Scope);
unsigned Counter = 0;
while (!WorkStack.empty()) {
LexicalScope *WS = WorkStack.back();
const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren();
bool visitedChildren = false;
for (SmallVectorImpl<LexicalScope *>::const_iterator SI = Children.begin(),
SE = Children.end();
SI != SE; ++SI) {
LexicalScope *ChildScope = *SI;
if (!ChildScope->getDFSOut()) {
WorkStack.push_back(ChildScope);
visitedChildren = true;
ChildScope->setDFSIn(++Counter);
break;
}
}
if (!visitedChildren) {
WorkStack.pop_back();
WS->setDFSOut(++Counter);
}
}
}
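// After this numbering, scope A dominates scope B exactly when B's interval
// nests inside A's, i.e. A.DFSIn < B.DFSIn and B.DFSOut < A.DFSOut; this is
// the property LexicalScope::dominates() relies on. For example, a function
// scope numbered (DFSIn=1, DFSOut=6) dominates a nested block numbered (2, 3).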
/// assignInstructionRanges - Find ranges of instructions covered by each
/// lexical scope.
void LexicalScopes::assignInstructionRanges(
SmallVectorImpl<InsnRange> &MIRanges,
DenseMap<const MachineInstr *, LexicalScope *> &MI2ScopeMap) {
LexicalScope *PrevLexicalScope = nullptr;
for (SmallVectorImpl<InsnRange>::const_iterator RI = MIRanges.begin(),
RE = MIRanges.end();
RI != RE; ++RI) {
const InsnRange &R = *RI;
LexicalScope *S = MI2ScopeMap.lookup(R.first);
assert(S && "Lost LexicalScope for a machine instruction!");
if (PrevLexicalScope && !PrevLexicalScope->dominates(S))
PrevLexicalScope->closeInsnRange(S);
S->openInsnRange(R.first);
S->extendInsnRange(R.second);
PrevLexicalScope = S;
}
if (PrevLexicalScope)
PrevLexicalScope->closeInsnRange();
}
/// getMachineBasicBlocks - Populate given set using machine basic blocks which
/// have machine instructions that belong to lexical scope identified by
/// DebugLoc.
void LexicalScopes::getMachineBasicBlocks(
const DILocation *DL, SmallPtrSetImpl<const MachineBasicBlock *> &MBBs) {
MBBs.clear();
LexicalScope *Scope = getOrCreateLexicalScope(DL);
if (!Scope)
return;
if (Scope == CurrentFnLexicalScope) {
for (const auto &MBB : *MF)
MBBs.insert(&MBB);
return;
}
SmallVectorImpl<InsnRange> &InsnRanges = Scope->getRanges();
for (SmallVectorImpl<InsnRange>::iterator I = InsnRanges.begin(),
E = InsnRanges.end();
I != E; ++I) {
InsnRange &R = *I;
MBBs.insert(R.first->getParent());
}
}
/// dominates - Return true if DebugLoc's lexical scope dominates at least one
/// machine instruction's lexical scope in a given machine basic block.
bool LexicalScopes::dominates(const DILocation *DL, MachineBasicBlock *MBB) {
LexicalScope *Scope = getOrCreateLexicalScope(DL);
if (!Scope)
return false;
// Current function scope covers all basic blocks in the function.
if (Scope == CurrentFnLexicalScope && MBB->getParent() == MF)
return true;
bool Result = false;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
++I) {
if (const DILocation *IDL = I->getDebugLoc())
if (LexicalScope *IScope = getOrCreateLexicalScope(IDL))
if (Scope->dominates(IScope))
return true;
}
return Result;
}
/// dump - Print data structures.
void LexicalScope::dump(unsigned Indent) const {
#ifndef NDEBUG
raw_ostream &err = dbgs();
err.indent(Indent);
err << "DFSIn: " << DFSIn << " DFSOut: " << DFSOut << "\n";
const MDNode *N = Desc;
err.indent(Indent);
N->dump();
if (AbstractScope)
err << std::string(Indent, ' ') << "Abstract Scope\n";
if (!Children.empty())
err << std::string(Indent + 2, ' ') << "Children ...\n";
for (unsigned i = 0, e = Children.size(); i != e; ++i)
if (Children[i] != this)
Children[i]->dump(Indent + 2);
#endif
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MIRPrinter.h | //===- MIRPrinter.h - MIR serialization format printer --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the functions that print out the LLVM IR and the machine
// functions using the MIR serialization format.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_MIRPRINTER_H
#define LLVM_LIB_CODEGEN_MIRPRINTER_H
namespace llvm {
class MachineFunction;
class Module;
class raw_ostream;
/// Print LLVM IR using the MIR serialization format to the given output stream.
void printMIR(raw_ostream &OS, const Module &M);
/// Print a machine function using the MIR serialization format to the given
/// output stream.
void printMIR(raw_ostream &OS, const MachineFunction &MF);
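// Example use (a sketch, with M and MF standing for a Module and a
// MachineFunction in scope):
//
//   llvm::printMIR(llvm::errs(), M);  // whole-module MIR output
//   llvm::printMIR(llvm::errs(), MF); // a single machine function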
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/InterferenceCache.h | //===-- InterferenceCache.h - Caching per-block interference ---*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InterferenceCache remembers per-block interference from LiveIntervalUnions,
// fixed RegUnit interference, and register masks.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_INTERFERENCECACHE_H
#define LLVM_LIB_CODEGEN_INTERFERENCECACHE_H
#include "llvm/CodeGen/LiveIntervalUnion.h"
namespace llvm {
class LiveIntervals;
class LLVM_LIBRARY_VISIBILITY InterferenceCache {
const TargetRegisterInfo *TRI;
LiveIntervalUnion *LIUArray;
MachineFunction *MF;
/// BlockInterference - information about the interference in a single basic
/// block.
struct BlockInterference {
BlockInterference() : Tag(0) {}
unsigned Tag;
SlotIndex First;
SlotIndex Last;
};
/// Entry - A cache entry containing interference information for all aliases
/// of PhysReg in all basic blocks.
class Entry {
/// PhysReg - The register currently represented.
unsigned PhysReg;
/// Tag - Cache tag is changed when any of the underlying LiveIntervalUnions
/// change.
unsigned Tag;
/// RefCount - The total number of Cursor instances referring to this Entry.
unsigned RefCount;
/// MF - The current function.
MachineFunction *MF;
/// Indexes - Mapping block numbers to SlotIndex ranges.
SlotIndexes *Indexes;
/// LIS - Used for accessing register mask interference maps.
LiveIntervals *LIS;
/// PrevPos - The previous position the iterators were moved to.
SlotIndex PrevPos;
/// RegUnitInfo - Information tracked about each RegUnit in PhysReg.
/// When PrevPos is set, the iterators are valid as if advanceTo(PrevPos)
/// had just been called.
struct RegUnitInfo {
/// Iterator pointing into the LiveIntervalUnion containing virtual
/// register interference.
LiveIntervalUnion::SegmentIter VirtI;
/// Tag of the LIU last time we looked.
unsigned VirtTag;
/// Fixed interference in RegUnit.
LiveRange *Fixed;
/// Iterator pointing into the fixed RegUnit interference.
LiveInterval::iterator FixedI;
RegUnitInfo(LiveIntervalUnion &LIU)
: VirtTag(LIU.getTag()), Fixed(nullptr) {
VirtI.setMap(LIU.getMap());
}
};
    /// Info for each RegUnit in PhysReg. It is very rare for a PhysReg to
    /// have more than 4 RegUnits.
SmallVector<RegUnitInfo, 4> RegUnits;
/// Blocks - Interference for each block in the function.
SmallVector<BlockInterference, 8> Blocks;
/// update - Recompute Blocks[MBBNum]
void update(unsigned MBBNum);
public:
Entry() : PhysReg(0), Tag(0), RefCount(0), Indexes(nullptr), LIS(nullptr) {}
void clear(MachineFunction *mf, SlotIndexes *indexes, LiveIntervals *lis) {
assert(!hasRefs() && "Cannot clear cache entry with references");
PhysReg = 0;
MF = mf;
Indexes = indexes;
LIS = lis;
}
unsigned getPhysReg() const { return PhysReg; }
void addRef(int Delta) { RefCount += Delta; }
bool hasRefs() const { return RefCount > 0; }
void revalidate(LiveIntervalUnion *LIUArray, const TargetRegisterInfo *TRI);
/// valid - Return true if this is a valid entry for physReg.
bool valid(LiveIntervalUnion *LIUArray, const TargetRegisterInfo *TRI);
/// reset - Initialize entry to represent physReg's aliases.
void reset(unsigned physReg,
LiveIntervalUnion *LIUArray,
const TargetRegisterInfo *TRI,
const MachineFunction *MF);
/// get - Return an up to date BlockInterference.
BlockInterference *get(unsigned MBBNum) {
if (Blocks[MBBNum].Tag != Tag)
update(MBBNum);
return &Blocks[MBBNum];
}
};
// We don't keep a cache entry for every physical register, that would use too
// much memory. Instead, a fixed number of cache entries are used in a round-
// robin manner.
enum { CacheEntries = 32 };
// Point to an entry for each physreg. The entry pointed to may not be up to
// date, and it may have been reused for a different physreg.
unsigned char* PhysRegEntries;
size_t PhysRegEntriesCount;
// Next round-robin entry to be picked.
unsigned RoundRobin;
// The actual cache entries.
Entry Entries[CacheEntries];
// get - Get a valid entry for PhysReg.
Entry *get(unsigned PhysReg);
public:
InterferenceCache()
: TRI(nullptr), LIUArray(nullptr), MF(nullptr), PhysRegEntries(nullptr),
PhysRegEntriesCount(0), RoundRobin(0) {}
~InterferenceCache() {
delete[] PhysRegEntries; // HLSL Change: Use overridable operator delete
}
void reinitPhysRegEntries();
/// init - Prepare cache for a new function.
void init(MachineFunction*, LiveIntervalUnion*, SlotIndexes*, LiveIntervals*,
const TargetRegisterInfo *);
/// getMaxCursors - Return the maximum number of concurrent cursors that can
/// be supported.
unsigned getMaxCursors() const { return CacheEntries; }
/// Cursor - The primary query interface for the block interference cache.
class Cursor {
Entry *CacheEntry;
const BlockInterference *Current;
static const BlockInterference NoInterference;
void setEntry(Entry *E) {
Current = nullptr;
// Update reference counts. Nothing happens when RefCount reaches 0, so
// we don't have to check for E == CacheEntry etc.
if (CacheEntry)
CacheEntry->addRef(-1);
CacheEntry = E;
if (CacheEntry)
CacheEntry->addRef(+1);
}
public:
/// Cursor - Create a dangling cursor.
Cursor() : CacheEntry(nullptr), Current(nullptr) {}
~Cursor() { setEntry(nullptr); }
Cursor(const Cursor &O) : CacheEntry(nullptr), Current(nullptr) {
setEntry(O.CacheEntry);
}
Cursor &operator=(const Cursor &O) {
setEntry(O.CacheEntry);
return *this;
}
/// setPhysReg - Point this cursor to PhysReg's interference.
void setPhysReg(InterferenceCache &Cache, unsigned PhysReg) {
// Release reference before getting a new one. That guarantees we can
// actually have CacheEntries live cursors.
setEntry(nullptr);
if (PhysReg)
setEntry(Cache.get(PhysReg));
}
    /// moveToBlock - Move cursor to basic block MBBNum.
void moveToBlock(unsigned MBBNum) {
Current = CacheEntry ? CacheEntry->get(MBBNum) : &NoInterference;
}
/// hasInterference - Return true if the current block has any interference.
bool hasInterference() {
return Current->First.isValid();
}
/// first - Return the starting index of the first interfering range in the
/// current block.
SlotIndex first() {
return Current->First;
}
/// last - Return the ending index of the last interfering range in the
/// current block.
SlotIndex last() {
return Current->Last;
}
};
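  // Typical query pattern (a sketch; Cache, PhysReg and MBB are assumed to be
  // in scope): point a cursor at a candidate register, move it to a block of
  // interest, and read back the interference bounds.
  //
  //   InterferenceCache::Cursor Cur;
  //   Cur.setPhysReg(Cache, PhysReg);
  //   Cur.moveToBlock(MBB->getNumber());
  //   if (Cur.hasInterference()) {
  //     SlotIndex Start = Cur.first(); // first interfering index in block
  //     SlotIndex Stop = Cur.last();   // last interfering index in block
  //   }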
friend class Cursor;
};
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/IfConversion.cpp | //===-- IfConversion.cpp - Machine code if conversion pass. ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the machine instruction level if-conversion pass.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "BranchFolding.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "ifcvt"
// Hidden options for help debugging.
static cl::opt<int> IfCvtFnStart("ifcvt-fn-start", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtFnStop("ifcvt-fn-stop", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtLimit("ifcvt-limit", cl::init(-1), cl::Hidden);
static cl::opt<bool> DisableSimple("disable-ifcvt-simple",
cl::init(false), cl::Hidden);
static cl::opt<bool> DisableSimpleF("disable-ifcvt-simple-false",
cl::init(false), cl::Hidden);
static cl::opt<bool> DisableTriangle("disable-ifcvt-triangle",
cl::init(false), cl::Hidden);
static cl::opt<bool> DisableTriangleR("disable-ifcvt-triangle-rev",
cl::init(false), cl::Hidden);
static cl::opt<bool> DisableTriangleF("disable-ifcvt-triangle-false",
cl::init(false), cl::Hidden);
static cl::opt<bool> DisableTriangleFR("disable-ifcvt-triangle-false-rev",
cl::init(false), cl::Hidden);
static cl::opt<bool> DisableDiamond("disable-ifcvt-diamond",
cl::init(false), cl::Hidden);
static cl::opt<bool> IfCvtBranchFold("ifcvt-branch-fold",
cl::init(true), cl::Hidden);
STATISTIC(NumSimple, "Number of simple if-conversions performed");
STATISTIC(NumSimpleFalse, "Number of simple (F) if-conversions performed");
STATISTIC(NumTriangle, "Number of triangle if-conversions performed");
STATISTIC(NumTriangleRev, "Number of triangle (R) if-conversions performed");
STATISTIC(NumTriangleFalse,"Number of triangle (F) if-conversions performed");
STATISTIC(NumTriangleFRev, "Number of triangle (F/R) if-conversions performed");
STATISTIC(NumDiamonds, "Number of diamond if-conversions performed");
STATISTIC(NumIfConvBBs, "Number of if-converted blocks");
STATISTIC(NumDupBBs, "Number of duplicated blocks");
STATISTIC(NumUnpred, "Number of true blocks of diamonds unpredicated");
namespace {
class IfConverter : public MachineFunctionPass {
enum IfcvtKind {
ICNotClassfied, // BB data valid, but not classified.
ICSimpleFalse, // Same as ICSimple, but on the false path.
    ICSimple,        // BB is entry of a one-split, no-rejoin sub-CFG.
ICTriangleFRev, // Same as ICTriangleFalse, but false path rev condition.
ICTriangleRev, // Same as ICTriangle, but true path rev condition.
ICTriangleFalse, // Same as ICTriangle, but on the false path.
ICTriangle, // BB is entry of a triangle sub-CFG.
ICDiamond // BB is entry of a diamond sub-CFG.
};
  /// BBInfo - One per MachineBasicBlock, this is used to cache the result of
  /// if-conversion feasibility analysis. This includes results from
/// TargetInstrInfo::AnalyzeBranch() (i.e. TBB, FBB, and Cond), and its
/// classification, and common tail block of its successors (if it's a
/// diamond shape), its size, whether it's predicable, and whether any
/// instruction can clobber the 'would-be' predicate.
///
/// IsDone - True if BB is not to be considered for ifcvt.
/// IsBeingAnalyzed - True if BB is currently being analyzed.
/// IsAnalyzed - True if BB has been analyzed (info is still valid).
/// IsEnqueued - True if BB has been enqueued to be ifcvt'ed.
  /// IsBrAnalyzable - True if AnalyzeBranch() returns false, meaning the
  /// block's terminators were successfully analyzed.
/// HasFallThrough - True if BB may fallthrough to the following BB.
/// IsUnpredicable - True if BB is known to be unpredicable.
/// ClobbersPred - True if BB could modify predicates (e.g. has
/// cmp, call, etc.)
/// NonPredSize - Number of non-predicated instructions.
/// ExtraCost - Extra cost for multi-cycle instructions.
  /// ExtraCost2 - Extra cost for instructions that are slower when predicated.
/// BB - Corresponding MachineBasicBlock.
/// TrueBB / FalseBB- See AnalyzeBranch().
/// BrCond - Conditions for end of block conditional branches.
/// Predicate - Predicate used in the BB.
struct BBInfo {
bool IsDone : 1;
bool IsBeingAnalyzed : 1;
bool IsAnalyzed : 1;
bool IsEnqueued : 1;
bool IsBrAnalyzable : 1;
bool HasFallThrough : 1;
bool IsUnpredicable : 1;
bool CannotBeCopied : 1;
bool ClobbersPred : 1;
unsigned NonPredSize;
unsigned ExtraCost;
unsigned ExtraCost2;
MachineBasicBlock *BB;
MachineBasicBlock *TrueBB;
MachineBasicBlock *FalseBB;
SmallVector<MachineOperand, 4> BrCond;
SmallVector<MachineOperand, 4> Predicate;
BBInfo() : IsDone(false), IsBeingAnalyzed(false),
IsAnalyzed(false), IsEnqueued(false), IsBrAnalyzable(false),
HasFallThrough(false), IsUnpredicable(false),
CannotBeCopied(false), ClobbersPred(false), NonPredSize(0),
ExtraCost(0), ExtraCost2(0), BB(nullptr), TrueBB(nullptr),
FalseBB(nullptr) {}
};
/// IfcvtToken - Record information about pending if-conversions to attempt:
/// BBI - Corresponding BBInfo.
/// Kind - Type of block. See IfcvtKind.
/// NeedSubsumption - True if the to-be-predicated BB has already been
/// predicated.
/// NumDups - Number of instructions that would be duplicated due
/// to this if-conversion. (For diamonds, the number of
/// identical instructions at the beginnings of both
/// paths).
/// NumDups2 - For diamonds, the number of identical instructions
/// at the ends of both paths.
struct IfcvtToken {
BBInfo &BBI;
IfcvtKind Kind;
bool NeedSubsumption;
unsigned NumDups;
unsigned NumDups2;
IfcvtToken(BBInfo &b, IfcvtKind k, bool s, unsigned d, unsigned d2 = 0)
: BBI(b), Kind(k), NeedSubsumption(s), NumDups(d), NumDups2(d2) {}
};
/// BBAnalysis - Results of if-conversion feasibility analysis indexed by
/// basic block number.
std::vector<BBInfo> BBAnalysis;
TargetSchedModel SchedModel;
const TargetLoweringBase *TLI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const MachineBlockFrequencyInfo *MBFI;
const MachineBranchProbabilityInfo *MBPI;
MachineRegisterInfo *MRI;
LivePhysRegs Redefs;
LivePhysRegs DontKill;
bool PreRegAlloc;
bool MadeChange;
int FnNum;
std::function<bool(const Function &)> PredicateFtor;
public:
static char ID;
IfConverter(std::function<bool(const Function &)> Ftor = nullptr)
: MachineFunctionPass(ID), FnNum(-1), PredicateFtor(Ftor) {
initializeIfConverterPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addRequired<MachineBranchProbabilityInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override;
private:
bool ReverseBranchCondition(BBInfo &BBI);
bool ValidSimple(BBInfo &TrueBBI, unsigned &Dups,
const BranchProbability &Prediction) const;
bool ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
bool FalseBranch, unsigned &Dups,
const BranchProbability &Prediction) const;
bool ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
unsigned &Dups1, unsigned &Dups2) const;
void ScanInstructions(BBInfo &BBI);
void AnalyzeBlock(MachineBasicBlock *MBB, std::vector<IfcvtToken*> &Tokens);
bool FeasibilityAnalysis(BBInfo &BBI, SmallVectorImpl<MachineOperand> &Cond,
bool isTriangle = false, bool RevBranch = false);
void AnalyzeBlocks(MachineFunction &MF, std::vector<IfcvtToken*> &Tokens);
void InvalidatePreds(MachineBasicBlock *BB);
void RemoveExtraEdges(BBInfo &BBI);
bool IfConvertSimple(BBInfo &BBI, IfcvtKind Kind);
bool IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind);
bool IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
unsigned NumDups1, unsigned NumDups2);
void PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
SmallVectorImpl<MachineOperand> &Cond,
SmallSet<unsigned, 4> *LaterRedefs = nullptr);
void CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
bool IgnoreBr = false);
void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges = true);
bool MeetIfcvtSizeLimit(MachineBasicBlock &BB,
unsigned Cycle, unsigned Extra,
const BranchProbability &Prediction) const {
return Cycle > 0 && TII->isProfitableToIfCvt(BB, Cycle, Extra,
Prediction);
}
bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB,
unsigned TCycle, unsigned TExtra,
MachineBasicBlock &FBB,
unsigned FCycle, unsigned FExtra,
const BranchProbability &Prediction) const {
return TCycle > 0 && FCycle > 0 &&
TII->isProfitableToIfCvt(TBB, TCycle, TExtra, FBB, FCycle, FExtra,
Prediction);
}
// blockAlwaysFallThrough - Block ends without a terminator.
bool blockAlwaysFallThrough(BBInfo &BBI) const {
return BBI.IsBrAnalyzable && BBI.TrueBB == nullptr;
}
// IfcvtTokenCmp - Used to sort if-conversion candidates.
static bool IfcvtTokenCmp(IfcvtToken *C1, IfcvtToken *C2) {
int Incr1 = (C1->Kind == ICDiamond)
? -(int)(C1->NumDups + C1->NumDups2) : (int)C1->NumDups;
int Incr2 = (C2->Kind == ICDiamond)
? -(int)(C2->NumDups + C2->NumDups2) : (int)C2->NumDups;
if (Incr1 > Incr2)
return true;
else if (Incr1 == Incr2) {
// Favors subsumption.
if (!C1->NeedSubsumption && C2->NeedSubsumption)
return true;
else if (C1->NeedSubsumption == C2->NeedSubsumption) {
// Favors diamond over triangle, etc.
if ((unsigned)C1->Kind < (unsigned)C2->Kind)
return true;
else if (C1->Kind == C2->Kind)
return C1->BBI.BB->getNumber() < C2->BBI.BB->getNumber();
}
}
return false;
}
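  // Note that tokens are consumed from the back of the sorted vector, so this
  // ordering makes diamonds (negative Incr, largest Kind value) pop first,
  // while non-diamond candidates that would duplicate many instructions pop
  // last.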
};
char IfConverter::ID = 0;
}
char &llvm::IfConverterID = IfConverter::ID;
INITIALIZE_PASS_BEGIN(IfConverter, "if-converter", "If Converter", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_END(IfConverter, "if-converter", "If Converter", false, false)
bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
if (PredicateFtor && !PredicateFtor(*MF.getFunction()))
return false;
const TargetSubtargetInfo &ST = MF.getSubtarget();
TLI = ST.getTargetLowering();
TII = ST.getInstrInfo();
TRI = ST.getRegisterInfo();
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
MRI = &MF.getRegInfo();
SchedModel.init(ST.getSchedModel(), &ST, TII);
if (!TII) return false;
PreRegAlloc = MRI->isSSA();
bool BFChange = false;
if (!PreRegAlloc) {
    // Tail merging tends to expose more if-conversion opportunities.
BranchFolder BF(true, false, *MBFI, *MBPI);
BFChange = BF.OptimizeFunction(MF, TII, ST.getRegisterInfo(),
getAnalysisIfAvailable<MachineModuleInfo>());
}
DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
<< MF.getName() << "\'");
if (FnNum < IfCvtFnStart || (IfCvtFnStop != -1 && FnNum > IfCvtFnStop)) {
DEBUG(dbgs() << " skipped\n");
return false;
}
DEBUG(dbgs() << "\n");
MF.RenumberBlocks();
BBAnalysis.resize(MF.getNumBlockIDs());
std::vector<IfcvtToken*> Tokens;
MadeChange = false;
unsigned NumIfCvts = NumSimple + NumSimpleFalse + NumTriangle +
NumTriangleRev + NumTriangleFalse + NumTriangleFRev + NumDiamonds;
while (IfCvtLimit == -1 || (int)NumIfCvts < IfCvtLimit) {
// Do an initial analysis for each basic block and find all the potential
// candidates to perform if-conversion.
bool Change = false;
AnalyzeBlocks(MF, Tokens);
while (!Tokens.empty()) {
IfcvtToken *Token = Tokens.back();
Tokens.pop_back();
BBInfo &BBI = Token->BBI;
IfcvtKind Kind = Token->Kind;
unsigned NumDups = Token->NumDups;
unsigned NumDups2 = Token->NumDups2;
delete Token;
// If the block has been evicted out of the queue or it has already been
// marked dead (due to it being predicated), then skip it.
if (BBI.IsDone)
BBI.IsEnqueued = false;
if (!BBI.IsEnqueued)
continue;
BBI.IsEnqueued = false;
bool RetVal = false;
switch (Kind) {
default: llvm_unreachable("Unexpected!");
case ICSimple:
case ICSimpleFalse: {
bool isFalse = Kind == ICSimpleFalse;
if ((isFalse && DisableSimpleF) || (!isFalse && DisableSimple)) break;
DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ?
" false" : "")
<< "): BB#" << BBI.BB->getNumber() << " ("
<< ((Kind == ICSimpleFalse)
? BBI.FalseBB->getNumber()
: BBI.TrueBB->getNumber()) << ") ");
RetVal = IfConvertSimple(BBI, Kind);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
if (isFalse) ++NumSimpleFalse;
else ++NumSimple;
}
break;
}
case ICTriangle:
case ICTriangleRev:
case ICTriangleFalse:
case ICTriangleFRev: {
bool isFalse = Kind == ICTriangleFalse;
bool isRev = (Kind == ICTriangleRev || Kind == ICTriangleFRev);
if (DisableTriangle && !isFalse && !isRev) break;
if (DisableTriangleR && !isFalse && isRev) break;
if (DisableTriangleF && isFalse && !isRev) break;
if (DisableTriangleFR && isFalse && isRev) break;
DEBUG(dbgs() << "Ifcvt (Triangle");
if (isFalse)
DEBUG(dbgs() << " false");
if (isRev)
DEBUG(dbgs() << " rev");
DEBUG(dbgs() << "): BB#" << BBI.BB->getNumber() << " (T:"
<< BBI.TrueBB->getNumber() << ",F:"
<< BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertTriangle(BBI, Kind);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
if (isFalse) {
if (isRev) ++NumTriangleFRev;
else ++NumTriangleFalse;
} else {
if (isRev) ++NumTriangleRev;
else ++NumTriangle;
}
}
break;
}
case ICDiamond: {
if (DisableDiamond) break;
DEBUG(dbgs() << "Ifcvt (Diamond): BB#" << BBI.BB->getNumber() << " (T:"
<< BBI.TrueBB->getNumber() << ",F:"
<< BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) ++NumDiamonds;
break;
}
}
Change |= RetVal;
NumIfCvts = NumSimple + NumSimpleFalse + NumTriangle + NumTriangleRev +
NumTriangleFalse + NumTriangleFRev + NumDiamonds;
if (IfCvtLimit != -1 && (int)NumIfCvts >= IfCvtLimit)
break;
}
if (!Change)
break;
MadeChange |= Change;
}
// Delete tokens in case of early exit.
while (!Tokens.empty()) {
IfcvtToken *Token = Tokens.back();
Tokens.pop_back();
delete Token;
}
Tokens.clear();
BBAnalysis.clear();
if (MadeChange && IfCvtBranchFold) {
BranchFolder BF(false, false, *MBFI, *MBPI);
BF.OptimizeFunction(MF, TII, MF.getSubtarget().getRegisterInfo(),
getAnalysisIfAvailable<MachineModuleInfo>());
}
MadeChange |= BFChange;
return MadeChange;
}
/// findFalseBlock - BB has a fallthrough. Find its 'false' successor given
/// its 'true' successor.
static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
MachineBasicBlock *TrueBB) {
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
E = BB->succ_end(); SI != E; ++SI) {
MachineBasicBlock *SuccBB = *SI;
if (SuccBB != TrueBB)
return SuccBB;
}
return nullptr;
}
/// ReverseBranchCondition - Reverse the condition of the end of the block
/// branch. Swap block's 'true' and 'false' successors.
bool IfConverter::ReverseBranchCondition(BBInfo &BBI) {
DebugLoc dl; // FIXME: this is nowhere
if (!TII->ReverseBranchCondition(BBI.BrCond)) {
TII->RemoveBranch(*BBI.BB);
TII->InsertBranch(*BBI.BB, BBI.FalseBB, BBI.TrueBB, BBI.BrCond, dl);
std::swap(BBI.TrueBB, BBI.FalseBB);
return true;
}
return false;
}
/// getNextBlock - Returns the next block in the function blocks ordering. If
/// it is the end, returns NULL.
static inline MachineBasicBlock *getNextBlock(MachineBasicBlock *BB) {
MachineFunction::iterator I = BB;
MachineFunction::iterator E = BB->getParent()->end();
if (++I == E)
return nullptr;
return I;
}
/// ValidSimple - Returns true if the 'true' block (along with its
/// predecessor) forms a valid simple shape for ifcvt. It also returns, in
/// 'Dups', the number of instructions that the ifcvt would need to duplicate
/// if performed.
bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups,
const BranchProbability &Prediction) const {
Dups = 0;
if (TrueBBI.IsBeingAnalyzed || TrueBBI.IsDone)
return false;
if (TrueBBI.IsBrAnalyzable)
return false;
if (TrueBBI.BB->pred_size() > 1) {
if (TrueBBI.CannotBeCopied ||
!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, TrueBBI.NonPredSize,
Prediction))
return false;
Dups = TrueBBI.NonPredSize;
}
return true;
}
/// ValidTriangle - Returns true if the 'true' and 'false' blocks (along
/// with their common predecessor) form a valid triangle shape for ifcvt.
/// If 'FalseBranch' is true, it checks whether the 'true' block's false
/// branch branches to the 'false' block rather than the other way around.
/// It also returns, in 'Dups', the number of instructions that the ifcvt
/// would need to duplicate if performed.
bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
bool FalseBranch, unsigned &Dups,
const BranchProbability &Prediction) const {
Dups = 0;
if (TrueBBI.IsBeingAnalyzed || TrueBBI.IsDone)
return false;
if (TrueBBI.BB->pred_size() > 1) {
if (TrueBBI.CannotBeCopied)
return false;
unsigned Size = TrueBBI.NonPredSize;
if (TrueBBI.IsBrAnalyzable) {
if (TrueBBI.TrueBB && TrueBBI.BrCond.empty())
// Ends with an unconditional branch. It will be removed.
--Size;
else {
MachineBasicBlock *FExit = FalseBranch
? TrueBBI.TrueBB : TrueBBI.FalseBB;
if (FExit)
          // An extra conditional branch to FExit will be needed; count it.
++Size;
}
}
if (!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, Size, Prediction))
return false;
Dups = Size;
}
MachineBasicBlock *TExit = FalseBranch ? TrueBBI.FalseBB : TrueBBI.TrueBB;
if (!TExit && blockAlwaysFallThrough(TrueBBI)) {
MachineFunction::iterator I = TrueBBI.BB;
if (++I == TrueBBI.BB->getParent()->end())
return false;
TExit = I;
}
return TExit && TExit == FalseBBI.BB;
}
/// ValidDiamond - Returns true if the 'true' and 'false' blocks (along
/// with their common predecessor) form a valid diamond shape for ifcvt.
bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
unsigned &Dups1, unsigned &Dups2) const {
Dups1 = Dups2 = 0;
if (TrueBBI.IsBeingAnalyzed || TrueBBI.IsDone ||
FalseBBI.IsBeingAnalyzed || FalseBBI.IsDone)
return false;
MachineBasicBlock *TT = TrueBBI.TrueBB;
MachineBasicBlock *FT = FalseBBI.TrueBB;
if (!TT && blockAlwaysFallThrough(TrueBBI))
TT = getNextBlock(TrueBBI.BB);
if (!FT && blockAlwaysFallThrough(FalseBBI))
FT = getNextBlock(FalseBBI.BB);
if (TT != FT)
return false;
if (!TT && (TrueBBI.IsBrAnalyzable || FalseBBI.IsBrAnalyzable))
return false;
if (TrueBBI.BB->pred_size() > 1 || FalseBBI.BB->pred_size() > 1)
return false;
// FIXME: Allow true block to have an early exit?
if (TrueBBI.FalseBB || FalseBBI.FalseBB ||
(TrueBBI.ClobbersPred && FalseBBI.ClobbersPred))
return false;
// Count duplicate instructions at the beginning of the true and false blocks.
MachineBasicBlock::iterator TIB = TrueBBI.BB->begin();
MachineBasicBlock::iterator FIB = FalseBBI.BB->begin();
MachineBasicBlock::iterator TIE = TrueBBI.BB->end();
MachineBasicBlock::iterator FIE = FalseBBI.BB->end();
while (TIB != TIE && FIB != FIE) {
// Skip dbg_value instructions. These do not count.
if (TIB->isDebugValue()) {
while (TIB != TIE && TIB->isDebugValue())
++TIB;
if (TIB == TIE)
break;
}
if (FIB->isDebugValue()) {
while (FIB != FIE && FIB->isDebugValue())
++FIB;
if (FIB == FIE)
break;
}
if (!TIB->isIdenticalTo(FIB))
break;
++Dups1;
++TIB;
++FIB;
}
// Now, in preparation for counting duplicate instructions at the ends of the
// blocks, move the end iterators up past any branch instructions.
while (TIE != TIB) {
--TIE;
if (!TIE->isBranch())
break;
}
while (FIE != FIB) {
--FIE;
if (!FIE->isBranch())
break;
}
// If Dups1 includes all of a block, then don't count duplicate
// instructions at the end of the blocks.
if (TIB == TIE || FIB == FIE)
return true;
// Count duplicate instructions at the ends of the blocks.
while (TIE != TIB && FIE != FIB) {
// Skip dbg_value instructions. These do not count.
if (TIE->isDebugValue()) {
while (TIE != TIB && TIE->isDebugValue())
--TIE;
if (TIE == TIB)
break;
}
if (FIE->isDebugValue()) {
while (FIE != FIB && FIE->isDebugValue())
--FIE;
if (FIE == FIB)
break;
}
if (!TIE->isIdenticalTo(FIE))
break;
++Dups2;
--TIE;
--FIE;
}
return true;
}
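// For example, with a true block {a, b, x, c, br} and a false block
// {a, b, y, c}, the scans above compute Dups1 = 2 (the shared prefix a, b) and
// Dups2 = 1 (the shared suffix c, counted after the end iterators step past
// the branch).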
/// ScanInstructions - Scan all the instructions in the block to determine if
/// the block is predicable. In most cases, that means all the instructions
/// in the block are isPredicable(). Also checks if the block contains any
/// instruction which can clobber a predicate (e.g. condition code register).
/// If so, the block is not predicable unless it's the last instruction.
void IfConverter::ScanInstructions(BBInfo &BBI) {
if (BBI.IsDone)
return;
bool AlreadyPredicated = !BBI.Predicate.empty();
// First analyze the end of BB branches.
BBI.TrueBB = BBI.FalseBB = nullptr;
BBI.BrCond.clear();
BBI.IsBrAnalyzable =
!TII->AnalyzeBranch(*BBI.BB, BBI.TrueBB, BBI.FalseBB, BBI.BrCond);
BBI.HasFallThrough = BBI.IsBrAnalyzable && BBI.FalseBB == nullptr;
if (BBI.BrCond.size()) {
// No false branch. This BB must end with a conditional branch and a
// fallthrough.
if (!BBI.FalseBB)
BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB);
if (!BBI.FalseBB) {
// Malformed bcc? True and false blocks are the same?
BBI.IsUnpredicable = true;
return;
}
}
// Then scan all the instructions.
BBI.NonPredSize = 0;
BBI.ExtraCost = 0;
BBI.ExtraCost2 = 0;
BBI.ClobbersPred = false;
for (MachineBasicBlock::iterator I = BBI.BB->begin(), E = BBI.BB->end();
I != E; ++I) {
if (I->isDebugValue())
continue;
if (I->isNotDuplicable())
BBI.CannotBeCopied = true;
bool isPredicated = TII->isPredicated(I);
bool isCondBr = BBI.IsBrAnalyzable && I->isConditionalBranch();
// A conditional branch is not predicable, but it may be eliminated.
if (isCondBr)
continue;
if (!isPredicated) {
BBI.NonPredSize++;
unsigned ExtraPredCost = TII->getPredicationCost(&*I);
unsigned NumCycles = SchedModel.computeInstrLatency(&*I, false);
if (NumCycles > 1)
BBI.ExtraCost += NumCycles-1;
BBI.ExtraCost2 += ExtraPredCost;
} else if (!AlreadyPredicated) {
// FIXME: This instruction is already predicated before the
// if-conversion pass. It's probably something like a conditional move.
// Mark this block unpredicable for now.
BBI.IsUnpredicable = true;
return;
}
if (BBI.ClobbersPred && !isPredicated) {
      // A predicate-modifying instruction should end the block (except for
      // already-predicated instructions and end-of-block branches). Since the
      // predicate may have been modified, the subsequent (currently)
      // unpredicated instructions cannot be correctly predicated.
BBI.IsUnpredicable = true;
return;
}
    // FIXME: Make use of PredDefs? e.g. ADDC, SUBC set predicates but are
    // still potentially predicable.
std::vector<MachineOperand> PredDefs;
if (TII->DefinesPredicate(I, PredDefs))
BBI.ClobbersPred = true;
if (!TII->isPredicable(I)) {
BBI.IsUnpredicable = true;
return;
}
}
}
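// Cost-model recap for the loop above: NonPredSize counts unpredicated
// instructions, ExtraCost accumulates the cycles beyond the first for
// multi-cycle instructions, and ExtraCost2 accumulates the target-reported
// extra cost of predicating each instruction; MeetIfcvtSizeLimit() combines
// these when weighing candidates.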
/// FeasibilityAnalysis - Determine if the block is a suitable candidate to be
/// predicated by the specified predicate.
bool IfConverter::FeasibilityAnalysis(BBInfo &BBI,
SmallVectorImpl<MachineOperand> &Pred,
bool isTriangle, bool RevBranch) {
// If the block is dead or unpredicable, then it cannot be predicated.
if (BBI.IsDone || BBI.IsUnpredicable)
return false;
// If it is already predicated but we couldn't analyze its terminator, the
// latter might fallthrough, but we can't determine where to.
// Conservatively avoid if-converting again.
if (BBI.Predicate.size() && !BBI.IsBrAnalyzable)
return false;
// If it is already predicated, check if the new predicate subsumes
// its predicate.
if (BBI.Predicate.size() && !TII->SubsumesPredicate(Pred, BBI.Predicate))
return false;
if (BBI.BrCond.size()) {
if (!isTriangle)
return false;
// Test predicate subsumption.
SmallVector<MachineOperand, 4> RevPred(Pred.begin(), Pred.end());
SmallVector<MachineOperand, 4> Cond(BBI.BrCond.begin(), BBI.BrCond.end());
if (RevBranch) {
if (TII->ReverseBranchCondition(Cond))
return false;
}
if (TII->ReverseBranchCondition(RevPred) ||
!TII->SubsumesPredicate(Cond, RevPred))
return false;
}
return true;
}
/// AnalyzeBlock - Analyze the structure of the sub-CFG starting from
/// the specified block. Record its successors and whether it looks like an
/// if-conversion candidate.
void IfConverter::AnalyzeBlock(MachineBasicBlock *MBB,
std::vector<IfcvtToken*> &Tokens) {
struct BBState {
BBState(MachineBasicBlock *BB) : MBB(BB), SuccsAnalyzed(false) {}
MachineBasicBlock *MBB;
/// This flag is true if MBB's successors have been analyzed.
bool SuccsAnalyzed;
};
// Push MBB to the stack.
SmallVector<BBState, 16> BBStack(1, MBB);
while (!BBStack.empty()) {
BBState &State = BBStack.back();
MachineBasicBlock *BB = State.MBB;
BBInfo &BBI = BBAnalysis[BB->getNumber()];
if (!State.SuccsAnalyzed) {
if (BBI.IsAnalyzed || BBI.IsBeingAnalyzed) {
BBStack.pop_back();
continue;
}
BBI.BB = BB;
BBI.IsBeingAnalyzed = true;
ScanInstructions(BBI);
      // Unanalyzable, ends with a fallthrough or an unconditional branch, or
      // is no longer considered for ifcvt.
if (!BBI.IsBrAnalyzable || BBI.BrCond.empty() || BBI.IsDone) {
BBI.IsBeingAnalyzed = false;
BBI.IsAnalyzed = true;
BBStack.pop_back();
continue;
}
      // Do not ifcvt if either path is a back edge to the block itself.
if (BBI.TrueBB == BB || BBI.FalseBB == BB) {
BBI.IsBeingAnalyzed = false;
BBI.IsAnalyzed = true;
BBStack.pop_back();
continue;
}
// Do not ifcvt if true and false fallthrough blocks are the same.
if (!BBI.FalseBB) {
BBI.IsBeingAnalyzed = false;
BBI.IsAnalyzed = true;
BBStack.pop_back();
continue;
}
// Push the False and True blocks to the stack.
State.SuccsAnalyzed = true;
BBStack.push_back(BBI.FalseBB);
BBStack.push_back(BBI.TrueBB);
continue;
}
BBInfo &TrueBBI = BBAnalysis[BBI.TrueBB->getNumber()];
BBInfo &FalseBBI = BBAnalysis[BBI.FalseBB->getNumber()];
if (TrueBBI.IsDone && FalseBBI.IsDone) {
BBI.IsBeingAnalyzed = false;
BBI.IsAnalyzed = true;
BBStack.pop_back();
continue;
}
SmallVector<MachineOperand, 4>
RevCond(BBI.BrCond.begin(), BBI.BrCond.end());
bool CanRevCond = !TII->ReverseBranchCondition(RevCond);
unsigned Dups = 0;
unsigned Dups2 = 0;
bool TNeedSub = !TrueBBI.Predicate.empty();
bool FNeedSub = !FalseBBI.Predicate.empty();
bool Enqueued = false;
BranchProbability Prediction = MBPI->getEdgeProbability(BB, TrueBBI.BB);
if (CanRevCond && ValidDiamond(TrueBBI, FalseBBI, Dups, Dups2) &&
MeetIfcvtSizeLimit(*TrueBBI.BB, (TrueBBI.NonPredSize - (Dups + Dups2) +
TrueBBI.ExtraCost), TrueBBI.ExtraCost2,
*FalseBBI.BB, (FalseBBI.NonPredSize - (Dups + Dups2) +
FalseBBI.ExtraCost),FalseBBI.ExtraCost2,
Prediction) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
// Diamond:
// EBB
// / \_
// | |
// TBB FBB
// \ /
// TailBB
// Note TailBB can be empty.
Tokens.push_back(new IfcvtToken(BBI, ICDiamond, TNeedSub|FNeedSub, Dups,
Dups2));
Enqueued = true;
}
if (ValidTriangle(TrueBBI, FalseBBI, false, Dups, Prediction) &&
MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
TrueBBI.ExtraCost2, Prediction) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true)) {
// Triangle:
// EBB
// | \_
// | |
// | TBB
// | /
// FBB
Tokens.push_back(new IfcvtToken(BBI, ICTriangle, TNeedSub, Dups));
Enqueued = true;
}
if (ValidTriangle(TrueBBI, FalseBBI, true, Dups, Prediction) &&
MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
TrueBBI.ExtraCost2, Prediction) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleRev, TNeedSub, Dups));
Enqueued = true;
}
if (ValidSimple(TrueBBI, Dups, Prediction) &&
MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
TrueBBI.ExtraCost2, Prediction) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond)) {
// Simple (split, no rejoin):
// EBB
// | \_
// | |
// | TBB---> exit
// |
// FBB
Tokens.push_back(new IfcvtToken(BBI, ICSimple, TNeedSub, Dups));
Enqueued = true;
}
if (CanRevCond) {
// Try the other path...
if (ValidTriangle(FalseBBI, TrueBBI, false, Dups,
Prediction.getCompl()) &&
MeetIfcvtSizeLimit(*FalseBBI.BB,
FalseBBI.NonPredSize + FalseBBI.ExtraCost,
FalseBBI.ExtraCost2, Prediction.getCompl()) &&
FeasibilityAnalysis(FalseBBI, RevCond, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFalse, FNeedSub, Dups));
Enqueued = true;
}
if (ValidTriangle(FalseBBI, TrueBBI, true, Dups,
Prediction.getCompl()) &&
MeetIfcvtSizeLimit(*FalseBBI.BB,
FalseBBI.NonPredSize + FalseBBI.ExtraCost,
FalseBBI.ExtraCost2, Prediction.getCompl()) &&
FeasibilityAnalysis(FalseBBI, RevCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFRev, FNeedSub, Dups));
Enqueued = true;
}
if (ValidSimple(FalseBBI, Dups, Prediction.getCompl()) &&
MeetIfcvtSizeLimit(*FalseBBI.BB,
FalseBBI.NonPredSize + FalseBBI.ExtraCost,
FalseBBI.ExtraCost2, Prediction.getCompl()) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
Tokens.push_back(new IfcvtToken(BBI, ICSimpleFalse, FNeedSub, Dups));
Enqueued = true;
}
}
BBI.IsEnqueued = Enqueued;
BBI.IsBeingAnalyzed = false;
BBI.IsAnalyzed = true;
BBStack.pop_back();
}
}
/// AnalyzeBlocks - Analyze all blocks and find entries for all if-conversion
/// candidates.
void IfConverter::AnalyzeBlocks(MachineFunction &MF,
std::vector<IfcvtToken*> &Tokens) {
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *BB = I;
AnalyzeBlock(BB, Tokens);
}
  // Sort to favor more complex ifcvt schemes.
std::stable_sort(Tokens.begin(), Tokens.end(), IfcvtTokenCmp);
}
/// canFallThroughTo - Returns true if ToBB is the next block after BB, or if
/// all the intervening blocks are empty (given that BB can fall through to
/// its next block).
static bool canFallThroughTo(MachineBasicBlock *BB, MachineBasicBlock *ToBB) {
MachineFunction::iterator PI = BB;
MachineFunction::iterator I = std::next(PI);
MachineFunction::iterator TI = ToBB;
MachineFunction::iterator E = BB->getParent()->end();
while (I != TI) {
    // Check isSuccessor to avoid the case where the next block is empty, but
    // is not a successor.
if (I == E || !I->empty() || !PI->isSuccessor(I))
return false;
PI = I++;
}
return true;
}
/// InvalidatePreds - Invalidate predecessor BB info so it will be re-analyzed
/// to determine if it can be if-converted. If a predecessor is already
/// enqueued, dequeue it.
void IfConverter::InvalidatePreds(MachineBasicBlock *BB) {
for (const auto &Predecessor : BB->predecessors()) {
BBInfo &PBBI = BBAnalysis[Predecessor->getNumber()];
if (PBBI.IsDone || PBBI.BB == BB)
continue;
PBBI.IsAnalyzed = false;
PBBI.IsEnqueued = false;
}
}
/// InsertUncondBranch - Inserts an unconditional branch from BB to ToBB.
///
static void InsertUncondBranch(MachineBasicBlock *BB, MachineBasicBlock *ToBB,
const TargetInstrInfo *TII) {
DebugLoc dl; // FIXME: this is nowhere
SmallVector<MachineOperand, 0> NoCond;
TII->InsertBranch(*BB, ToBB, nullptr, NoCond, dl);
}
/// RemoveExtraEdges - Remove true / false edges if either / both are no longer
/// successors.
void IfConverter::RemoveExtraEdges(BBInfo &BBI) {
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*BBI.BB, TBB, FBB, Cond))
BBI.BB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
}
/// Behaves like LiveRegUnits::StepForward() but also adds implicit uses to all
/// values defined in MI which are not live/used by MI.
static void UpdatePredRedefs(MachineInstr *MI, LivePhysRegs &Redefs) {
SmallVector<std::pair<unsigned, const MachineOperand*>, 4> Clobbers;
Redefs.stepForward(*MI, Clobbers);
// Now add the implicit uses for each of the clobbered values.
for (auto Reg : Clobbers) {
// FIXME: Const cast here is nasty, but better than making StepForward
// take a mutable instruction instead of const.
MachineOperand &Op = const_cast<MachineOperand&>(*Reg.second);
MachineInstr *OpMI = Op.getParent();
MachineInstrBuilder MIB(*OpMI->getParent()->getParent(), OpMI);
if (Op.isRegMask()) {
// First handle regmasks. They clobber any entries in the mask which
// means that we need a def for those registers.
MIB.addReg(Reg.first, RegState::Implicit | RegState::Undef);
// We also need to add an implicit def of this register for the later
// use to read from.
// For the register allocator to have allocated a register clobbered
// by the call which is used later, it must be the case that
// the call doesn't return.
MIB.addReg(Reg.first, RegState::Implicit | RegState::Define);
continue;
}
assert(Op.isReg() && "Register operand required");
if (Op.isDead()) {
// If we found a dead def, but it needs to be live, then remove the dead
// flag.
if (Redefs.contains(Op.getReg()))
Op.setIsDead(false);
}
MIB.addReg(Reg.first, RegState::Implicit | RegState::Undef);
}
}
/**
 * Remove kill flags from operands with a register in the @p DontKill set.
*/
static void RemoveKills(MachineInstr &MI, const LivePhysRegs &DontKill) {
for (MIBundleOperands O(&MI); O.isValid(); ++O) {
if (!O->isReg() || !O->isKill())
continue;
if (DontKill.contains(O->getReg()))
O->setIsKill(false);
}
}
/**
* Walks a range of machine instructions and removes kill flags for registers
* in the @p DontKill set.
*/
static void RemoveKills(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E,
const LivePhysRegs &DontKill,
const MCRegisterInfo &MCRI) {
for ( ; I != E; ++I)
RemoveKills(*I, DontKill);
}
/// IfConvertSimple - If convert a simple (split, no rejoin) sub-CFG.
///
bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
BBInfo &TrueBBI = BBAnalysis[BBI.TrueBB->getNumber()];
BBInfo &FalseBBI = BBAnalysis[BBI.FalseBB->getNumber()];
BBInfo *CvtBBI = &TrueBBI;
BBInfo *NextBBI = &FalseBBI;
SmallVector<MachineOperand, 4> Cond(BBI.BrCond.begin(), BBI.BrCond.end());
if (Kind == ICSimpleFalse)
std::swap(CvtBBI, NextBBI);
if (CvtBBI->IsDone ||
(CvtBBI->CannotBeCopied && CvtBBI->BB->pred_size() > 1)) {
// Something has changed. It's no longer safe to predicate this block.
BBI.IsAnalyzed = false;
CvtBBI->IsAnalyzed = false;
return false;
}
if (CvtBBI->BB->hasAddressTaken())
// Conservatively abort if-conversion if BB's address is taken.
return false;
if (Kind == ICSimpleFalse)
if (TII->ReverseBranchCondition(Cond))
llvm_unreachable("Unable to reverse branch condition!");
  // Initialize liveins to the first BB. These are potentially redefined by
// predicated instructions.
Redefs.init(TRI);
Redefs.addLiveIns(CvtBBI->BB);
Redefs.addLiveIns(NextBBI->BB);
// Compute a set of registers which must not be killed by instructions in
// BB1: This is everything live-in to BB2.
DontKill.init(TRI);
DontKill.addLiveIns(NextBBI->BB);
if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
// the entry block.
CopyAndPredicateBlock(BBI, *CvtBBI, Cond);
// RemoveExtraEdges won't work if the block has an unanalyzable branch, so
// explicitly remove CvtBBI as a successor.
BBI.BB->removeSuccessor(CvtBBI->BB);
} else {
RemoveKills(CvtBBI->BB->begin(), CvtBBI->BB->end(), DontKill, *TRI);
PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond);
// Merge converted block into entry block.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
MergeBlocks(BBI, *CvtBBI);
}
bool IterIfcvt = true;
if (!canFallThroughTo(BBI.BB, NextBBI->BB)) {
InsertUncondBranch(BBI.BB, NextBBI->BB, TII);
BBI.HasFallThrough = false;
// Now ifcvt'd block will look like this:
// BB:
// ...
// t, f = cmp
// if t op
// b BBf
//
// We cannot further ifcvt this block because the unconditional branch
// will have to be predicated on the new condition, that will not be
// available if cmp executes.
IterIfcvt = false;
}
RemoveExtraEdges(BBI);
// Update block info. BB can be iteratively if-converted.
if (!IterIfcvt)
BBI.IsDone = true;
InvalidatePreds(BBI.BB);
CvtBBI->IsDone = true;
// FIXME: Must maintain LiveIns.
return true;
}
/// Scale down weights to fit into uint32_t. NewTrue is the new weight
/// for successor TrueBB, and NewFalse is the new weight for successor
/// FalseBB.
static void ScaleWeights(uint64_t NewTrue, uint64_t NewFalse,
MachineBasicBlock *MBB,
const MachineBasicBlock *TrueBB,
const MachineBasicBlock *FalseBB,
const MachineBranchProbabilityInfo *MBPI) {
uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
uint32_t Scale = (NewMax / UINT32_MAX) + 1;
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end();
SI != SE; ++SI) {
if (*SI == TrueBB)
MBB->setSuccWeight(SI, (uint32_t)(NewTrue / Scale));
else if (*SI == FalseBB)
MBB->setSuccWeight(SI, (uint32_t)(NewFalse / Scale));
else
MBB->setSuccWeight(SI, MBPI->getEdgeWeight(MBB, SI) / Scale);
}
}
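// For example, if NewTrue = 3 * UINT32_MAX and NewFalse = UINT32_MAX, then
// Scale = 4 and the stored weights become roughly 0.75 * UINT32_MAX and
// 0.25 * UINT32_MAX, preserving the 3:1 ratio while fitting in uint32_t.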
/// IfConvertTriangle - If convert a triangle sub-CFG.
///
bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
BBInfo &TrueBBI = BBAnalysis[BBI.TrueBB->getNumber()];
BBInfo &FalseBBI = BBAnalysis[BBI.FalseBB->getNumber()];
BBInfo *CvtBBI = &TrueBBI;
BBInfo *NextBBI = &FalseBBI;
DebugLoc dl; // FIXME: this is nowhere
SmallVector<MachineOperand, 4> Cond(BBI.BrCond.begin(), BBI.BrCond.end());
if (Kind == ICTriangleFalse || Kind == ICTriangleFRev)
std::swap(CvtBBI, NextBBI);
if (CvtBBI->IsDone ||
(CvtBBI->CannotBeCopied && CvtBBI->BB->pred_size() > 1)) {
// Something has changed. It's no longer safe to predicate this block.
BBI.IsAnalyzed = false;
CvtBBI->IsAnalyzed = false;
return false;
}
if (CvtBBI->BB->hasAddressTaken())
// Conservatively abort if-conversion if BB's address is taken.
return false;
if (Kind == ICTriangleFalse || Kind == ICTriangleFRev)
if (TII->ReverseBranchCondition(Cond))
llvm_unreachable("Unable to reverse branch condition!");
if (Kind == ICTriangleRev || Kind == ICTriangleFRev) {
if (ReverseBranchCondition(*CvtBBI)) {
// BB has been changed, modify its predecessors (except for this
// one) so they don't get ifcvt'ed based on bad intel.
for (MachineBasicBlock::pred_iterator PI = CvtBBI->BB->pred_begin(),
E = CvtBBI->BB->pred_end(); PI != E; ++PI) {
MachineBasicBlock *PBB = *PI;
if (PBB == BBI.BB)
continue;
BBInfo &PBBI = BBAnalysis[PBB->getNumber()];
if (PBBI.IsEnqueued) {
PBBI.IsAnalyzed = false;
PBBI.IsEnqueued = false;
}
}
}
}
// Initialize liveins to the first BB. These are potentially redefined by
// predicated instructions.
Redefs.init(TRI);
Redefs.addLiveIns(CvtBBI->BB);
Redefs.addLiveIns(NextBBI->BB);
DontKill.clear();
bool HasEarlyExit = CvtBBI->FalseBB != nullptr;
uint64_t CvtNext = 0, CvtFalse = 0, BBNext = 0, BBCvt = 0, SumWeight = 0;
uint32_t WeightScale = 0;
if (HasEarlyExit) {
// Get weights before modifying CvtBBI->BB and BBI.BB.
CvtNext = MBPI->getEdgeWeight(CvtBBI->BB, NextBBI->BB);
CvtFalse = MBPI->getEdgeWeight(CvtBBI->BB, CvtBBI->FalseBB);
BBNext = MBPI->getEdgeWeight(BBI.BB, NextBBI->BB);
BBCvt = MBPI->getEdgeWeight(BBI.BB, CvtBBI->BB);
SumWeight = MBPI->getSumForBlock(CvtBBI->BB, WeightScale);
}
if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
// the entry block.
CopyAndPredicateBlock(BBI, *CvtBBI, Cond, true);
// RemoveExtraEdges won't work if the block has an unanalyzable branch, so
// explicitly remove CvtBBI as a successor.
BBI.BB->removeSuccessor(CvtBBI->BB);
} else {
// Predicate the 'true' block after removing its branch.
CvtBBI->NonPredSize -= TII->RemoveBranch(*CvtBBI->BB);
PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond);
// Now merge the entry of the triangle with the true block.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
MergeBlocks(BBI, *CvtBBI, false);
}
// If 'true' block has a 'false' successor, add an exit branch to it.
if (HasEarlyExit) {
SmallVector<MachineOperand, 4> RevCond(CvtBBI->BrCond.begin(),
CvtBBI->BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
llvm_unreachable("Unable to reverse branch condition!");
TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, nullptr, RevCond, dl);
BBI.BB->addSuccessor(CvtBBI->FalseBB);
// Update the edge weight for both CvtBBI->FalseBB and NextBBI.
// New_Weight(BBI.BB, NextBBI->BB) =
// Weight(BBI.BB, NextBBI->BB) * getSumForBlock(CvtBBI->BB) +
// Weight(BBI.BB, CvtBBI->BB) * Weight(CvtBBI->BB, NextBBI->BB)
// New_Weight(BBI.BB, CvtBBI->FalseBB) =
// Weight(BBI.BB, CvtBBI->BB) * Weight(CvtBBI->BB, CvtBBI->FalseBB)
uint64_t NewNext = BBNext * SumWeight + (BBCvt * CvtNext) / WeightScale;
uint64_t NewFalse = (BBCvt * CvtFalse) / WeightScale;
// We need to scale down all weights of BBI.BB to fit uint32_t.
// Here BBI.BB is connected to CvtBBI->FalseBB and will fall through to
// the next block.
ScaleWeights(NewNext, NewFalse, BBI.BB, getNextBlock(BBI.BB),
CvtBBI->FalseBB, MBPI);
}
// Merge in the 'false' block if the 'false' block has no other
// predecessors. Otherwise, add an unconditional branch to 'false'.
bool FalseBBDead = false;
bool IterIfcvt = true;
bool isFallThrough = canFallThroughTo(BBI.BB, NextBBI->BB);
if (!isFallThrough) {
    // Only merge them if the true block does not fall through to the false
// block. By not merging them, we make it possible to iteratively
// ifcvt the blocks.
if (!HasEarlyExit &&
NextBBI->BB->pred_size() == 1 && !NextBBI->HasFallThrough &&
!NextBBI->BB->hasAddressTaken()) {
MergeBlocks(BBI, *NextBBI);
FalseBBDead = true;
} else {
InsertUncondBranch(BBI.BB, NextBBI->BB, TII);
BBI.HasFallThrough = false;
}
// Mixed predicated and unpredicated code. This cannot be iteratively
// predicated.
IterIfcvt = false;
}
RemoveExtraEdges(BBI);
// Update block info. BB can be iteratively if-converted.
if (!IterIfcvt)
BBI.IsDone = true;
InvalidatePreds(BBI.BB);
CvtBBI->IsDone = true;
if (FalseBBDead)
NextBBI->IsDone = true;
// FIXME: Must maintain LiveIns.
return true;
}
/// IfConvertDiamond - If convert a diamond sub-CFG.
///
bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
unsigned NumDups1, unsigned NumDups2) {
BBInfo &TrueBBI = BBAnalysis[BBI.TrueBB->getNumber()];
BBInfo &FalseBBI = BBAnalysis[BBI.FalseBB->getNumber()];
MachineBasicBlock *TailBB = TrueBBI.TrueBB;
// True block must fall through or end with an unanalyzable terminator.
if (!TailBB) {
if (blockAlwaysFallThrough(TrueBBI))
TailBB = FalseBBI.TrueBB;
assert((TailBB || !TrueBBI.IsBrAnalyzable) && "Unexpected!");
}
if (TrueBBI.IsDone || FalseBBI.IsDone ||
TrueBBI.BB->pred_size() > 1 ||
FalseBBI.BB->pred_size() > 1) {
// Something has changed. It's no longer safe to predicate these blocks.
BBI.IsAnalyzed = false;
TrueBBI.IsAnalyzed = false;
FalseBBI.IsAnalyzed = false;
return false;
}
if (TrueBBI.BB->hasAddressTaken() || FalseBBI.BB->hasAddressTaken())
// Conservatively abort if-conversion if either BB has its address taken.
return false;
// Put the predicated instructions from the 'true' block before the
// instructions from the 'false' block, unless the true block would clobber
// the predicate, in which case, do the opposite.
BBInfo *BBI1 = &TrueBBI;
BBInfo *BBI2 = &FalseBBI;
SmallVector<MachineOperand, 4> RevCond(BBI.BrCond.begin(), BBI.BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
llvm_unreachable("Unable to reverse branch condition!");
SmallVector<MachineOperand, 4> *Cond1 = &BBI.BrCond;
SmallVector<MachineOperand, 4> *Cond2 = &RevCond;
// Figure out the more profitable ordering.
bool DoSwap = false;
if (TrueBBI.ClobbersPred && !FalseBBI.ClobbersPred)
DoSwap = true;
else if (TrueBBI.ClobbersPred == FalseBBI.ClobbersPred) {
if (TrueBBI.NonPredSize > FalseBBI.NonPredSize)
DoSwap = true;
}
if (DoSwap) {
std::swap(BBI1, BBI2);
std::swap(Cond1, Cond2);
}
// Remove the conditional branch from entry to the blocks.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Initialize liveins to the first BB. These are potentially redefined by
// predicated instructions.
Redefs.init(TRI);
Redefs.addLiveIns(BBI1->BB);
// Remove the duplicated instructions at the beginnings of both paths.
// Skip dbg_value instructions
MachineBasicBlock::iterator DI1 = BBI1->BB->getFirstNonDebugInstr();
MachineBasicBlock::iterator DI2 = BBI2->BB->getFirstNonDebugInstr();
BBI1->NonPredSize -= NumDups1;
BBI2->NonPredSize -= NumDups1;
// Skip past the dups on each side separately since there may be
// differing dbg_value entries.
for (unsigned i = 0; i < NumDups1; ++DI1) {
if (!DI1->isDebugValue())
++i;
}
while (NumDups1 != 0) {
++DI2;
if (!DI2->isDebugValue())
--NumDups1;
}
// Compute a set of registers which must not be killed by instructions in BB1:
// This is everything used+live in BB2 after the duplicated instructions. We
// can compute this set by simulating liveness backwards from the end of BB2.
DontKill.init(TRI);
for (MachineBasicBlock::reverse_iterator I = BBI2->BB->rbegin(),
E = MachineBasicBlock::reverse_iterator(DI2); I != E; ++I) {
DontKill.stepBackward(*I);
}
for (MachineBasicBlock::const_iterator I = BBI1->BB->begin(), E = DI1; I != E;
++I) {
SmallVector<std::pair<unsigned, const MachineOperand*>, 4> IgnoredClobbers;
Redefs.stepForward(*I, IgnoredClobbers);
}
BBI.BB->splice(BBI.BB->end(), BBI1->BB, BBI1->BB->begin(), DI1);
BBI2->BB->erase(BBI2->BB->begin(), DI2);
// Remove branch from 'true' block and remove duplicated instructions.
BBI1->NonPredSize -= TII->RemoveBranch(*BBI1->BB);
DI1 = BBI1->BB->end();
for (unsigned i = 0; i != NumDups2; ) {
// NumDups2 only counted non-dbg_value instructions, so this won't
// run off the head of the list.
assert (DI1 != BBI1->BB->begin());
--DI1;
// skip dbg_value instructions
if (!DI1->isDebugValue())
++i;
}
BBI1->BB->erase(DI1, BBI1->BB->end());
// Kill flags in the true block for registers living into the false block
// must be removed.
RemoveKills(BBI1->BB->begin(), BBI1->BB->end(), DontKill, *TRI);
// Remove 'false' block branch and find the last instruction to predicate.
BBI2->NonPredSize -= TII->RemoveBranch(*BBI2->BB);
DI2 = BBI2->BB->end();
while (NumDups2 != 0) {
// NumDups2 only counted non-dbg_value instructions, so this won't
// run off the head of the list.
assert (DI2 != BBI2->BB->begin());
--DI2;
// skip dbg_value instructions
if (!DI2->isDebugValue())
--NumDups2;
}
// Remember which registers would later be defined by the false block.
// This allows us not to predicate instructions in the true block that would
// later be re-defined. That is, rather than
// subeq r0, r1, #1
// addne r0, r1, #1
// generate:
// sub r0, r1, #1
// addne r0, r1, #1
SmallSet<unsigned, 4> RedefsByFalse;
SmallSet<unsigned, 4> ExtUses;
if (TII->isProfitableToUnpredicate(*BBI1->BB, *BBI2->BB)) {
for (MachineBasicBlock::iterator FI = BBI2->BB->begin(); FI != DI2; ++FI) {
if (FI->isDebugValue())
continue;
SmallVector<unsigned, 4> Defs;
for (unsigned i = 0, e = FI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = FI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isDef()) {
Defs.push_back(Reg);
} else if (!RedefsByFalse.count(Reg)) {
          // These are defined before control flow reaches the 'false' instructions.
// They cannot be modified by the 'true' instructions.
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
ExtUses.insert(*SubRegs);
}
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
unsigned Reg = Defs[i];
if (!ExtUses.count(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
RedefsByFalse.insert(*SubRegs);
}
}
}
}
// Predicate the 'true' block.
PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1, &RedefsByFalse);
// Predicate the 'false' block.
PredicateBlock(*BBI2, DI2, *Cond2);
// Merge the true block into the entry of the diamond.
MergeBlocks(BBI, *BBI1, TailBB == nullptr);
MergeBlocks(BBI, *BBI2, TailBB == nullptr);
// If the if-converted block falls through or unconditionally branches into
// the tail block, and the tail block does not have other predecessors, then
// fold the tail block in as well. Otherwise, unless it falls through to the
  // tail, add an unconditional branch to it.
if (TailBB) {
BBInfo &TailBBI = BBAnalysis[TailBB->getNumber()];
bool CanMergeTail = !TailBBI.HasFallThrough &&
!TailBBI.BB->hasAddressTaken();
// There may still be a fall-through edge from BBI1 or BBI2 to TailBB;
// check if there are any other predecessors besides those.
unsigned NumPreds = TailBB->pred_size();
if (NumPreds > 1)
CanMergeTail = false;
else if (NumPreds == 1 && CanMergeTail) {
MachineBasicBlock::pred_iterator PI = TailBB->pred_begin();
if (*PI != BBI1->BB && *PI != BBI2->BB)
CanMergeTail = false;
}
if (CanMergeTail) {
MergeBlocks(BBI, TailBBI);
TailBBI.IsDone = true;
} else {
BBI.BB->addSuccessor(TailBB);
InsertUncondBranch(BBI.BB, TailBB, TII);
BBI.HasFallThrough = false;
}
}
// RemoveExtraEdges won't work if the block has an unanalyzable branch,
// which can happen here if TailBB is unanalyzable and is merged, so
// explicitly remove BBI1 and BBI2 as successors.
BBI.BB->removeSuccessor(BBI1->BB);
BBI.BB->removeSuccessor(BBI2->BB);
RemoveExtraEdges(BBI);
// Update block info.
BBI.IsDone = TrueBBI.IsDone = FalseBBI.IsDone = true;
InvalidatePreds(BBI.BB);
// FIXME: Must maintain LiveIns.
return true;
}
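// MaySpeculate decides whether an instruction may be left unpredicated. For
// instance (registers hypothetical): an "add r0, r1, #1" on the 'true' side
// may stay unpredicated when r0 is in LaterRedefs, i.e. the 'false' path is
// known to redefine r0 before reading it, so speculatively executing the add
// is harmless.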
static bool MaySpeculate(const MachineInstr *MI,
SmallSet<unsigned, 4> &LaterRedefs) {
bool SawStore = true;
if (!MI->isSafeToMove(nullptr, SawStore))
return false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isDef() && !LaterRedefs.count(Reg))
return false;
}
return true;
}
/// PredicateBlock - Predicate instructions from the start of the block to the
/// specified end with the specified condition.
void IfConverter::PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
SmallVectorImpl<MachineOperand> &Cond,
SmallSet<unsigned, 4> *LaterRedefs) {
bool AnyUnpred = false;
bool MaySpec = LaterRedefs != nullptr;
for (MachineBasicBlock::iterator I = BBI.BB->begin(); I != E; ++I) {
if (I->isDebugValue() || TII->isPredicated(I))
continue;
// It may be possible not to predicate an instruction if it's the 'true'
// side of a diamond and the 'false' side may re-define the instruction's
// defs.
if (MaySpec && MaySpeculate(I, *LaterRedefs)) {
AnyUnpred = true;
continue;
}
// If any instruction is predicated, then every instruction after it must
// be predicated.
MaySpec = false;
if (!TII->PredicateInstruction(I, Cond)) {
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
llvm_unreachable(nullptr);
}
// If the predicated instruction now redefines a register as the result of
// if-conversion, add an implicit kill.
UpdatePredRedefs(I, Redefs);
}
BBI.Predicate.append(Cond.begin(), Cond.end());
BBI.IsAnalyzed = false;
BBI.NonPredSize = 0;
++NumIfConvBBs;
if (AnyUnpred)
++NumUnpred;
}
/// CopyAndPredicateBlock - Copy and predicate instructions from source BB to
/// the destination block. Skip end of block branches if IgnoreBr is true.
void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
bool IgnoreBr) {
MachineFunction &MF = *ToBBI.BB->getParent();
for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
E = FromBBI.BB->end(); I != E; ++I) {
// Do not copy the end of the block branches.
if (IgnoreBr && I->isBranch())
break;
MachineInstr *MI = MF.CloneMachineInstr(I);
ToBBI.BB->insert(ToBBI.BB->end(), MI);
ToBBI.NonPredSize++;
unsigned ExtraPredCost = TII->getPredicationCost(&*I);
unsigned NumCycles = SchedModel.computeInstrLatency(&*I, false);
if (NumCycles > 1)
ToBBI.ExtraCost += NumCycles-1;
ToBBI.ExtraCost2 += ExtraPredCost;
if (!TII->isPredicated(I) && !MI->isDebugValue()) {
if (!TII->PredicateInstruction(MI, Cond)) {
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
llvm_unreachable(nullptr);
}
}
// If the predicated instruction now redefines a register as the result of
// if-conversion, add an implicit kill.
UpdatePredRedefs(MI, Redefs);
// Some kill flags may not be correct anymore.
if (!DontKill.empty())
RemoveKills(*MI, DontKill);
}
if (!IgnoreBr) {
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : nullptr;
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
MachineBasicBlock *Succ = Succs[i];
// Fallthrough edge can't be transferred.
if (Succ == FallThrough)
continue;
ToBBI.BB->addSuccessor(Succ);
}
}
ToBBI.Predicate.append(FromBBI.Predicate.begin(), FromBBI.Predicate.end());
ToBBI.Predicate.append(Cond.begin(), Cond.end());
ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
ToBBI.IsAnalyzed = false;
++NumDupBBs;
}
/// MergeBlocks - Move all instructions from FromBB to the end of ToBB.
/// This will leave FromBB as an empty block, so remove all of its
/// successor edges except for the fall-through edge. If AddEdges is true,
/// i.e., when FromBBI's branch is being moved, add those successor edges to
/// ToBBI.
void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges) {
assert(!FromBBI.BB->hasAddressTaken() &&
"Removing a BB whose address is taken!");
ToBBI.BB->splice(ToBBI.BB->end(),
FromBBI.BB, FromBBI.BB->begin(), FromBBI.BB->end());
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : nullptr;
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
MachineBasicBlock *Succ = Succs[i];
// Fallthrough edge can't be transferred.
if (Succ == FallThrough)
continue;
FromBBI.BB->removeSuccessor(Succ);
if (AddEdges && !ToBBI.BB->isSuccessor(Succ))
ToBBI.BB->addSuccessor(Succ);
}
// Now FromBBI always falls through to the next block!
if (NBB && !FromBBI.BB->isSuccessor(NBB))
FromBBI.BB->addSuccessor(NBB);
ToBBI.Predicate.append(FromBBI.Predicate.begin(), FromBBI.Predicate.end());
FromBBI.Predicate.clear();
ToBBI.NonPredSize += FromBBI.NonPredSize;
ToBBI.ExtraCost += FromBBI.ExtraCost;
ToBBI.ExtraCost2 += FromBBI.ExtraCost2;
FromBBI.NonPredSize = 0;
FromBBI.ExtraCost = 0;
FromBBI.ExtraCost2 = 0;
ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
ToBBI.HasFallThrough = FromBBI.HasFallThrough;
ToBBI.IsAnalyzed = false;
FromBBI.IsAnalyzed = false;
}
FunctionPass *
llvm::createIfConverter(std::function<bool(const Function &)> Ftor) {
return new IfConverter(Ftor);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineCSE.cpp | //===-- MachineCSE.cpp - Machine Common Subexpression Elimination Pass ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global common subexpression elimination on machine
// instructions using a scoped hash table based value numbering scheme. It
// must be run while the machine function is still in SSA form.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "machine-cse"
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs,      "Number of common subexpressions eliminated");
STATISTIC(NumPhysCSEs,
"Number of physreg referencing common subexpr eliminated");
STATISTIC(NumCrossBBCSEs,
"Number of cross-MBB physreg referencing CS eliminated");
STATISTIC(NumCommutes, "Number of copies coalesced after commuting");
namespace {
class MachineCSE : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
AliasAnalysis *AA;
MachineDominatorTree *DT;
MachineRegisterInfo *MRI;
public:
static char ID; // Pass identification
MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(0), CurrVN(0) {
initializeMachineCSEPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
AU.addPreservedID(MachineLoopInfoID);
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
}
void releaseMemory() override {
ScopeMap.clear();
Exps.clear();
}
private:
unsigned LookAheadLimit;
typedef RecyclingAllocator<BumpPtrAllocator,
ScopedHashTableVal<MachineInstr*, unsigned> > AllocatorTy;
typedef ScopedHashTable<MachineInstr*, unsigned,
MachineInstrExpressionTrait, AllocatorTy> ScopedHTType;
typedef ScopedHTType::ScopeTy ScopeType;
DenseMap<MachineBasicBlock*, ScopeType*> ScopeMap;
ScopedHTType VNT;
SmallVector<MachineInstr*, 64> Exps;
unsigned CurrVN;
bool PerformTrivialCopyPropagation(MachineInstr *MI,
MachineBasicBlock *MBB);
bool isPhysDefTriviallyDead(unsigned Reg,
MachineBasicBlock::const_iterator I,
MachineBasicBlock::const_iterator E) const;
bool hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
SmallVectorImpl<unsigned> &PhysDefs,
bool &PhysUseDef) const;
bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
SmallSet<unsigned,8> &PhysRefs,
SmallVectorImpl<unsigned> &PhysDefs,
bool &NonLocal) const;
bool isCSECandidate(MachineInstr *MI);
bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
MachineInstr *CSMI, MachineInstr *MI);
void EnterScope(MachineBasicBlock *MBB);
void ExitScope(MachineBasicBlock *MBB);
bool ProcessBlock(MachineBasicBlock *MBB);
void ExitScopeIfDone(MachineDomTreeNode *Node,
DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren);
bool PerformCSE(MachineDomTreeNode *Node);
};
} // end anonymous namespace
char MachineCSE::ID = 0;
char &llvm::MachineCSEID = MachineCSE::ID;
INITIALIZE_PASS_BEGIN(MachineCSE, "machine-cse",
"Machine Common Subexpression Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineCSE, "machine-cse",
"Machine Common Subexpression Elimination", false, false)
/// The source register of a COPY machine instruction can be propagated to all
/// its users, and this propagation could increase the probability of finding
/// common subexpressions. If the COPY has only one user, the COPY itself can
/// be removed.
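///
/// A minimal sketch of the rewrite (virtual register numbers are made up for
/// illustration):
///   %vreg1 = COPY %vreg0
///   %vreg2 = ADD %vreg1, %vreg3
/// becomes
///   %vreg2 = ADD %vreg0, %vreg3
/// and the COPY itself is erased once %vreg1 has no other non-debug uses.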
bool MachineCSE::PerformTrivialCopyPropagation(MachineInstr *MI,
MachineBasicBlock *MBB) {
bool Changed = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
bool OnlyOneUse = MRI->hasOneNonDBGUse(Reg);
MachineInstr *DefMI = MRI->getVRegDef(Reg);
if (!DefMI->isCopy())
continue;
unsigned SrcReg = DefMI->getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
continue;
if (DefMI->getOperand(0).getSubReg())
continue;
// FIXME: We should trivially coalesce subregister copies to expose CSE
// opportunities on instructions with truncated operands (see
// cse-add-with-overflow.ll). This can be done here as follows:
// if (SrcSubReg)
// RC = TRI->getMatchingSuperRegClass(MRI->getRegClass(SrcReg), RC,
// SrcSubReg);
// MO.substVirtReg(SrcReg, SrcSubReg, *TRI);
//
// The 2-addr pass has been updated to handle coalesced subregs. However,
// some machine-specific code still can't handle it.
    // To handle it properly we also need a way to find a constrained subregister
// class given a super-reg class and subreg index.
if (DefMI->getOperand(1).getSubReg())
continue;
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
if (!MRI->constrainRegClass(SrcReg, RC))
continue;
DEBUG(dbgs() << "Coalescing: " << *DefMI);
DEBUG(dbgs() << "*** to: " << *MI);
// Propagate SrcReg of copies to MI.
MO.setReg(SrcReg);
MRI->clearKillFlags(SrcReg);
// Coalesce single use copies.
if (OnlyOneUse) {
DefMI->eraseFromParent();
++NumCoalesces;
}
Changed = true;
}
return Changed;
}
bool
MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
MachineBasicBlock::const_iterator I,
MachineBasicBlock::const_iterator E) const {
unsigned LookAheadLeft = LookAheadLimit;
while (LookAheadLeft) {
// Skip over dbg_value's.
while (I != E && I->isDebugValue())
++I;
if (I == E)
// Reached end of block, register is obviously dead.
return true;
bool SeenDef = false;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = I->getOperand(i);
if (MO.isRegMask() && MO.clobbersPhysReg(Reg))
SeenDef = true;
if (!MO.isReg() || !MO.getReg())
continue;
if (!TRI->regsOverlap(MO.getReg(), Reg))
continue;
if (MO.isUse())
// Found a use!
return false;
SeenDef = true;
}
if (SeenDef)
      // Saw a def of Reg (or an alias) before encountering any use; it's
// trivially dead.
return true;
--LookAheadLeft;
++I;
}
return false;
}
/// hasLivePhysRegDefUses - Return true if the specified instruction reads or
/// writes physical registers (except for dead defs of physical registers). It
/// also returns the physical register def by reference if it's the only one
/// and the instruction does not use a physical register.
bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
SmallVectorImpl<unsigned> &PhysDefs,
                                       bool &PhysUseDef) const {
// First, add all uses to PhysRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
continue;
// Reading constant physregs is ok.
if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
PhysRefs.insert(*AI);
}
// Next, collect all defs into PhysDefs. If any is already in PhysRefs
// (which currently contains only uses), set the PhysUseDef flag.
PhysUseDef = false;
MachineBasicBlock::const_iterator I = MI; I = std::next(I);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
continue;
// Check against PhysRefs even if the def is "dead".
if (PhysRefs.count(Reg))
PhysUseDef = true;
    // If the def is dead, it's ok. But the def may not be marked "dead". That's
// common since this pass is run before livevariables. We can scan
// forward a few instructions and check if it is obviously dead.
if (!MO.isDead() && !isPhysDefTriviallyDead(Reg, I, MBB->end()))
PhysDefs.push_back(Reg);
}
// Finally, add all defs to PhysRefs as well.
for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i)
for (MCRegAliasIterator AI(PhysDefs[i], TRI, true); AI.isValid(); ++AI)
PhysRefs.insert(*AI);
return !PhysRefs.empty();
}
bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
SmallSet<unsigned,8> &PhysRefs,
SmallVectorImpl<unsigned> &PhysDefs,
bool &NonLocal) const {
// For now conservatively returns false if the common subexpression is
// not in the same basic block as the given instruction. The only exception
// is if the common subexpression is in the sole predecessor block.
const MachineBasicBlock *MBB = MI->getParent();
const MachineBasicBlock *CSMBB = CSMI->getParent();
bool CrossMBB = false;
if (CSMBB != MBB) {
if (MBB->pred_size() != 1 || *MBB->pred_begin() != CSMBB)
return false;
for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
if (MRI->isAllocatable(PhysDefs[i]) || MRI->isReserved(PhysDefs[i]))
        // Avoid extending the live range of physical registers if they are
        // allocatable or reserved.
return false;
}
CrossMBB = true;
}
MachineBasicBlock::const_iterator I = CSMI; I = std::next(I);
MachineBasicBlock::const_iterator E = MI;
MachineBasicBlock::const_iterator EE = CSMBB->end();
unsigned LookAheadLeft = LookAheadLimit;
while (LookAheadLeft) {
// Skip over dbg_value's.
while (I != E && I != EE && I->isDebugValue())
++I;
if (I == EE) {
assert(CrossMBB && "Reaching end-of-MBB without finding MI?");
(void)CrossMBB;
CrossMBB = false;
NonLocal = true;
I = MBB->begin();
EE = MBB->end();
continue;
}
if (I == E)
return true;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = I->getOperand(i);
// RegMasks go on instructions like calls that clobber lots of physregs.
// Don't attempt to CSE across such an instruction.
if (MO.isRegMask())
return false;
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(MOReg))
continue;
if (PhysRefs.count(MOReg))
return false;
}
--LookAheadLeft;
++I;
}
return false;
}
bool MachineCSE::isCSECandidate(MachineInstr *MI) {
if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() || MI->isKill() ||
MI->isInlineAsm() || MI->isDebugValue())
return false;
// Ignore copies.
if (MI->isCopyLike())
return false;
// Ignore stuff that we obviously can't move.
if (MI->mayStore() || MI->isCall() || MI->isTerminator() ||
MI->hasUnmodeledSideEffects())
return false;
if (MI->mayLoad()) {
// Okay, this instruction does a load. As a refinement, we allow the target
// to decide whether the loaded value is actually a constant. If so, we can
// actually use it as a load.
if (!MI->isInvariantLoad(AA))
// FIXME: we should be able to hoist loads with no other side effects if
// there are no other instructions which can change memory in this loop.
// This is a trivial form of alias analysis.
return false;
}
return true;
}
/// isProfitableToCSE - Return true if it's profitable to eliminate MI with a
/// common expression that defines Reg.
bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
MachineInstr *CSMI, MachineInstr *MI) {
  // FIXME: Heuristic that works around the lack of live range splitting.
// If CSReg is used at all uses of Reg, CSE should not increase register
// pressure of CSReg.
bool MayIncreasePressure = true;
if (TargetRegisterInfo::isVirtualRegister(CSReg) &&
TargetRegisterInfo::isVirtualRegister(Reg)) {
MayIncreasePressure = false;
SmallPtrSet<MachineInstr*, 8> CSUses;
for (MachineInstr &MI : MRI->use_nodbg_instructions(CSReg)) {
CSUses.insert(&MI);
}
for (MachineInstr &MI : MRI->use_nodbg_instructions(Reg)) {
if (!CSUses.count(&MI)) {
MayIncreasePressure = true;
break;
}
}
}
if (!MayIncreasePressure) return true;
// Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
// an immediate predecessor. We don't want to increase register pressure and
  // end up causing other computations to be spilled.
if (TII->isAsCheapAsAMove(MI)) {
MachineBasicBlock *CSBB = CSMI->getParent();
MachineBasicBlock *BB = MI->getParent();
if (CSBB != BB && !CSBB->isSuccessor(BB))
return false;
}
  // Heuristics #2: If the expression doesn't use a virtual register and the
  // only uses of the redundant computation are copies, do not CSE.
bool HasVRegUse = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
HasVRegUse = true;
break;
}
}
if (!HasVRegUse) {
bool HasNonCopyUse = false;
for (MachineInstr &MI : MRI->use_nodbg_instructions(Reg)) {
// Ignore copies.
if (!MI.isCopyLike()) {
HasNonCopyUse = true;
break;
}
}
if (!HasNonCopyUse)
return false;
}
// Heuristics #3: If the common subexpression is used by PHIs, do not reuse
// it unless the defined value is already used in the BB of the new use.
bool HasPHI = false;
SmallPtrSet<MachineBasicBlock*, 4> CSBBs;
for (MachineInstr &MI : MRI->use_nodbg_instructions(CSReg)) {
HasPHI |= MI.isPHI();
CSBBs.insert(MI.getParent());
}
if (!HasPHI)
return true;
return CSBBs.count(MI->getParent());
}
void MachineCSE::EnterScope(MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
ScopeType *Scope = new ScopeType(VNT);
ScopeMap[MBB] = Scope;
}
void MachineCSE::ExitScope(MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB);
assert(SI != ScopeMap.end());
delete SI->second;
ScopeMap.erase(SI);
}
bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
bool Changed = false;
SmallVector<std::pair<unsigned, unsigned>, 8> CSEPairs;
SmallVector<unsigned, 2> ImplicitDefsToUpdate;
SmallVector<unsigned, 2> ImplicitDefs;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E; ) {
MachineInstr *MI = &*I;
++I;
if (!isCSECandidate(MI))
continue;
bool FoundCSE = VNT.count(MI);
if (!FoundCSE) {
// Using trivial copy propagation to find more CSE opportunities.
if (PerformTrivialCopyPropagation(MI, MBB)) {
Changed = true;
// After coalescing MI itself may become a copy.
if (MI->isCopyLike())
continue;
// Try again to see if CSE is possible.
FoundCSE = VNT.count(MI);
}
}
// Commute commutable instructions.
bool Commuted = false;
if (!FoundCSE && MI->isCommutable()) {
MachineInstr *NewMI = TII->commuteInstruction(MI);
if (NewMI) {
Commuted = true;
FoundCSE = VNT.count(NewMI);
if (NewMI != MI) {
// New instruction. It doesn't need to be kept.
NewMI->eraseFromParent();
Changed = true;
} else if (!FoundCSE)
// MI was changed but it didn't help, commute it back!
(void)TII->commuteInstruction(MI);
}
}
// If the instruction defines physical registers and the values *may* be
// used, then it's not safe to replace it with a common subexpression.
// It's also not safe if the instruction uses physical registers.
bool CrossMBBPhysDef = false;
SmallSet<unsigned, 8> PhysRefs;
SmallVector<unsigned, 2> PhysDefs;
bool PhysUseDef = false;
if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs,
PhysDefs, PhysUseDef)) {
FoundCSE = false;
      // ... Unless the CS is local or is in the sole predecessor block,
      // it also defines the physical register, that def is not clobbered
      // in between, and the physical register uses were not clobbered either.
// This can never be the case if the instruction both uses and
// defines the same physical register, which was detected above.
if (!PhysUseDef) {
unsigned CSVN = VNT.lookup(MI);
MachineInstr *CSMI = Exps[CSVN];
if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
FoundCSE = true;
}
}
if (!FoundCSE) {
VNT.insert(MI, CurrVN++);
Exps.push_back(MI);
continue;
}
// Found a common subexpression, eliminate it.
unsigned CSVN = VNT.lookup(MI);
MachineInstr *CSMI = Exps[CSVN];
DEBUG(dbgs() << "Examining: " << *MI);
DEBUG(dbgs() << "*** Found a common subexpression: " << *CSMI);
// Check if it's profitable to perform this CSE.
bool DoCSE = true;
unsigned NumDefs = MI->getDesc().getNumDefs() +
MI->getDesc().getNumImplicitDefs();
for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned OldReg = MO.getReg();
unsigned NewReg = CSMI->getOperand(i).getReg();
      // Go through implicit defs of CSMI and MI: if a def is not dead at MI,
// we should make sure it is not dead at CSMI.
if (MO.isImplicit() && !MO.isDead() && CSMI->getOperand(i).isDead())
ImplicitDefsToUpdate.push_back(i);
// Keep track of implicit defs of CSMI and MI, to clear possibly
// made-redundant kill flags.
if (MO.isImplicit() && !MO.isDead() && OldReg == NewReg)
ImplicitDefs.push_back(OldReg);
if (OldReg == NewReg) {
--NumDefs;
continue;
}
assert(TargetRegisterInfo::isVirtualRegister(OldReg) &&
TargetRegisterInfo::isVirtualRegister(NewReg) &&
"Do not CSE physical register defs!");
if (!isProfitableToCSE(NewReg, OldReg, CSMI, MI)) {
DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
DoCSE = false;
break;
}
// Don't perform CSE if the result of the old instruction cannot exist
// within the register class of the new instruction.
const TargetRegisterClass *OldRC = MRI->getRegClass(OldReg);
if (!MRI->constrainRegClass(NewReg, OldRC)) {
DEBUG(dbgs() << "*** Not the same register class, avoid CSE!\n");
DoCSE = false;
break;
}
CSEPairs.push_back(std::make_pair(OldReg, NewReg));
--NumDefs;
}
// Actually perform the elimination.
if (DoCSE) {
for (unsigned i = 0, e = CSEPairs.size(); i != e; ++i) {
unsigned OldReg = CSEPairs[i].first;
unsigned NewReg = CSEPairs[i].second;
        // OldReg may have been unused but is used now; clear the Dead flag.
MachineInstr *Def = MRI->getUniqueVRegDef(NewReg);
assert(Def != nullptr && "CSEd register has no unique definition?");
Def->clearRegisterDeads(NewReg);
// Replace with NewReg and clear kill flags which may be wrong now.
MRI->replaceRegWith(OldReg, NewReg);
MRI->clearKillFlags(NewReg);
}
      // Go through implicit defs of CSMI and MI: if a def is not dead at MI,
// we should make sure it is not dead at CSMI.
for (unsigned i = 0, e = ImplicitDefsToUpdate.size(); i != e; ++i)
CSMI->getOperand(ImplicitDefsToUpdate[i]).setIsDead(false);
// Go through implicit defs of CSMI and MI, and clear the kill flags on
// their uses in all the instructions between CSMI and MI.
// We might have made some of the kill flags redundant, consider:
// subs ... %NZCV<imp-def> <- CSMI
// csinc ... %NZCV<imp-use,kill> <- this kill flag isn't valid anymore
// subs ... %NZCV<imp-def> <- MI, to be eliminated
// csinc ... %NZCV<imp-use,kill>
// Since we eliminated MI, and reused a register imp-def'd by CSMI
// (here %NZCV), that register, if it was killed before MI, should have
      // that kill flag removed, because its lifetime was extended.
if (CSMI->getParent() == MI->getParent()) {
for (MachineBasicBlock::iterator II = CSMI, IE = MI; II != IE; ++II)
for (auto ImplicitDef : ImplicitDefs)
if (MachineOperand *MO = II->findRegisterUseOperand(
ImplicitDef, /*isKill=*/true, TRI))
MO->setIsKill(false);
} else {
// If the instructions aren't in the same BB, bail out and clear the
// kill flag on all uses of the imp-def'd register.
for (auto ImplicitDef : ImplicitDefs)
MRI->clearKillFlags(ImplicitDef);
}
if (CrossMBBPhysDef) {
        // Add physical register defs now coming in from a predecessor to the
        // MBB livein list.
while (!PhysDefs.empty()) {
unsigned LiveIn = PhysDefs.pop_back_val();
if (!MBB->isLiveIn(LiveIn))
MBB->addLiveIn(LiveIn);
}
++NumCrossBBCSEs;
}
MI->eraseFromParent();
++NumCSEs;
if (!PhysRefs.empty())
++NumPhysCSEs;
if (Commuted)
++NumCommutes;
Changed = true;
} else {
VNT.insert(MI, CurrVN++);
Exps.push_back(MI);
}
CSEPairs.clear();
ImplicitDefsToUpdate.clear();
ImplicitDefs.clear();
}
return Changed;
}
/// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
/// dominator tree node if it's a leaf or all of its children are done. Walk
/// up the dominator tree to destroy ancestors which are now done.
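/// For a hypothetical dominator tree A -> {B, C}: finishing B only decrements
/// A's open-children count, so A's scope stays alive for C's lookups;
/// finishing C drops the count to zero and pops both C's and A's scopes.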
void
MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node,
DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren) {
if (OpenChildren[Node])
return;
// Pop scope.
ExitScope(Node->getBlock());
  // Now traverse upwards to pop ancestors whose offspring are all done.
while (MachineDomTreeNode *Parent = Node->getIDom()) {
unsigned Left = --OpenChildren[Parent];
if (Left != 0)
break;
ExitScope(Parent->getBlock());
Node = Parent;
}
}
bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
SmallVector<MachineDomTreeNode*, 32> Scopes;
SmallVector<MachineDomTreeNode*, 8> WorkList;
DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
CurrVN = 0;
// Perform a DFS walk to determine the order of visit.
WorkList.push_back(Node);
do {
Node = WorkList.pop_back_val();
Scopes.push_back(Node);
const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
unsigned NumChildren = Children.size();
OpenChildren[Node] = NumChildren;
for (unsigned i = 0; i != NumChildren; ++i) {
MachineDomTreeNode *Child = Children[i];
WorkList.push_back(Child);
}
} while (!WorkList.empty());
// Now perform CSE.
bool Changed = false;
for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
MachineDomTreeNode *Node = Scopes[i];
MachineBasicBlock *MBB = Node->getBlock();
EnterScope(MBB);
Changed |= ProcessBlock(MBB);
// If it's a leaf node, it's done. Traverse upwards to pop ancestors.
ExitScopeIfDone(Node, OpenChildren);
}
return Changed;
}
bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<MachineDominatorTree>();
LookAheadLimit = TII->getMachineCSELookAheadLimit();
return PerformCSE(DT->getRootNode());
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ExecutionDepsFix.cpp | //===- ExecutionDepsFix.cpp - Fix execution dependency issues ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the execution dependency fix pass.
//
// Some X86 SSE instructions like mov, and, or, xor are available in different
// variants for different operand types. These variant instructions are
// equivalent, but on Nehalem and newer cpus there is extra latency
// transferring data between integer and floating point domains. ARM cores
// have similar issues when they are configured with both VFP and NEON
// pipelines.
//
// This pass changes the variant instructions to minimize domain crossings.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "execution-fix"
/// A DomainValue is a bit like LiveIntervals' ValNo, but it also keeps track
/// of execution domains.
///
/// An open DomainValue represents a set of instructions that can still switch
/// execution domain. Multiple registers may refer to the same open
/// DomainValue - they will eventually be collapsed to the same execution
/// domain.
///
/// A collapsed DomainValue represents a single register that has been forced
/// into one or more execution domains. There is a separate collapsed
/// DomainValue for each register, but it may contain multiple execution
/// domains. A register value is initially created in a single execution
/// domain, but if we were forced to pay the penalty of a domain crossing, we
/// keep track of the fact that the register is now available in multiple
/// domains.
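///
/// Illustrative example (domain numbering is target-defined; the values here
/// are hypothetical): with domain 0 = integer and domain 1 = floating point,
/// an open DomainValue with AvailableDomains = 0b11 may still be collapsed
/// either way, while a collapsed one with AvailableDomains = 0b01 is only
/// available for free in the integer domain.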
namespace {
struct DomainValue {
// Basic reference counting.
unsigned Refs;
// Bitmask of available domains. For an open DomainValue, it is the still
// possible domains for collapsing. For a collapsed DomainValue it is the
// domains where the register is available for free.
unsigned AvailableDomains;
// Pointer to the next DomainValue in a chain. When two DomainValues are
// merged, Victim.Next is set to point to Victor, so old DomainValue
// references can be updated by following the chain.
DomainValue *Next;
// Twiddleable instructions using or defining these registers.
SmallVector<MachineInstr*, 8> Instrs;
// A collapsed DomainValue has no instructions to twiddle - it simply keeps
// track of the domains where the registers are already available.
bool isCollapsed() const { return Instrs.empty(); }
// Is domain available?
bool hasDomain(unsigned domain) const {
assert(domain <
static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
"undefined behavior");
return AvailableDomains & (1u << domain);
}
// Mark domain as available.
void addDomain(unsigned domain) {
AvailableDomains |= 1u << domain;
}
// Restrict to a single domain available.
void setSingleDomain(unsigned domain) {
AvailableDomains = 1u << domain;
}
// Return bitmask of domains that are available and in mask.
unsigned getCommonDomains(unsigned mask) const {
return AvailableDomains & mask;
}
// First domain available.
unsigned getFirstDomain() const {
return countTrailingZeros(AvailableDomains);
}
DomainValue() : Refs(0) { clear(); }
// Clear this DomainValue and point to next which has all its data.
void clear() {
AvailableDomains = 0;
Next = nullptr;
Instrs.clear();
}
};
}
namespace {
/// Information about a live register.
struct LiveReg {
/// Value currently in this register, or NULL when no value is being tracked.
/// This counts as a DomainValue reference.
DomainValue *Value;
/// Instruction that defined this register, relative to the beginning of the
/// current basic block. When a LiveReg is used to represent a live-out
/// register, this value is relative to the end of the basic block, so it
/// will be a negative number.
int Def;
};
} // anonymous namespace
namespace {
class ExeDepsFix : public MachineFunctionPass {
static char ID;
SpecificBumpPtrAllocator<DomainValue> Allocator;
SmallVector<DomainValue*,16> Avail;
const TargetRegisterClass *const RC;
MachineFunction *MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
std::vector<SmallVector<int, 1>> AliasMap;
const unsigned NumRegs;
LiveReg *LiveRegs;
typedef DenseMap<MachineBasicBlock*, LiveReg*> LiveOutMap;
LiveOutMap LiveOuts;
/// List of undefined register reads in this block in forward order.
std::vector<std::pair<MachineInstr*, unsigned> > UndefReads;
/// Storage for register unit liveness.
LivePhysRegs LiveRegSet;
/// Current instruction number.
/// The first instruction in each basic block is 0.
int CurInstr;
/// True when the current block has a predecessor that hasn't been visited
/// yet.
bool SeenUnknownBackEdge;
public:
ExeDepsFix(const TargetRegisterClass *rc)
: MachineFunctionPass(ID), RC(rc), NumRegs(RC->getNumRegs()) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override;
  const char *getPassName() const override {
return "Execution dependency fix";
}
private:
iterator_range<SmallVectorImpl<int>::const_iterator>
regIndices(unsigned Reg) const;
// DomainValue allocation.
DomainValue *alloc(int domain = -1);
DomainValue *retain(DomainValue *DV) {
if (DV) ++DV->Refs;
return DV;
}
void release(DomainValue*);
DomainValue *resolve(DomainValue*&);
// LiveRegs manipulations.
void setLiveReg(int rx, DomainValue *DV);
void kill(int rx);
void force(int rx, unsigned domain);
void collapse(DomainValue *dv, unsigned domain);
bool merge(DomainValue *A, DomainValue *B);
void enterBasicBlock(MachineBasicBlock*);
void leaveBasicBlock(MachineBasicBlock*);
void visitInstr(MachineInstr*);
void processDefs(MachineInstr*, bool Kill);
void visitSoftInstr(MachineInstr*, unsigned mask);
void visitHardInstr(MachineInstr*, unsigned domain);
bool shouldBreakDependence(MachineInstr*, unsigned OpIdx, unsigned Pref);
void processUndefReads(MachineBasicBlock*);
};
}
char ExeDepsFix::ID = 0;
/// Translate TRI register number to a list of indices into our smaller tables
/// of interesting registers.
iterator_range<SmallVectorImpl<int>::const_iterator>
ExeDepsFix::regIndices(unsigned Reg) const {
assert(Reg < AliasMap.size() && "Invalid register");
const auto &Entry = AliasMap[Reg];
return make_range(Entry.begin(), Entry.end());
}
DomainValue *ExeDepsFix::alloc(int domain) {
DomainValue *dv = Avail.empty() ?
new(Allocator.Allocate()) DomainValue :
Avail.pop_back_val();
if (domain >= 0)
dv->addDomain(domain);
assert(dv->Refs == 0 && "Reference count wasn't cleared");
assert(!dv->Next && "Chained DomainValue shouldn't have been recycled");
return dv;
}
/// Release a reference to DV. When the last reference is released,
/// collapse if needed.
void ExeDepsFix::release(DomainValue *DV) {
while (DV) {
assert(DV->Refs && "Bad DomainValue");
if (--DV->Refs)
return;
// There are no more DV references. Collapse any contained instructions.
if (DV->AvailableDomains && !DV->isCollapsed())
collapse(DV, DV->getFirstDomain());
DomainValue *Next = DV->Next;
DV->clear();
Avail.push_back(DV);
// Also release the next DomainValue in the chain.
DV = Next;
}
}
/// Follow the chain of dead DomainValues until a live DomainValue is reached.
/// Update the referenced pointer when necessary.
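/// E.g. (chain shown schematically): if DVRef points at a dead value D1 whose
/// Next links through D2 to the live value D3, the walk retargets DVRef
/// straight at D3 and fixes the reference counts, so later lookups skip the
/// dead chain.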
DomainValue *ExeDepsFix::resolve(DomainValue *&DVRef) {
DomainValue *DV = DVRef;
if (!DV || !DV->Next)
return DV;
// DV has a chain. Find the end.
do DV = DV->Next;
while (DV->Next);
// Update DVRef to point to DV.
retain(DV);
release(DVRef);
DVRef = DV;
return DV;
}
/// Set LiveRegs[rx] = dv, updating reference counts.
void ExeDepsFix::setLiveReg(int rx, DomainValue *dv) {
assert(unsigned(rx) < NumRegs && "Invalid index");
assert(LiveRegs && "Must enter basic block first.");
if (LiveRegs[rx].Value == dv)
return;
if (LiveRegs[rx].Value)
release(LiveRegs[rx].Value);
LiveRegs[rx].Value = retain(dv);
}
// Kill register rx, recycle or collapse any DomainValue.
void ExeDepsFix::kill(int rx) {
assert(unsigned(rx) < NumRegs && "Invalid index");
assert(LiveRegs && "Must enter basic block first.");
if (!LiveRegs[rx].Value)
return;
release(LiveRegs[rx].Value);
LiveRegs[rx].Value = nullptr;
}
/// Force register rx into domain.
void ExeDepsFix::force(int rx, unsigned domain) {
assert(unsigned(rx) < NumRegs && "Invalid index");
assert(LiveRegs && "Must enter basic block first.");
if (DomainValue *dv = LiveRegs[rx].Value) {
if (dv->isCollapsed())
dv->addDomain(domain);
else if (dv->hasDomain(domain))
collapse(dv, domain);
else {
// This is an incompatible open DomainValue. Collapse it to whatever and
// force the new value into domain. This costs a domain crossing.
collapse(dv, dv->getFirstDomain());
assert(LiveRegs[rx].Value && "Not live after collapse?");
LiveRegs[rx].Value->addDomain(domain);
}
} else {
// Set up basic collapsed DomainValue.
setLiveReg(rx, alloc(domain));
}
}
/// Collapse open DomainValue into given domain. If there are multiple
/// registers using dv, they each get a unique collapsed DomainValue.
void ExeDepsFix::collapse(DomainValue *dv, unsigned domain) {
assert(dv->hasDomain(domain) && "Cannot collapse");
// Collapse all the instructions.
while (!dv->Instrs.empty())
TII->setExecutionDomain(dv->Instrs.pop_back_val(), domain);
dv->setSingleDomain(domain);
// If there are multiple users, give them new, unique DomainValues.
if (LiveRegs && dv->Refs > 1)
for (unsigned rx = 0; rx != NumRegs; ++rx)
if (LiveRegs[rx].Value == dv)
setLiveReg(rx, alloc(domain));
}
/// All instructions and registers in B are moved to A, and B is released.
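/// Hypothetical example: merging A (AvailableDomains = 0b011) with B
/// (AvailableDomains = 0b110) restricts A to the common mask 0b010, splices
/// B's pending instructions onto A, and leaves B as a forwarding stub via
/// B->Next so stale references still resolve to A.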
bool ExeDepsFix::merge(DomainValue *A, DomainValue *B) {
assert(!A->isCollapsed() && "Cannot merge into collapsed");
assert(!B->isCollapsed() && "Cannot merge from collapsed");
if (A == B)
return true;
// Restrict to the domains that A and B have in common.
unsigned common = A->getCommonDomains(B->AvailableDomains);
if (!common)
return false;
A->AvailableDomains = common;
A->Instrs.append(B->Instrs.begin(), B->Instrs.end());
// Clear the old DomainValue so we won't try to swizzle instructions twice.
B->clear();
  // All uses of B are redirected to A.
B->Next = retain(A);
for (unsigned rx = 0; rx != NumRegs; ++rx) {
assert(LiveRegs && "no space allocated for live registers");
if (LiveRegs[rx].Value == B)
setLiveReg(rx, A);
}
return true;
}
/// Set up LiveRegs by merging predecessor live-out values.
void ExeDepsFix::enterBasicBlock(MachineBasicBlock *MBB) {
// Detect back-edges from predecessors we haven't processed yet.
SeenUnknownBackEdge = false;
// Reset instruction counter in each basic block.
CurInstr = 0;
// Set up UndefReads to track undefined register reads.
UndefReads.clear();
LiveRegSet.clear();
// Set up LiveRegs to represent registers entering MBB.
if (!LiveRegs)
LiveRegs = new LiveReg[NumRegs];
// Default values are 'nothing happened a long time ago'.
for (unsigned rx = 0; rx != NumRegs; ++rx) {
LiveRegs[rx].Value = nullptr;
LiveRegs[rx].Def = -(1 << 20);
}
// This is the entry block.
if (MBB->pred_empty()) {
for (MachineBasicBlock::livein_iterator i = MBB->livein_begin(),
e = MBB->livein_end(); i != e; ++i) {
for (int rx : regIndices(*i)) {
// Treat function live-ins as if they were defined just before the first
// instruction. Usually, function arguments are set up immediately
// before the call.
LiveRegs[rx].Def = -1;
}
}
DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": entry\n");
return;
}
// Try to coalesce live-out registers from predecessors.
for (MachineBasicBlock::const_pred_iterator pi = MBB->pred_begin(),
pe = MBB->pred_end(); pi != pe; ++pi) {
LiveOutMap::const_iterator fi = LiveOuts.find(*pi);
if (fi == LiveOuts.end()) {
SeenUnknownBackEdge = true;
continue;
}
assert(fi->second && "Can't have NULL entries");
for (unsigned rx = 0; rx != NumRegs; ++rx) {
// Use the most recent predecessor def for each register.
LiveRegs[rx].Def = std::max(LiveRegs[rx].Def, fi->second[rx].Def);
DomainValue *pdv = resolve(fi->second[rx].Value);
if (!pdv)
continue;
if (!LiveRegs[rx].Value) {
setLiveReg(rx, pdv);
continue;
}
// We have a live DomainValue from more than one predecessor.
if (LiveRegs[rx].Value->isCollapsed()) {
// We are already collapsed, but predecessor is not. Force it.
unsigned Domain = LiveRegs[rx].Value->getFirstDomain();
if (!pdv->isCollapsed() && pdv->hasDomain(Domain))
collapse(pdv, Domain);
continue;
}
// Currently open, merge in predecessor.
if (!pdv->isCollapsed())
merge(LiveRegs[rx].Value, pdv);
else
force(rx, pdv->getFirstDomain());
}
}
DEBUG(dbgs() << "BB#" << MBB->getNumber()
<< (SeenUnknownBackEdge ? ": incomplete\n" : ": all preds known\n"));
}
void ExeDepsFix::leaveBasicBlock(MachineBasicBlock *MBB) {
assert(LiveRegs && "Must enter basic block first.");
// Save live registers at end of MBB - used by enterBasicBlock().
// Also use LiveOuts as a visited set to detect back-edges.
bool First = LiveOuts.insert(std::make_pair(MBB, LiveRegs)).second;
if (First) {
// LiveRegs was inserted in LiveOuts. Adjust all defs to be relative to
// the end of this block instead of the beginning.
for (unsigned i = 0, e = NumRegs; i != e; ++i)
LiveRegs[i].Def -= CurInstr;
} else {
// Insertion failed, this must be the second pass.
// Release all the DomainValues instead of keeping them.
for (unsigned i = 0, e = NumRegs; i != e; ++i)
release(LiveRegs[i].Value);
delete[] LiveRegs;
}
LiveRegs = nullptr;
}
void ExeDepsFix::visitInstr(MachineInstr *MI) {
if (MI->isDebugValue())
return;
// Update instructions with explicit execution domains.
std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(MI);
if (DomP.first) {
if (DomP.second)
visitSoftInstr(MI, DomP.second);
else
visitHardInstr(MI, DomP.first);
}
// Process defs to track register ages, and kill values clobbered by generic
// instructions.
processDefs(MI, !DomP.first);
}
/// \brief Return true if it makes sense to break dependence on a partial def
/// or undef use.
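///
/// Worked example (instruction numbers hypothetical): if rx was last written
/// at LiveRegs[rx].Def = 3 and CurInstr = 10, the clearance is 7; a
/// preference of Pref = 16 exceeds it, so breaking the dependency on rx is
/// worthwhile, while Pref = 4 would leave it alone.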
bool ExeDepsFix::shouldBreakDependence(MachineInstr *MI, unsigned OpIdx,
unsigned Pref) {
unsigned reg = MI->getOperand(OpIdx).getReg();
for (int rx : regIndices(reg)) {
unsigned Clearance = CurInstr - LiveRegs[rx].Def;
DEBUG(dbgs() << "Clearance: " << Clearance << ", want " << Pref);
if (Pref > Clearance) {
DEBUG(dbgs() << ": Break dependency.\n");
continue;
}
// The current clearance seems OK, but we may be ignoring a def from a
// back-edge.
if (!SeenUnknownBackEdge || Pref <= unsigned(CurInstr)) {
      DEBUG(dbgs() << ": OK.\n");
return false;
}
// A def from an unprocessed back-edge may make us break this dependency.
DEBUG(dbgs() << ": Wait for back-edge to resolve.\n");
return false;
}
return true;
}
// Update def-ages for registers defined by MI.
// If Kill is set, also kill off DomainValues clobbered by the defs.
//
// Also break dependencies on partial defs and undef uses.
void ExeDepsFix::processDefs(MachineInstr *MI, bool Kill) {
assert(!MI->isDebugValue() && "Won't process debug values");
// Break dependence on undef uses. Do this before updating LiveRegs below.
unsigned OpNum;
unsigned Pref = TII->getUndefRegClearance(MI, OpNum, TRI);
if (Pref) {
if (shouldBreakDependence(MI, OpNum, Pref))
UndefReads.push_back(std::make_pair(MI, OpNum));
}
const MCInstrDesc &MCID = MI->getDesc();
for (unsigned i = 0,
e = MI->isVariadic() ? MI->getNumOperands() : MCID.getNumDefs();
i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
if (MO.isImplicit())
break;
if (MO.isUse())
continue;
for (int rx : regIndices(MO.getReg())) {
// This instruction explicitly defines rx.
DEBUG(dbgs() << TRI->getName(RC->getRegister(rx)) << ":\t" << CurInstr
<< '\t' << *MI);
// Check clearance before partial register updates.
// Call breakDependence before setting LiveRegs[rx].Def.
unsigned Pref = TII->getPartialRegUpdateClearance(MI, i, TRI);
if (Pref && shouldBreakDependence(MI, i, Pref))
TII->breakPartialRegDependency(MI, i, TRI);
// How many instructions since rx was last written?
LiveRegs[rx].Def = CurInstr;
// Kill off domains redefined by generic instructions.
if (Kill)
kill(rx);
}
}
++CurInstr;
}
/// \brief Break false dependencies on undefined register reads.
///
/// Walk the block backward computing precise liveness. This is expensive, so we
/// only do it on demand. Note that the occurrence of undefined register reads
/// that should be broken is very rare, but when they occur we may have many in
/// a single block.
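///
/// A sketch of the traversal: starting from the block's live-outs, liveness
/// is stepped backwards one instruction at a time; when the walk reaches a
/// recorded undef read whose register is no longer live at that point, the
/// false dependency is broken via breakPartialRegDependency.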
void ExeDepsFix::processUndefReads(MachineBasicBlock *MBB) {
if (UndefReads.empty())
return;
// Collect this block's live out register units.
LiveRegSet.init(TRI);
LiveRegSet.addLiveOuts(MBB);
MachineInstr *UndefMI = UndefReads.back().first;
unsigned OpIdx = UndefReads.back().second;
for (MachineBasicBlock::reverse_iterator I = MBB->rbegin(), E = MBB->rend();
I != E; ++I) {
// Update liveness, including the current instruction's defs.
LiveRegSet.stepBackward(*I);
if (UndefMI == &*I) {
if (!LiveRegSet.contains(UndefMI->getOperand(OpIdx).getReg()))
TII->breakPartialRegDependency(UndefMI, OpIdx, TRI);
UndefReads.pop_back();
if (UndefReads.empty())
return;
UndefMI = UndefReads.back().first;
OpIdx = UndefReads.back().second;
}
}
}
// A hard instruction only works in one domain. All input registers will be
// forced into that domain.
void ExeDepsFix::visitHardInstr(MachineInstr *mi, unsigned domain) {
// Collapse all uses.
for (unsigned i = mi->getDesc().getNumDefs(),
e = mi->getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &mo = mi->getOperand(i);
if (!mo.isReg()) continue;
for (int rx : regIndices(mo.getReg())) {
force(rx, domain);
}
}
// Kill all defs and force them.
for (unsigned i = 0, e = mi->getDesc().getNumDefs(); i != e; ++i) {
MachineOperand &mo = mi->getOperand(i);
if (!mo.isReg()) continue;
for (int rx : regIndices(mo.getReg())) {
kill(rx);
force(rx, domain);
}
}
}
// A soft instruction can be changed to work in other domains given by mask.
void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
// Bitmask of available domains for this instruction after taking collapsed
// operands into account.
unsigned available = mask;
// Scan the explicit use operands for incoming domains.
SmallVector<int, 4> used;
if (LiveRegs)
for (unsigned i = mi->getDesc().getNumDefs(),
e = mi->getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &mo = mi->getOperand(i);
if (!mo.isReg()) continue;
for (int rx : regIndices(mo.getReg())) {
DomainValue *dv = LiveRegs[rx].Value;
if (dv == nullptr)
continue;
// Bitmask of domains that dv and available have in common.
unsigned common = dv->getCommonDomains(available);
// Is it possible to use this collapsed register for free?
if (dv->isCollapsed()) {
// Restrict available domains to the ones in common with the operand.
// If there are no common domains, we must pay the cross-domain
// penalty for this operand.
if (common) available = common;
} else if (common)
// Open DomainValue is compatible, save it for merging.
used.push_back(rx);
else
// Open DomainValue is not compatible with instruction. It is useless
// now.
kill(rx);
}
}
// If the collapsed operands force a single domain, propagate the collapse.
if (isPowerOf2_32(available)) {
unsigned domain = countTrailingZeros(available);
TII->setExecutionDomain(mi, domain);
visitHardInstr(mi, domain);
return;
}
// Kill off any remaining uses that don't match available, and build a list of
// incoming DomainValues that we want to merge.
SmallVector<LiveReg, 4> Regs;
for (SmallVectorImpl<int>::iterator i=used.begin(), e=used.end(); i!=e; ++i) {
int rx = *i;
assert(LiveRegs && "no space allocated for live registers");
const LiveReg &LR = LiveRegs[rx];
// This useless DomainValue could have been missed above.
if (!LR.Value->getCommonDomains(available)) {
kill(rx);
continue;
}
// Sorted insertion.
bool Inserted = false;
for (SmallVectorImpl<LiveReg>::iterator i = Regs.begin(), e = Regs.end();
i != e && !Inserted; ++i) {
if (LR.Def < i->Def) {
Inserted = true;
Regs.insert(i, LR);
}
}
if (!Inserted)
Regs.push_back(LR);
}
  // Regs is now sorted by the def order of the incoming DomainValues. Try to
  // merge them all, giving priority to the latest ones.
DomainValue *dv = nullptr;
while (!Regs.empty()) {
if (!dv) {
dv = Regs.pop_back_val().Value;
// Force the first dv to match the current instruction.
dv->AvailableDomains = dv->getCommonDomains(available);
assert(dv->AvailableDomains && "Domain should have been filtered");
continue;
}
DomainValue *Latest = Regs.pop_back_val().Value;
// Skip already merged values.
if (Latest == dv || Latest->Next)
continue;
if (merge(dv, Latest))
continue;
// If latest didn't merge, it is useless now. Kill all registers using it.
for (int i : used) {
assert(LiveRegs && "no space allocated for live registers");
if (LiveRegs[i].Value == Latest)
kill(i);
}
}
// dv is the DomainValue we are going to use for this instruction.
if (!dv) {
dv = alloc();
dv->AvailableDomains = available;
}
dv->Instrs.push_back(mi);
// Finally set all defs and non-collapsed uses to dv. We must iterate through
  // all the operands, including implicit-def ones.
for (MachineInstr::mop_iterator ii = mi->operands_begin(),
ee = mi->operands_end();
ii != ee; ++ii) {
MachineOperand &mo = *ii;
if (!mo.isReg()) continue;
for (int rx : regIndices(mo.getReg())) {
if (!LiveRegs[rx].Value || (mo.isDef() && LiveRegs[rx].Value != dv)) {
kill(rx);
setLiveReg(rx, dv);
}
}
}
}
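// Illustrative walk-through of the collapse check above (hypothetical domain
// numbering, not taken from any real target): suppose this instruction
// supports domains 1 and 2 (mask = 0b0110) and one collapsed operand is only
// available in domain 2 (0b0100). The common mask is then 0b0100, a power of
// two, so the instruction is collapsed into domain 2 and re-dispatched to
// visitHardInstr above.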
bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
TII = MF->getSubtarget().getInstrInfo();
TRI = MF->getSubtarget().getRegisterInfo();
LiveRegs = nullptr;
assert(NumRegs == RC->getNumRegs() && "Bad regclass");
DEBUG(dbgs() << "********** FIX EXECUTION DEPENDENCIES: "
<< TRI->getRegClassName(RC) << " **********\n");
// If no relevant registers are used in the function, we can skip it
// completely.
bool anyregs = false;
for (TargetRegisterClass::const_iterator I = RC->begin(), E = RC->end();
I != E; ++I)
if (MF->getRegInfo().isPhysRegUsed(*I)) {
anyregs = true;
break;
}
if (!anyregs) return false;
// Initialize the AliasMap on the first use.
if (AliasMap.empty()) {
// Given a PhysReg, AliasMap[PhysReg] returns a list of indices into RC and
// therefore the LiveRegs array.
AliasMap.resize(TRI->getNumRegs());
for (unsigned i = 0, e = RC->getNumRegs(); i != e; ++i)
for (MCRegAliasIterator AI(RC->getRegister(i), TRI, true);
AI.isValid(); ++AI)
AliasMap[*AI].push_back(i);
}
MachineBasicBlock *Entry = MF->begin();
ReversePostOrderTraversal<MachineBasicBlock*> RPOT(Entry);
SmallVector<MachineBasicBlock*, 16> Loops;
for (ReversePostOrderTraversal<MachineBasicBlock*>::rpo_iterator
MBBI = RPOT.begin(), MBBE = RPOT.end(); MBBI != MBBE; ++MBBI) {
MachineBasicBlock *MBB = *MBBI;
enterBasicBlock(MBB);
if (SeenUnknownBackEdge)
Loops.push_back(MBB);
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
++I)
visitInstr(I);
processUndefReads(MBB);
leaveBasicBlock(MBB);
}
// Visit all the loop blocks again in order to merge DomainValues from
// back-edges.
for (unsigned i = 0, e = Loops.size(); i != e; ++i) {
MachineBasicBlock *MBB = Loops[i];
enterBasicBlock(MBB);
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
++I)
if (!I->isDebugValue())
processDefs(I, false);
processUndefReads(MBB);
leaveBasicBlock(MBB);
}
// Clear the LiveOuts vectors and collapse any remaining DomainValues.
for (ReversePostOrderTraversal<MachineBasicBlock*>::rpo_iterator
MBBI = RPOT.begin(), MBBE = RPOT.end(); MBBI != MBBE; ++MBBI) {
LiveOutMap::const_iterator FI = LiveOuts.find(*MBBI);
if (FI == LiveOuts.end() || !FI->second)
continue;
for (unsigned i = 0, e = NumRegs; i != e; ++i)
if (FI->second[i].Value)
release(FI->second[i].Value);
delete[] FI->second;
}
LiveOuts.clear();
UndefReads.clear();
Avail.clear();
Allocator.DestroyAll();
return false;
}
FunctionPass *
llvm::createExecutionDependencyFixPass(const TargetRegisterClass *RC) {
return new ExeDepsFix(RC);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/PrologEpilogInserter.cpp | //===-- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;
#define DEBUG_TYPE "pei"
namespace {
class PEI : public MachineFunctionPass {
public:
static char ID;
PEI() : MachineFunctionPass(ID) {
initializePEIPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
///
bool runOnMachineFunction(MachineFunction &Fn) override;
private:
RegScavenger *RS;
// MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
// stack frame indexes.
unsigned MinCSFrameIndex, MaxCSFrameIndex;
// Save and Restore blocks of the current function.
MachineBasicBlock *SaveBlock;
SmallVector<MachineBasicBlock *, 4> RestoreBlocks;
// Flag to control whether to use the register scavenger to resolve
// frame index materialization registers. Set according to
// TRI->requiresFrameIndexScavenging() for the current function.
bool FrameIndexVirtualScavenging;
void calculateSets(MachineFunction &Fn);
void calculateCallsInformation(MachineFunction &Fn);
void assignCalleeSavedSpillSlots(MachineFunction &Fn,
const BitVector &SavedRegs);
void insertCSRSpillsAndRestores(MachineFunction &Fn);
void calculateFrameObjectOffsets(MachineFunction &Fn);
void replaceFrameIndices(MachineFunction &Fn);
void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
int &SPAdj);
void scavengeFrameVirtualRegs(MachineFunction &Fn);
void insertPrologEpilogCode(MachineFunction &Fn);
// Convenience for recognizing return blocks.
bool isReturnBlock(const MachineBasicBlock *MBB) const;
};
} // namespace
char PEI::ID = 0;
char &llvm::PrologEpilogCodeInserterID = PEI::ID;
static cl::opt<unsigned>
WarnStackSize("warn-stack-size", cl::Hidden, cl::init((unsigned)-1),
cl::desc("Warn for stack size bigger than the given"
" number"));
INITIALIZE_PASS_BEGIN(PEI, "prologepilog",
"Prologue/Epilogue Insertion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(PEI, "prologepilog",
"Prologue/Epilogue Insertion & Frame Finalization",
false, false)
STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
STATISTIC(NumBytesStackSpace,
"Number of bytes used for stack in all functions");
void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addPreserved<MachineLoopInfo>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<StackProtector>();
AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool PEI::isReturnBlock(const MachineBasicBlock* MBB) const {
return (MBB && !MBB->empty() && MBB->back().isReturn());
}
/// Compute the set of return blocks
void PEI::calculateSets(MachineFunction &Fn) {
const MachineFrameInfo *MFI = Fn.getFrameInfo();
// Even when we do not change any CSR, we still want to insert the
// prologue and epilogue of the function.
// So set the save points for those.
// Use the points found by shrink-wrapping, if any.
if (MFI->getSavePoint()) {
SaveBlock = MFI->getSavePoint();
assert(MFI->getRestorePoint() && "Both restore and save must be set");
MachineBasicBlock *RestoreBlock = MFI->getRestorePoint();
// If RestoreBlock does not have any successor and is not a return block
// then the end point is unreachable and we do not need to insert any
// epilogue.
if (!RestoreBlock->succ_empty() || isReturnBlock(RestoreBlock))
RestoreBlocks.push_back(RestoreBlock);
return;
}
// Save refs to entry and return blocks.
SaveBlock = Fn.begin();
for (MachineFunction::iterator MBB = Fn.begin(), E = Fn.end();
MBB != E; ++MBB)
if (isReturnBlock(MBB))
RestoreBlocks.push_back(MBB);
return;
}
/// StackObjSet - A set of stack object indexes
typedef SmallSetVector<int, 8> StackObjSet;
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
///
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
const Function* F = Fn.getFunction();
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
assert(!Fn.getRegInfo().getNumVirtRegs() && "Regalloc must assign all vregs");
RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : nullptr;
FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
// Calculate the MaxCallFrameSize and AdjustsStack variables for the
// function's frame information. Also eliminates call frame pseudo
// instructions.
calculateCallsInformation(Fn);
// Determine which of the registers in the callee save list should be saved.
BitVector SavedRegs;
TFI->determineCalleeSaves(Fn, SavedRegs, RS);
// Insert spill code for any callee saved registers that are modified.
assignCalleeSavedSpillSlots(Fn, SavedRegs);
// Determine placement of CSR spill/restore code:
// place all spills in the entry block, all restores in return blocks.
calculateSets(Fn);
// Add the code to save and restore the callee saved registers
if (!F->hasFnAttribute(Attribute::Naked))
insertCSRSpillsAndRestores(Fn);
// Allow the target machine to make final modifications to the function
// before the frame layout is finalized.
TFI->processFunctionBeforeFrameFinalized(Fn, RS);
// Calculate actual frame offsets for all abstract stack objects...
calculateFrameObjectOffsets(Fn);
// Add prolog and epilog code to the function. This function is required
// to align the stack frame as necessary for any stack variables or
// called functions. Because of this, calculateCalleeSavedRegisters()
// must be called before this function in order to set the AdjustsStack
// and MaxCallFrameSize variables.
if (!F->hasFnAttribute(Attribute::Naked))
insertPrologEpilogCode(Fn);
// Replace all MO_FrameIndex operands with physical register references
// and actual offsets.
//
replaceFrameIndices(Fn);
// If register scavenging is needed, as we've enabled doing it as a
// post-pass, scavenge the virtual registers that frame index elimination
// inserted.
if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging)
scavengeFrameVirtualRegs(Fn);
// Clear any vregs created by virtual scavenging.
Fn.getRegInfo().clearVirtRegs();
  // Warn if the stack size exceeds the given limit.
MachineFrameInfo *MFI = Fn.getFrameInfo();
uint64_t StackSize = MFI->getStackSize();
if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
F->getContext().diagnose(DiagStackSize);
}
delete RS;
RestoreBlocks.clear();
return true;
}
/// calculateCallsInformation - Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallsInformation(MachineFunction &Fn) {
const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
MachineFrameInfo *MFI = Fn.getFrameInfo();
unsigned MaxCallFrameSize = 0;
bool AdjustsStack = MFI->adjustsStack();
// Get the function call frame set-up and tear-down instruction opcode
unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
// Early exit for targets which have no call frame setup/destroy pseudo
// instructions.
if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
return;
std::vector<MachineBasicBlock::iterator> FrameSDOps;
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
if (I->getOpcode() == FrameSetupOpcode ||
I->getOpcode() == FrameDestroyOpcode) {
assert(I->getNumOperands() >= 1 && "Call Frame Setup/Destroy Pseudo"
" instructions should have a single immediate argument!");
unsigned Size = I->getOperand(0).getImm();
if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
AdjustsStack = true;
FrameSDOps.push_back(I);
} else if (I->isInlineAsm()) {
        // Some inline asm blocks need a stack frame, as indicated by operand 1.
unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
AdjustsStack = true;
}
MFI->setAdjustsStack(AdjustsStack);
MFI->setMaxCallFrameSize(MaxCallFrameSize);
for (std::vector<MachineBasicBlock::iterator>::iterator
i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
MachineBasicBlock::iterator I = *i;
// If call frames are not being included as part of the stack frame, and
// the target doesn't indicate otherwise, remove the call frame pseudos
// here. The sub/add sp instruction pairs are still inserted, but we don't
// need to track the SP adjustment for frame index elimination.
if (TFI->canSimplifyCallFramePseudos(Fn))
TFI->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
}
}
void PEI::assignCalleeSavedSpillSlots(MachineFunction &F,
const BitVector &SavedRegs) {
  // These are used to keep track of the callee-save area. Initialize them.
MinCSFrameIndex = INT_MAX;
MaxCSFrameIndex = 0;
if (SavedRegs.empty())
return;
const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&F);
std::vector<CalleeSavedInfo> CSI;
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
if (SavedRegs.test(Reg))
CSI.push_back(CalleeSavedInfo(Reg));
}
const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
MachineFrameInfo *MFI = F.getFrameInfo();
if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
// If target doesn't implement this, use generic code.
if (CSI.empty())
return; // Early exit if no callee saved registers are modified!
unsigned NumFixedSpillSlots;
const TargetFrameLowering::SpillSlot *FixedSpillSlots =
TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);
// Now that we know which registers need to be saved and restored, allocate
// stack slots for them.
for (std::vector<CalleeSavedInfo>::iterator I = CSI.begin(), E = CSI.end();
I != E; ++I) {
unsigned Reg = I->getReg();
const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
int FrameIdx;
if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
I->setFrameIdx(FrameIdx);
continue;
}
// Check to see if this physreg must be spilled to a particular stack slot
// on this target.
const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
FixedSlot->Reg != Reg)
++FixedSlot;
if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
// Nope, just spill it anywhere convenient.
unsigned Align = RC->getAlignment();
unsigned StackAlign = TFI->getStackAlignment();
// We may not be able to satisfy the desired alignment specification of
// the TargetRegisterClass if the stack alignment is smaller. Use the
// min.
Align = std::min(Align, StackAlign);
FrameIdx = MFI->CreateStackObject(RC->getSize(), Align, true);
if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
} else {
// Spill it to the stack where we must.
FrameIdx =
MFI->CreateFixedSpillStackObject(RC->getSize(), FixedSlot->Offset);
}
I->setFrameIdx(FrameIdx);
}
}
MFI->setCalleeSavedInfo(CSI);
}
/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
MachineFrameInfo *MFI = MF.getFrameInfo();
// Visited will contain all the basic blocks that are in the region
// where the callee saved registers are alive:
// - Anything that is not Save or Restore -> LiveThrough.
// - Save -> LiveIn.
// - Restore -> LiveOut.
// The live-out is not attached to the block, so no need to keep
// Restore in this set.
SmallPtrSet<MachineBasicBlock *, 8> Visited;
SmallVector<MachineBasicBlock *, 8> WorkList;
MachineBasicBlock *Entry = &MF.front();
MachineBasicBlock *Save = MFI->getSavePoint();
if (!Save)
Save = Entry;
if (Entry != Save) {
WorkList.push_back(Entry);
Visited.insert(Entry);
}
Visited.insert(Save);
MachineBasicBlock *Restore = MFI->getRestorePoint();
if (Restore)
// By construction Restore cannot be visited, otherwise it
// means there exists a path to Restore that does not go
// through Save.
WorkList.push_back(Restore);
while (!WorkList.empty()) {
const MachineBasicBlock *CurBB = WorkList.pop_back_val();
// By construction, the region that is after the save point is
// dominated by the Save and post-dominated by the Restore.
if (CurBB == Save)
continue;
// Enqueue all the successors not already visited.
// Those are by construction either before Save or after Restore.
for (MachineBasicBlock *SuccBB : CurBB->successors())
if (Visited.insert(SuccBB).second)
WorkList.push_back(SuccBB);
}
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
for (MachineBasicBlock *MBB : Visited)
// Add the callee-saved register as live-in.
// It's killed at the spill.
MBB->addLiveIn(CSI[i].getReg());
}
}
/// insertCSRSpillsAndRestores - Insert spill and restore code for
/// callee saved registers used in the function.
///
void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Get callee saved register information.
MachineFrameInfo *MFI = Fn.getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
MFI->setCalleeSavedInfoValid(true);
// Early exit if no callee saved registers are modified!
if (CSI.empty())
return;
const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
MachineBasicBlock::iterator I;
// Spill using target interface.
I = SaveBlock->begin();
if (!TFI->spillCalleeSavedRegisters(*SaveBlock, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
// Insert the spill to the stack frame.
unsigned Reg = CSI[i].getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.storeRegToStackSlot(*SaveBlock, I, Reg, true, CSI[i].getFrameIdx(),
RC, TRI);
}
}
// Update the live-in information of all the blocks up to the save point.
updateLiveness(Fn);
// Restore using target interface.
for (MachineBasicBlock *MBB : RestoreBlocks) {
I = MBB->end();
// Skip over all terminator instructions, which are part of the return
// sequence.
MachineBasicBlock::iterator I2 = I;
while (I2 != MBB->begin() && (--I2)->isTerminator())
I = I2;
bool AtStart = I == MBB->begin();
MachineBasicBlock::iterator BeforeI = I;
if (!AtStart)
--BeforeI;
// Restore all registers immediately before the return and any
// terminators that precede it.
if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.loadRegFromStackSlot(*MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI);
assert(I != MBB->begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert
// multiple instructions.
if (AtStart)
I = MBB->begin();
else {
I = BeforeI;
++I;
}
}
}
}
}
/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void
AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
bool StackGrowsDown, int64_t &Offset,
unsigned &MaxAlign) {
// If the stack grows down, add the object size to find the lowest address.
if (StackGrowsDown)
Offset += MFI->getObjectSize(FrameIdx);
unsigned Align = MFI->getObjectAlignment(FrameIdx);
// If the alignment of this object is greater than that of the stack, then
// increase the stack alignment to match.
MaxAlign = std::max(MaxAlign, Align);
// Adjust to alignment boundary.
Offset = (Offset + Align - 1) / Align * Align;
if (StackGrowsDown) {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
MFI->setObjectOffset(FrameIdx, -Offset); // Set the computed offset
} else {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
MFI->setObjectOffset(FrameIdx, Offset);
Offset += MFI->getObjectSize(FrameIdx);
}
}
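// Worked example of the rounding above (assumed values): with StackGrowsDown,
// Offset = 20, an 8-byte object and Align = 8, Offset first becomes 28, is
// then rounded up to 32 by (28 + 7) / 8 * 8, and the object is placed at
// SP[-32].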
/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void
AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
SmallSet<int, 16> &ProtectedObjs,
MachineFrameInfo *MFI, bool StackGrowsDown,
int64_t &Offset, unsigned &MaxAlign) {
for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
E = UnassignedObjs.end(); I != E; ++I) {
int i = *I;
AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
ProtectedObjs.insert(i);
}
}
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
StackProtector *SP = &getAnalysis<StackProtector>();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
// Loop over all of the stack objects, assigning sequential addresses...
MachineFrameInfo *MFI = Fn.getFrameInfo();
// Start at the beginning of the local area.
// The Offset is the distance from the stack top in the direction
// of stack growth -- so it's always nonnegative.
int LocalAreaOffset = TFI.getOffsetOfLocalArea();
if (StackGrowsDown)
LocalAreaOffset = -LocalAreaOffset;
assert(LocalAreaOffset >= 0
&& "Local area offset should be in direction of stack growth");
int64_t Offset = LocalAreaOffset;
  // If there are fixed-size objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of the local
  // area. We currently don't support filling in holes in between fixed-size
  // objects, so we adjust 'Offset' to point to the end of the last fixed-size
  // preallocated object.
for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
int64_t FixedOff;
if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address of
      // the object -- which is given by its offset. For a downward-growing
      // stack the offset is negative, so we negate it to get the distance.
FixedOff = -MFI->getObjectOffset(i);
} else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
}
if (FixedOff > Offset) Offset = FixedOff;
}
// First assign frame offsets to stack objects that are used to spill
// callee saved registers.
if (StackGrowsDown) {
for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
// If the stack grows down, we need to add the size to find the lowest
// address of the object.
Offset += MFI->getObjectSize(i);
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
Offset = RoundUpToAlignment(Offset, Align);
MFI->setObjectOffset(i, -Offset); // Set the computed offset
}
} else {
int MaxCSFI = MaxCSFrameIndex, MinCSFI = MinCSFrameIndex;
for (int i = MaxCSFI; i >= MinCSFI ; --i) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
Offset = RoundUpToAlignment(Offset, Align);
MFI->setObjectOffset(i, Offset);
Offset += MFI->getObjectSize(i);
}
}
unsigned MaxAlign = MFI->getMaxAlignment();
// Make sure the special register scavenging spill slot is closest to the
// incoming stack pointer if a frame pointer is required and is closer
// to the incoming rather than the final stack pointer.
const TargetRegisterInfo *RegInfo = Fn.getSubtarget().getRegisterInfo();
bool EarlyScavengingSlots = (TFI.hasFP(Fn) &&
TFI.isFPCloseToIncomingSP() &&
RegInfo->useFPForScavengingIndex(Fn) &&
!RegInfo->needsStackRealignment(Fn));
if (RS && EarlyScavengingSlots) {
SmallVector<int, 2> SFIs;
RS->getScavengingFrameIndices(SFIs);
for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
IE = SFIs.end(); I != IE; ++I)
AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign);
}
// FIXME: Once this is working, then enable flag will change to a target
// check for whether the frame is large enough to want to use virtual
// frame index registers. Functions which don't want/need this optimization
// will continue to use the existing code path.
if (MFI->getUseLocalStackAllocationBlock()) {
unsigned Align = MFI->getLocalFrameMaxAlign();
// Adjust to alignment boundary.
Offset = RoundUpToAlignment(Offset, Align);
DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
// Resolve offsets for objects in the local block.
for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
FIOffset << "]\n");
MFI->setObjectOffset(Entry.first, FIOffset);
}
// Allocate the local block
Offset += MFI->getLocalFrameSize();
MaxAlign = std::max(Align, MaxAlign);
}
// Make sure that the stack protector comes before the local variables on the
// stack.
SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
StackObjSet SmallArrayObjs;
StackObjSet AddrOfObjs;
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
Offset, MaxAlign);
// Assign large stack objects first.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isObjectPreAllocated(i) &&
MFI->getUseLocalStackAllocationBlock())
continue;
if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
continue;
if (RS && RS->isScavengingFrameIndex((int)i))
continue;
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
continue;
case StackProtector::SSPLK_SmallArray:
SmallArrayObjs.insert(i);
continue;
case StackProtector::SSPLK_AddrOf:
AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
continue;
}
llvm_unreachable("Unexpected SSPLayoutKind.");
}
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill
// callee saved registers.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isObjectPreAllocated(i) &&
MFI->getUseLocalStackAllocationBlock())
continue;
if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
continue;
if (RS && RS->isScavengingFrameIndex((int)i))
continue;
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
if (ProtectedObjs.count(i))
continue;
AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
}
// Make sure the special register scavenging spill slot is closest to the
// stack pointer.
if (RS && !EarlyScavengingSlots) {
SmallVector<int, 2> SFIs;
RS->getScavengingFrameIndices(SFIs);
for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
IE = SFIs.end(); I != IE; ++I)
AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign);
}
if (!TFI.targetHandlesStackFrameRounding()) {
// If we have reserved argument space for call sites in the function
// immediately on entry to the current function, count it as part of the
// overall stack size.
if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
Offset += MFI->getMaxCallFrameSize();
// Round up the size to a multiple of the alignment. If the function has
    // any calls or allocas, align to the target's StackAlignment value to
// ensure that the callee's frame or the alloca data is suitably aligned;
// otherwise, for leaf functions, align to the TransientStackAlignment
// value.
unsigned StackAlign;
if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
(RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
StackAlign = TFI.getStackAlignment();
else
StackAlign = TFI.getTransientStackAlignment();
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
Offset = RoundUpToAlignment(Offset, StackAlign);
}
// Update frame info to pretend that this is part of the stack...
int64_t StackSize = Offset - LocalAreaOffset;
MFI->setStackSize(StackSize);
NumBytesStackSpace += StackSize;
}
/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
// Add prologue to the function...
TFI.emitPrologue(Fn, *SaveBlock);
// Add epilogue to restore the callee-save registers in each exiting block.
for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
TFI.emitEpilogue(Fn, *RestoreBlock);
// Emit additional code that is required to support segmented stacks, if
// we've been asked for it. This, when linked with a runtime with support
// for segmented stacks (libgcc is one), will result in allocating stack
// space in small chunks instead of one large contiguous block.
if (Fn.shouldSplitStack())
TFI.adjustForSegmentedStacks(Fn, *SaveBlock);
// Emit additional code that is required to explicitly handle the stack in
// HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
// approach is rather similar to that of Segmented Stacks, but it uses a
// different conditional check and another BIF for allocating more stack
// space.
if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE)
TFI.adjustForHiPEPrologue(Fn, *SaveBlock);
}
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
if (!TFI.needsFrameIndexResolution(Fn)) return;
MachineModuleInfo &MMI = Fn.getMMI();
const Function *F = Fn.getFunction();
const Function *ParentF = MMI.getWinEHParent(F);
unsigned FrameReg;
if (F == ParentF) {
WinEHFuncInfo &FuncInfo = MMI.getWinEHFuncInfo(Fn.getFunction());
// FIXME: This should be unconditional but we have bugs in the preparation
// pass.
if (FuncInfo.UnwindHelpFrameIdx != INT_MAX)
FuncInfo.UnwindHelpFrameOffset = TFI.getFrameIndexReferenceFromSP(
Fn, FuncInfo.UnwindHelpFrameIdx, FrameReg);
} else if (MMI.hasWinEHFuncInfo(F)) {
WinEHFuncInfo &FuncInfo = MMI.getWinEHFuncInfo(Fn.getFunction());
auto I = FuncInfo.CatchHandlerParentFrameObjIdx.find(F);
if (I != FuncInfo.CatchHandlerParentFrameObjIdx.end())
FuncInfo.CatchHandlerParentFrameObjOffset[F] =
TFI.getFrameIndexReferenceFromSP(Fn, I->second, FrameReg);
}
// Store SPAdj at exit of a basic block.
SmallVector<int, 8> SPState;
SPState.resize(Fn.getNumBlockIDs());
SmallPtrSet<MachineBasicBlock*, 8> Reachable;
// Iterate over the reachable blocks in DFS order.
for (auto DFI = df_ext_begin(&Fn, Reachable), DFE = df_ext_end(&Fn, Reachable);
DFI != DFE; ++DFI) {
int SPAdj = 0;
// Check the exit state of the DFS stack predecessor.
if (DFI.getPathLength() >= 2) {
MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
assert(Reachable.count(StackPred) &&
"DFS stack predecessor is already visited.\n");
SPAdj = SPState[StackPred->getNumber()];
}
MachineBasicBlock *BB = *DFI;
replaceFrameIndices(BB, Fn, SPAdj);
SPState[BB->getNumber()] = SPAdj;
}
// Handle the unreachable blocks.
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
if (Reachable.count(BB))
// Already handled in DFS traversal.
continue;
int SPAdj = 0;
replaceFrameIndices(BB, Fn, SPAdj);
}
}
void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
int &SPAdj) {
assert(Fn.getSubtarget().getRegisterInfo() &&
"getRegisterInfo() must be implemented!");
const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
const TargetRegisterInfo &TRI = *Fn.getSubtarget().getRegisterInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
if (RS && !FrameIndexVirtualScavenging) RS->enterBasicBlock(BB);
bool InsideCallSequence = false;
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
if (I->getOpcode() == FrameSetupOpcode ||
I->getOpcode() == FrameDestroyOpcode) {
InsideCallSequence = (I->getOpcode() == FrameSetupOpcode);
SPAdj += TII.getSPAdjust(I);
MachineBasicBlock::iterator PrevI = BB->end();
if (I != BB->begin()) PrevI = std::prev(I);
TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
// Visit the instructions created by eliminateCallFramePseudoInstr().
if (PrevI == BB->end())
I = BB->begin(); // The replaced instr was the first in the block.
else
I = std::next(PrevI);
continue;
}
MachineInstr *MI = I;
bool DoIncr = true;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (!MI->getOperand(i).isFI())
continue;
      // Frame indices in debug values are encoded in a target-independent
      // way, using just the frame index and offset rather than any
      // target-specific addressing mode.
if (MI->isDebugValue()) {
assert(i == 0 && "Frame indicies can only appear as the first "
"operand of a DBG_VALUE machine instruction");
unsigned Reg;
MachineOperand &Offset = MI->getOperand(1);
Offset.setImm(Offset.getImm() +
TFI->getFrameIndexReference(
Fn, MI->getOperand(0).getIndex(), Reg));
MI->getOperand(0).ChangeToRegister(Reg, false /*isDef*/);
continue;
}
// TODO: This code should be commoned with the code for
// PATCHPOINT. There's no good reason for the difference in
// implementation other than historical accident. The only
// remaining difference is the unconditional use of the stack
// pointer as the base register.
if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI->isDebugValue() || i == 0) &&
               "Frame indices can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
unsigned Reg;
MachineOperand &Offset = MI->getOperand(i + 1);
const unsigned refOffset =
TFI->getFrameIndexReferenceFromSP(Fn, MI->getOperand(i).getIndex(),
Reg);
Offset.setImm(Offset.getImm() + refOffset);
MI->getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
continue;
}
// Some instructions (e.g. inline asm instructions) can have
// multiple frame indices and/or cause eliminateFrameIndex
// to insert more than one instruction. We need the register
// scavenger to go through all of these instructions so that
// it can update its register information. We keep the
// iterator at the point before insertion so that we can
// revisit them in full.
bool AtBeginning = (I == BB->begin());
if (!AtBeginning) --I;
// If this instruction has a FrameIndex operand, we need to
// use that target machine register info object to eliminate
// it.
TRI.eliminateFrameIndex(MI, SPAdj, i,
FrameIndexVirtualScavenging ? nullptr : RS);
// Reset the iterator if we were at the beginning of the BB.
if (AtBeginning) {
I = BB->begin();
DoIncr = false;
}
MI = nullptr;
break;
}
// If we are looking at a call sequence, we need to keep track of
// the SP adjustment made by each instruction in the sequence.
// This includes both the frame setup/destroy pseudos (handled above),
// as well as other instructions that have side effects w.r.t the SP.
// Note that this must come after eliminateFrameIndex, because
// if I itself referred to a frame index, we shouldn't count its own
// adjustment.
if (MI && InsideCallSequence)
SPAdj += TII.getSPAdjust(MI);
if (DoIncr && I != BB->end()) ++I;
// Update register states.
if (RS && !FrameIndexVirtualScavenging && MI) RS->forward(MI);
}
}
/// scavengeFrameVirtualRegs - Replace all frame index virtual registers
/// with physical registers. Use the register scavenger to find an
/// appropriate register to use.
///
/// FIXME: Iterating over the instruction stream is unnecessary. We can simply
/// iterate over the vreg use list, which at this point only contains machine
/// operands for which eliminateFrameIndex needs a new scratch reg.
void
PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
// Run through the instructions and find any virtual registers.
for (MachineFunction::iterator BB = Fn.begin(),
E = Fn.end(); BB != E; ++BB) {
RS->enterBasicBlock(BB);
int SPAdj = 0;
// The instruction stream may change in the loop, so check BB->end()
// directly.
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
      // We might end up here again with a NULL iterator if we scavenged a
      // register for which we inserted spill code for a definition by what
      // was originally the first instruction in BB.
if (I == MachineBasicBlock::iterator(nullptr))
I = BB->begin();
MachineInstr *MI = I;
MachineBasicBlock::iterator J = std::next(I);
MachineBasicBlock::iterator P =
I == BB->begin() ? MachineBasicBlock::iterator(nullptr)
: std::prev(I);
// RS should process this instruction before we might scavenge at this
// location. This is because we might be replacing a virtual register
// defined by this instruction, and if so, registers killed by this
// instruction are available, and defined registers are not.
RS->forward(I);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (MI->getOperand(i).isReg()) {
MachineOperand &MO = MI->getOperand(i);
unsigned Reg = MO.getReg();
if (Reg == 0)
continue;
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
// When we first encounter a new virtual register, it
// must be a definition.
assert(MI->getOperand(i).isDef() &&
"frame index virtual missing def!");
// Scavenge a new scratch register
const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj);
++NumScavengedRegs;
// Replace this reference to the virtual register with the
// scratch register.
assert (ScratchReg && "Missing scratch register!");
MachineRegisterInfo &MRI = Fn.getRegInfo();
Fn.getRegInfo().replaceRegWith(Reg, ScratchReg);
// Make sure MRI now accounts this register as used.
MRI.setPhysRegUsed(ScratchReg);
// Because this instruction was processed by the RS before this
// register was allocated, make sure that the RS now records the
// register as being used.
RS->setRegUsed(ScratchReg);
}
}
// If the scavenger needed to use one of its spill slots, the
// spill code will have been inserted in between I and J. This is a
// problem because we need the spill code before I: Move I to just
// prior to J.
if (I != std::prev(J)) {
BB->splice(J, BB, I);
// Before we move I, we need to prepare the RS to visit I again.
// Specifically, RS will assert if it sees uses of registers that
// it believes are undefined. Because we have already processed
// register kills in I, when it visits I again, it will believe that
// those registers are undefined. To avoid this situation, unprocess
// the instruction I.
assert(RS->getCurrentPosition() == I &&
"The register scavenger has an unexpected position");
I = P;
RS->unprocess(P);
} else
++I;
}
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LivePhysRegs.cpp | //===--- LivePhysRegs.cpp - Live Physical Register Set --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LivePhysRegs utility for tracking liveness of
// physical registers across machine instructions in forward or backward order.
// A more detailed description can be found in the corresponding header file.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
/// \brief Remove all registers from the set that get clobbered by the register
/// mask.
/// The clobbers set will be the list of live registers clobbered
/// by the regmask.
void LivePhysRegs::removeRegsInMask(const MachineOperand &MO,
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers) {
SparseSet<unsigned>::iterator LRI = LiveRegs.begin();
while (LRI != LiveRegs.end()) {
if (MO.clobbersPhysReg(*LRI)) {
if (Clobbers)
Clobbers->push_back(std::make_pair(*LRI, &MO));
LRI = LiveRegs.erase(LRI);
} else
++LRI;
}
}
/// Simulates liveness when stepping backwards over an instruction (bundle):
/// Remove Defs, add uses. This is the recommended way of calculating liveness.
void LivePhysRegs::stepBackward(const MachineInstr &MI) {
// Remove defined registers and regmask kills from the set.
for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
if (O->isReg()) {
if (!O->isDef())
continue;
unsigned Reg = O->getReg();
if (Reg == 0)
continue;
removeReg(Reg);
} else if (O->isRegMask())
removeRegsInMask(*O, nullptr);
}
// Add uses to the set.
for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
if (!O->isReg() || !O->readsReg() || O->isUndef())
continue;
unsigned Reg = O->getReg();
if (Reg == 0)
continue;
addReg(Reg);
}
}
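// A minimal usage sketch (assuming an MBB and TRI in scope), mirroring the
// backward scan in ExeDepsFix::processUndefReads:
//
//   LivePhysRegs LiveRegs(TRI);
//   LiveRegs.addLiveOuts(&MBB);
//   for (auto I = MBB.rbegin(), E = MBB.rend(); I != E; ++I)
//     LiveRegs.stepBackward(*I); // Liveness is now valid before *I.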
/// Simulates liveness when stepping forward over an instruction (bundle):
/// Remove killed uses, add defs. This is not the recommended way, because it
/// depends on accurate kill flags. If possible use stepBackward() instead of
/// this function.
void LivePhysRegs::stepForward(const MachineInstr &MI,
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers) {
// Remove killed registers from the set.
for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
if (O->isReg()) {
unsigned Reg = O->getReg();
if (Reg == 0)
continue;
if (O->isDef()) {
// Note, dead defs are still recorded. The caller should decide how to
// handle them.
Clobbers.push_back(std::make_pair(Reg, &*O));
} else {
if (!O->isKill())
continue;
assert(O->isUse());
removeReg(Reg);
}
} else if (O->isRegMask())
removeRegsInMask(*O, &Clobbers);
}
// Add defs to the set.
for (auto Reg : Clobbers) {
// Skip dead defs. They shouldn't be added to the set.
if (Reg.second->isReg() && Reg.second->isDead())
continue;
addReg(Reg.first);
}
}
/// Print the currently live registers to OS.
void LivePhysRegs::print(raw_ostream &OS) const {
OS << "Live Registers:";
if (!TRI) {
OS << " (uninitialized)\n";
return;
}
if (empty()) {
OS << " (empty)\n";
return;
}
for (const_iterator I = begin(), E = end(); I != E; ++I)
OS << " " << PrintReg(*I, TRI);
OS << "\n";
}
/// Dumps the currently live registers to the debug output.
void LivePhysRegs::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dbgs() << " " << *this;
#endif
}
/// Add live-in registers of basic block \p MBB to \p LiveRegs.
static void addLiveIns(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB) {
for (unsigned Reg : make_range(MBB.livein_begin(), MBB.livein_end()))
LiveRegs.addReg(Reg);
}
/// Add pristine registers to the given \p LiveRegs: add all callee-saved
/// registers, then remove the ones that are actually saved (and therefore
/// not pristine) in this function.
static void addPristines(LivePhysRegs &LiveRegs, const MachineFunction &MF,
const TargetRegisterInfo &TRI) {
const MachineFrameInfo &MFI = *MF.getFrameInfo();
if (!MFI.isCalleeSavedInfoValid())
return;
for (const MCPhysReg *CSR = TRI.getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
LiveRegs.addReg(*CSR);
for (const CalleeSavedInfo &Info : MFI.getCalleeSavedInfo())
LiveRegs.removeReg(Info.getReg());
}
void LivePhysRegs::addLiveOuts(const MachineBasicBlock *MBB,
bool AddPristines) {
if (AddPristines) {
const MachineFunction &MF = *MBB->getParent();
addPristines(*this, MF, *TRI);
}
for (const MachineBasicBlock *Succ : MBB->successors())
::addLiveIns(*this, *Succ);
}
void LivePhysRegs::addLiveIns(const MachineBasicBlock *MBB,
bool AddPristines) {
if (AddPristines) {
const MachineFunction &MF = *MBB->getParent();
addPristines(*this, MF, *TRI);
}
::addLiveIns(*this, *MBB);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/IntrinsicLowering.cpp | //===-- IntrinsicLowering.cpp - Intrinsic Lowering default implementation -===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IntrinsicLowering class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
template <class ArgIt>
static void EnsureFunctionExists(Module &M, const char *Name,
ArgIt ArgBegin, ArgIt ArgEnd,
Type *RetTy) {
// Insert a correctly-typed definition now.
std::vector<Type *> ParamTys;
for (ArgIt I = ArgBegin; I != ArgEnd; ++I)
ParamTys.push_back(I->getType());
M.getOrInsertFunction(Name, FunctionType::get(RetTy, ParamTys, false));
}
static void EnsureFPIntrinsicsExist(Module &M, Function *Fn,
const char *FName,
const char *DName, const char *LDName) {
// Insert definitions for all the floating point types.
switch((int)Fn->arg_begin()->getType()->getTypeID()) {
case Type::FloatTyID:
EnsureFunctionExists(M, FName, Fn->arg_begin(), Fn->arg_end(),
Type::getFloatTy(M.getContext()));
break;
case Type::DoubleTyID:
EnsureFunctionExists(M, DName, Fn->arg_begin(), Fn->arg_end(),
Type::getDoubleTy(M.getContext()));
break;
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
EnsureFunctionExists(M, LDName, Fn->arg_begin(), Fn->arg_end(),
Fn->arg_begin()->getType());
break;
}
}
/// ReplaceCallWith - This function is used when we want to lower an intrinsic
/// call to a call of an external function. This handles hard cases such as
/// when there was already a prototype for the external function, and that
/// prototype doesn't match the arguments we expect to pass in.
template <class ArgIt>
static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
ArgIt ArgBegin, ArgIt ArgEnd,
Type *RetTy) {
// If we haven't already looked up this function, check to see if the
// program already contains a function with this name.
Module *M = CI->getParent()->getParent()->getParent();
// Get or insert the definition now.
std::vector<Type *> ParamTys;
for (ArgIt I = ArgBegin; I != ArgEnd; ++I)
ParamTys.push_back((*I)->getType());
Constant* FCache = M->getOrInsertFunction(NewFn,
FunctionType::get(RetTy, ParamTys, false));
IRBuilder<> Builder(CI->getParent(), CI);
SmallVector<Value *, 8> Args(ArgBegin, ArgEnd);
CallInst *NewCI = Builder.CreateCall(FCache, Args);
NewCI->setName(CI->getName());
if (!CI->use_empty())
CI->replaceAllUsesWith(NewCI);
return NewCI;
}
// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
!defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif
void IntrinsicLowering::AddPrototypes(Module &M) {
LLVMContext &Context = M.getContext();
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
if (I->isDeclaration() && !I->use_empty())
switch (I->getIntrinsicID()) {
default: break;
case Intrinsic::setjmp:
EnsureFunctionExists(M, "setjmp", I->arg_begin(), I->arg_end(),
Type::getInt32Ty(M.getContext()));
break;
case Intrinsic::longjmp:
EnsureFunctionExists(M, "longjmp", I->arg_begin(), I->arg_end(),
Type::getVoidTy(M.getContext()));
break;
case Intrinsic::siglongjmp:
EnsureFunctionExists(M, "abort", I->arg_end(), I->arg_end(),
Type::getVoidTy(M.getContext()));
break;
case Intrinsic::memcpy:
M.getOrInsertFunction("memcpy",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
DL.getIntPtrType(Context), nullptr);
break;
case Intrinsic::memmove:
M.getOrInsertFunction("memmove",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
DL.getIntPtrType(Context), nullptr);
break;
case Intrinsic::memset:
M.getOrInsertFunction("memset",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt32Ty(M.getContext()),
DL.getIntPtrType(Context), nullptr);
break;
case Intrinsic::sqrt:
EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");
break;
case Intrinsic::sin:
EnsureFPIntrinsicsExist(M, I, "sinf", "sin", "sinl");
break;
case Intrinsic::cos:
EnsureFPIntrinsicsExist(M, I, "cosf", "cos", "cosl");
break;
case Intrinsic::pow:
EnsureFPIntrinsicsExist(M, I, "powf", "pow", "powl");
break;
case Intrinsic::log:
EnsureFPIntrinsicsExist(M, I, "logf", "log", "logl");
break;
case Intrinsic::log2:
EnsureFPIntrinsicsExist(M, I, "log2f", "log2", "log2l");
break;
case Intrinsic::log10:
EnsureFPIntrinsicsExist(M, I, "log10f", "log10", "log10l");
break;
case Intrinsic::exp:
EnsureFPIntrinsicsExist(M, I, "expf", "exp", "expl");
break;
case Intrinsic::exp2:
EnsureFPIntrinsicsExist(M, I, "exp2f", "exp2", "exp2l");
break;
}
}
/// LowerBSWAP - Emit the code to lower bswap of V before the specified
/// instruction IP.
static Value *LowerBSWAP(LLVMContext &Context, Value *V, Instruction *IP) {
assert(V->getType()->isIntegerTy() && "Can't bswap a non-integer type!");
unsigned BitSize = V->getType()->getPrimitiveSizeInBits();
IRBuilder<> Builder(IP->getParent(), IP);
switch(BitSize) {
default: llvm_unreachable("Unhandled type size of value to byteswap!");
case 16: {
Value *Tmp1 = Builder.CreateShl(V, ConstantInt::get(V->getType(), 8),
"bswap.2");
Value *Tmp2 = Builder.CreateLShr(V, ConstantInt::get(V->getType(), 8),
"bswap.1");
V = Builder.CreateOr(Tmp1, Tmp2, "bswap.i16");
break;
}
case 32: {
Value *Tmp4 = Builder.CreateShl(V, ConstantInt::get(V->getType(), 24),
"bswap.4");
Value *Tmp3 = Builder.CreateShl(V, ConstantInt::get(V->getType(), 8),
"bswap.3");
Value *Tmp2 = Builder.CreateLShr(V, ConstantInt::get(V->getType(), 8),
"bswap.2");
Value *Tmp1 = Builder.CreateLShr(V,ConstantInt::get(V->getType(), 24),
"bswap.1");
Tmp3 = Builder.CreateAnd(Tmp3,
ConstantInt::get(Type::getInt32Ty(Context), 0xFF0000),
"bswap.and3");
Tmp2 = Builder.CreateAnd(Tmp2,
ConstantInt::get(Type::getInt32Ty(Context), 0xFF00),
"bswap.and2");
Tmp4 = Builder.CreateOr(Tmp4, Tmp3, "bswap.or1");
Tmp2 = Builder.CreateOr(Tmp2, Tmp1, "bswap.or2");
V = Builder.CreateOr(Tmp4, Tmp2, "bswap.i32");
break;
}
case 64: {
Value *Tmp8 = Builder.CreateShl(V, ConstantInt::get(V->getType(), 56),
"bswap.8");
Value *Tmp7 = Builder.CreateShl(V, ConstantInt::get(V->getType(), 40),
"bswap.7");
Value *Tmp6 = Builder.CreateShl(V, ConstantInt::get(V->getType(), 24),
"bswap.6");
Value *Tmp5 = Builder.CreateShl(V, ConstantInt::get(V->getType(), 8),
"bswap.5");
Value* Tmp4 = Builder.CreateLShr(V, ConstantInt::get(V->getType(), 8),
"bswap.4");
Value* Tmp3 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 24),
"bswap.3");
Value* Tmp2 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 40),
"bswap.2");
Value* Tmp1 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 56),
"bswap.1");
Tmp7 = Builder.CreateAnd(Tmp7,
ConstantInt::get(Type::getInt64Ty(Context),
0xFF000000000000ULL),
"bswap.and7");
Tmp6 = Builder.CreateAnd(Tmp6,
ConstantInt::get(Type::getInt64Ty(Context),
0xFF0000000000ULL),
"bswap.and6");
Tmp5 = Builder.CreateAnd(Tmp5,
ConstantInt::get(Type::getInt64Ty(Context),
0xFF00000000ULL),
"bswap.and5");
Tmp4 = Builder.CreateAnd(Tmp4,
ConstantInt::get(Type::getInt64Ty(Context),
0xFF000000ULL),
"bswap.and4");
Tmp3 = Builder.CreateAnd(Tmp3,
ConstantInt::get(Type::getInt64Ty(Context),
0xFF0000ULL),
"bswap.and3");
Tmp2 = Builder.CreateAnd(Tmp2,
ConstantInt::get(Type::getInt64Ty(Context),
0xFF00ULL),
"bswap.and2");
Tmp8 = Builder.CreateOr(Tmp8, Tmp7, "bswap.or1");
Tmp6 = Builder.CreateOr(Tmp6, Tmp5, "bswap.or2");
Tmp4 = Builder.CreateOr(Tmp4, Tmp3, "bswap.or3");
Tmp2 = Builder.CreateOr(Tmp2, Tmp1, "bswap.or4");
Tmp8 = Builder.CreateOr(Tmp8, Tmp6, "bswap.or5");
Tmp4 = Builder.CreateOr(Tmp4, Tmp2, "bswap.or6");
V = Builder.CreateOr(Tmp8, Tmp4, "bswap.i64");
break;
}
}
return V;
}
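// For reference, the 32-bit case above implements the classic shift-and-mask
// byte-swap identity; written as plain integer code (a sketch, not part of
// the lowering itself):
//
//   uint32_t bswap32(uint32_t V) {
//     return (V << 24) | ((V << 8) & 0x00FF0000u) |
//            ((V >> 8) & 0x0000FF00u) | (V >> 24);
//   }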
/// LowerCTPOP - Emit the code to lower ctpop of V before the specified
/// instruction IP.
static Value *LowerCTPOP(LLVMContext &Context, Value *V, Instruction *IP) {
assert(V->getType()->isIntegerTy() && "Can't ctpop a non-integer type!");
static const uint64_t MaskValues[6] = {
0x5555555555555555ULL, 0x3333333333333333ULL,
0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL
};
IRBuilder<> Builder(IP->getParent(), IP);
unsigned BitSize = V->getType()->getPrimitiveSizeInBits();
unsigned WordSize = (BitSize + 63) / 64;
Value *Count = ConstantInt::get(V->getType(), 0);
for (unsigned n = 0; n < WordSize; ++n) {
Value *PartValue = V;
for (unsigned i = 1, ct = 0; i < (BitSize>64 ? 64 : BitSize);
i <<= 1, ++ct) {
Value *MaskCst = ConstantInt::get(V->getType(), MaskValues[ct]);
Value *LHS = Builder.CreateAnd(PartValue, MaskCst, "cppop.and1");
Value *VShift = Builder.CreateLShr(PartValue,
ConstantInt::get(V->getType(), i),
"ctpop.sh");
Value *RHS = Builder.CreateAnd(VShift, MaskCst, "cppop.and2");
PartValue = Builder.CreateAdd(LHS, RHS, "ctpop.step");
}
Count = Builder.CreateAdd(PartValue, Count, "ctpop.part");
if (BitSize > 64) {
V = Builder.CreateLShr(V, ConstantInt::get(V->getType(), 64),
"ctpop.part.sh");
BitSize -= 64;
}
}
return Count;
}
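// The loop above is the standard parallel ("SWAR") population count. For a
// single 64-bit word it computes the equivalent of (illustrative sketch):
//
//   uint64_t popcount64(uint64_t V) {
//     V = (V & 0x5555555555555555ULL) + ((V >> 1)  & 0x5555555555555555ULL);
//     V = (V & 0x3333333333333333ULL) + ((V >> 2)  & 0x3333333333333333ULL);
//     V = (V & 0x0F0F0F0F0F0F0F0FULL) + ((V >> 4)  & 0x0F0F0F0F0F0F0F0FULL);
//     V = (V & 0x00FF00FF00FF00FFULL) + ((V >> 8)  & 0x00FF00FF00FF00FFULL);
//     V = (V & 0x0000FFFF0000FFFFULL) + ((V >> 16) & 0x0000FFFF0000FFFFULL);
//     V = (V & 0x00000000FFFFFFFFULL) + ((V >> 32) & 0x00000000FFFFFFFFULL);
//     return V;
//   }
//
// each step summing adjacent bit groups twice as wide as in the last step.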
/// LowerCTLZ - Emit the code to lower ctlz of V before the specified
/// instruction IP.
static Value *LowerCTLZ(LLVMContext &Context, Value *V, Instruction *IP) {
IRBuilder<> Builder(IP->getParent(), IP);
unsigned BitSize = V->getType()->getPrimitiveSizeInBits();
for (unsigned i = 1; i < BitSize; i <<= 1) {
Value *ShVal = ConstantInt::get(V->getType(), i);
ShVal = Builder.CreateLShr(V, ShVal, "ctlz.sh");
V = Builder.CreateOr(V, ShVal, "ctlz.step");
}
V = Builder.CreateNot(V);
return LowerCTPOP(Context, V, IP);
}
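// LowerCTLZ first smears the highest set bit into every lower bit position
// (V |= V >> 1; V |= V >> 2; ... up to the bit width), then inverts and
// counts the set bits. Example with the 8-bit value 0b00010000: smearing
// yields 0b00011111, ~V = 0b11100000, and ctpop(~V) = 3 leading zeros.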
static void ReplaceFPIntrinsicWithCall(CallInst *CI, const char *Fname,
const char *Dname,
const char *LDname) {
CallSite CS(CI);
switch (CI->getArgOperand(0)->getType()->getTypeID()) {
default: llvm_unreachable("Invalid type in intrinsic");
case Type::FloatTyID:
ReplaceCallWith(Fname, CI, CS.arg_begin(), CS.arg_end(),
Type::getFloatTy(CI->getContext()));
break;
case Type::DoubleTyID:
ReplaceCallWith(Dname, CI, CS.arg_begin(), CS.arg_end(),
Type::getDoubleTy(CI->getContext()));
break;
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
ReplaceCallWith(LDname, CI, CS.arg_begin(), CS.arg_end(),
CI->getArgOperand(0)->getType());
break;
}
}
void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
IRBuilder<> Builder(CI->getParent(), CI);
LLVMContext &Context = CI->getContext();
const Function *Callee = CI->getCalledFunction();
assert(Callee && "Cannot lower an indirect call!");
CallSite CS(CI);
switch (Callee->getIntrinsicID()) {
case Intrinsic::not_intrinsic:
report_fatal_error("Cannot lower a call to a non-intrinsic function '"+
Callee->getName() + "'!");
default:
report_fatal_error("Code generator does not support intrinsic function '"+
Callee->getName()+"'!");
case Intrinsic::expect: {
// Just replace __builtin_expect(exp, c) with EXP.
Value *V = CI->getArgOperand(0);
CI->replaceAllUsesWith(V);
break;
}
// The setjmp/longjmp intrinsics should only exist in the code if it was
  // never optimized (i.e., right out of the CFE), or if it has been hacked on
// by the lowerinvoke pass. In both cases, the right thing to do is to
// convert the call to an explicit setjmp or longjmp call.
case Intrinsic::setjmp: {
Value *V = ReplaceCallWith("setjmp", CI, CS.arg_begin(), CS.arg_end(),
Type::getInt32Ty(Context));
if (!CI->getType()->isVoidTy())
CI->replaceAllUsesWith(V);
break;
}
case Intrinsic::sigsetjmp:
if (!CI->getType()->isVoidTy())
CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
break;
case Intrinsic::longjmp: {
ReplaceCallWith("longjmp", CI, CS.arg_begin(), CS.arg_end(),
Type::getVoidTy(Context));
break;
}
case Intrinsic::siglongjmp: {
// Insert the call to abort
ReplaceCallWith("abort", CI, CS.arg_end(), CS.arg_end(),
Type::getVoidTy(Context));
break;
}
case Intrinsic::ctpop:
CI->replaceAllUsesWith(LowerCTPOP(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::bswap:
CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::ctlz:
CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::cttz: {
// cttz(x) -> ctpop(~X & (X-1))
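    // E.g. for x = 01011000: ~x = 10100111 and x-1 = 01010111, so
    // ~x & (x-1) = 00000111, whose popcount (3) is the trailing zero count.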
Value *Src = CI->getArgOperand(0);
Value *NotSrc = Builder.CreateNot(Src);
NotSrc->setName(Src->getName() + ".not");
Value *SrcM1 = ConstantInt::get(Src->getType(), 1);
SrcM1 = Builder.CreateSub(Src, SrcM1);
Src = LowerCTPOP(Context, Builder.CreateAnd(NotSrc, SrcM1), CI);
CI->replaceAllUsesWith(Src);
break;
}
case Intrinsic::stacksave:
case Intrinsic::stackrestore: {
if (!Warned)
errs() << "WARNING: this target does not support the llvm.stack"
<< (Callee->getIntrinsicID() == Intrinsic::stacksave ?
"save" : "restore") << " intrinsic.\n";
Warned = true;
if (Callee->getIntrinsicID() == Intrinsic::stacksave)
CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
break;
}
case Intrinsic::returnaddress:
case Intrinsic::frameaddress:
errs() << "WARNING: this target does not support the llvm."
<< (Callee->getIntrinsicID() == Intrinsic::returnaddress ?
"return" : "frame") << "address intrinsic.\n";
CI->replaceAllUsesWith(ConstantPointerNull::get(
cast<PointerType>(CI->getType())));
break;
case Intrinsic::prefetch:
break; // Simply strip out prefetches on unsupported architectures
case Intrinsic::pcmarker:
break; // Simply strip out pcmarker on unsupported architectures
case Intrinsic::readcyclecounter: {
errs() << "WARNING: this target does not support the llvm.readcyclecoun"
<< "ter intrinsic. It is being lowered to a constant 0\n";
CI->replaceAllUsesWith(ConstantInt::get(Type::getInt64Ty(Context), 0));
break;
}
case Intrinsic::dbg_declare:
break; // Simply strip out debugging intrinsics
case Intrinsic::eh_typeid_for:
// Return something different to eh_selector.
CI->replaceAllUsesWith(ConstantInt::get(CI->getType(), 1));
break;
case Intrinsic::annotation:
case Intrinsic::ptr_annotation:
// Just drop the annotation, but forward the value
CI->replaceAllUsesWith(CI->getOperand(0));
break;
case Intrinsic::assume:
case Intrinsic::var_annotation:
break; // Strip out these intrinsics
case Intrinsic::memcpy: {
Type *IntPtr = DL.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
Ops[0] = CI->getArgOperand(0);
Ops[1] = CI->getArgOperand(1);
Ops[2] = Size;
ReplaceCallWith("memcpy", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::memmove: {
Type *IntPtr = DL.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
Ops[0] = CI->getArgOperand(0);
Ops[1] = CI->getArgOperand(1);
Ops[2] = Size;
ReplaceCallWith("memmove", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::memset: {
Value *Op0 = CI->getArgOperand(0);
Type *IntPtr = DL.getIntPtrType(Op0->getType());
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
Ops[0] = Op0;
    // Extend the fill value to i32 to match the C memset prototype.
Ops[1] = Builder.CreateIntCast(CI->getArgOperand(1),
Type::getInt32Ty(Context),
/* isSigned */ false);
Ops[2] = Size;
ReplaceCallWith("memset", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::sqrt: {
ReplaceFPIntrinsicWithCall(CI, "sqrtf", "sqrt", "sqrtl");
break;
}
case Intrinsic::log: {
ReplaceFPIntrinsicWithCall(CI, "logf", "log", "logl");
break;
}
case Intrinsic::log2: {
ReplaceFPIntrinsicWithCall(CI, "log2f", "log2", "log2l");
break;
}
case Intrinsic::log10: {
ReplaceFPIntrinsicWithCall(CI, "log10f", "log10", "log10l");
break;
}
case Intrinsic::exp: {
ReplaceFPIntrinsicWithCall(CI, "expf", "exp", "expl");
break;
}
case Intrinsic::exp2: {
ReplaceFPIntrinsicWithCall(CI, "exp2f", "exp2", "exp2l");
break;
}
case Intrinsic::pow: {
ReplaceFPIntrinsicWithCall(CI, "powf", "pow", "powl");
break;
}
case Intrinsic::sin: {
ReplaceFPIntrinsicWithCall(CI, "sinf", "sin", "sinl");
break;
}
case Intrinsic::cos: {
ReplaceFPIntrinsicWithCall(CI, "cosf", "cos", "cosl");
break;
}
case Intrinsic::floor: {
ReplaceFPIntrinsicWithCall(CI, "floorf", "floor", "floorl");
break;
}
case Intrinsic::ceil: {
ReplaceFPIntrinsicWithCall(CI, "ceilf", "ceil", "ceill");
break;
}
case Intrinsic::trunc: {
ReplaceFPIntrinsicWithCall(CI, "truncf", "trunc", "truncl");
break;
}
case Intrinsic::round: {
ReplaceFPIntrinsicWithCall(CI, "roundf", "round", "roundl");
break;
}
case Intrinsic::copysign: {
ReplaceFPIntrinsicWithCall(CI, "copysignf", "copysign", "copysignl");
break;
}
case Intrinsic::flt_rounds:
// Lower to "round to the nearest"
if (!CI->getType()->isVoidTy())
CI->replaceAllUsesWith(ConstantInt::get(CI->getType(), 1));
break;
case Intrinsic::invariant_start:
case Intrinsic::lifetime_start:
// Discard region information.
CI->replaceAllUsesWith(UndefValue::get(CI->getType()));
break;
case Intrinsic::invariant_end:
case Intrinsic::lifetime_end:
// Discard region information.
break;
}
assert(CI->use_empty() &&
"Lowering should have eliminated any uses of the intrinsic call!");
CI->eraseFromParent();
}
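/// LowerToByteSwap - Replace a call to a function that takes and returns the
/// same integer type with a call to the llvm.bswap intrinsic on that type.
/// Returns true if the replacement was performed.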
bool IntrinsicLowering::LowerToByteSwap(CallInst *CI) {
// Verify this is a simple bswap.
if (CI->getNumArgOperands() != 1 ||
CI->getType() != CI->getArgOperand(0)->getType() ||
!CI->getType()->isIntegerTy())
return false;
IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty)
return false;
// Okay, we can do this xform, do so now.
Module *M = CI->getParent()->getParent()->getParent();
Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Ty);
Value *Op = CI->getArgOperand(0);
Op = CallInst::Create(Int, Op, CI->getName(), CI);
CI->replaceAllUsesWith(Op);
CI->eraseFromParent();
return true;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/EarlyIfConversion.cpp | //===-- EarlyIfConversion.cpp - If-conversion on SSA form machine code ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Early if-conversion is for out-of-order CPUs that don't have a lot of
// predicable instructions. The goal is to eliminate conditional branches that
// may mispredict.
//
// Instructions from both sides of the branch are executed speculatively, and a
// cmov instruction selects the result.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "early-ifcvt"
// Absolute maximum number of instructions allowed per speculated block.
// This bypasses all other heuristics, so it should be set fairly high.
static cl::opt<unsigned>
BlockInstrLimit("early-ifcvt-limit", cl::init(30), cl::Hidden,
cl::desc("Maximum number of instructions per speculated block."));
// Stress testing mode - disable heuristics.
static cl::opt<bool> Stress("stress-early-ifcvt", cl::Hidden,
cl::desc("Turn all knobs to 11"));
STATISTIC(NumDiamondsSeen, "Number of diamonds");
STATISTIC(NumDiamondsConv, "Number of diamonds converted");
STATISTIC(NumTrianglesSeen, "Number of triangles");
STATISTIC(NumTrianglesConv, "Number of triangles converted");
//===----------------------------------------------------------------------===//
// SSAIfConv
//===----------------------------------------------------------------------===//
//
// The SSAIfConv class performs if-conversion on SSA form machine code after
// determining if it is possible. The class contains no heuristics; external
// code should be used to determine when if-conversion is a good idea.
//
// SSAIfConv can convert both triangles and diamonds:
//
//   Triangle: Head              Diamond: Head
//              | \                       /  \
//              |  \                     /    |
//              |  [TF]BB              FBB    TBB
//              |  /                     \    /
//              | /                       \  /
//             Tail                       Tail
//
// Instructions in the conditional blocks TBB and/or FBB are spliced into the
// Head block, and phis in the Tail block are converted to select instructions.
//
namespace {
class SSAIfConv {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI;
public:
/// The block containing the conditional branch.
MachineBasicBlock *Head;
/// The block containing phis after the if-then-else.
MachineBasicBlock *Tail;
/// The 'true' conditional block as determined by AnalyzeBranch.
MachineBasicBlock *TBB;
/// The 'false' conditional block as determined by AnalyzeBranch.
MachineBasicBlock *FBB;
/// isTriangle - When there is no 'else' block, either TBB or FBB will be
/// equal to Tail.
bool isTriangle() const { return TBB == Tail || FBB == Tail; }
/// Returns the Tail predecessor for the True side.
MachineBasicBlock *getTPred() const { return TBB == Tail ? Head : TBB; }
/// Returns the Tail predecessor for the False side.
MachineBasicBlock *getFPred() const { return FBB == Tail ? Head : FBB; }
/// Information about each phi in the Tail block.
struct PHIInfo {
MachineInstr *PHI;
unsigned TReg, FReg;
// Latencies from Cond+Branch, TReg, and FReg to DstReg.
int CondCycles, TCycles, FCycles;
PHIInfo(MachineInstr *phi)
: PHI(phi), TReg(0), FReg(0), CondCycles(0), TCycles(0), FCycles(0) {}
};
SmallVector<PHIInfo, 8> PHIs;
private:
/// The branch condition determined by AnalyzeBranch.
SmallVector<MachineOperand, 4> Cond;
/// Instructions in Head that define values used by the conditional blocks.
/// The hoisted instructions must be inserted after these instructions.
SmallPtrSet<MachineInstr*, 8> InsertAfter;
/// Register units clobbered by the conditional blocks.
BitVector ClobberedRegUnits;
// Scratch pad for findInsertionPoint.
SparseSet<unsigned> LiveRegUnits;
  /// Insertion point in Head for speculatively executed instructions from TBB
/// and FBB.
MachineBasicBlock::iterator InsertionPoint;
/// Return true if all non-terminator instructions in MBB can be safely
/// speculated.
bool canSpeculateInstrs(MachineBasicBlock *MBB);
/// Find a valid insertion point in Head.
bool findInsertionPoint();
/// Replace PHI instructions in Tail with selects.
void replacePHIInstrs();
/// Insert selects and rewrite PHI operands to use them.
void rewritePHIOperands();
public:
/// runOnMachineFunction - Initialize per-function data structures.
void runOnMachineFunction(MachineFunction &MF) {
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
LiveRegUnits.clear();
LiveRegUnits.setUniverse(TRI->getNumRegUnits());
ClobberedRegUnits.clear();
ClobberedRegUnits.resize(TRI->getNumRegUnits());
}
/// canConvertIf - If the sub-CFG headed by MBB can be if-converted,
/// initialize the internal state, and return true.
bool canConvertIf(MachineBasicBlock *MBB);
/// convertIf - If-convert the last block passed to canConvertIf(), assuming
/// it is possible. Add any erased blocks to RemovedBlocks.
void convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks);
};
} // end anonymous namespace
/// canSpeculateInstrs - Returns true if all the instructions in MBB can safely
/// be speculated. The terminators are not considered.
///
/// If instructions use any values that are defined in the head basic block,
/// the defining instructions are added to InsertAfter.
///
/// Any clobbered regunits are added to ClobberedRegUnits.
///
bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
// Reject any live-in physregs. It's probably CPSR/EFLAGS, and very hard to
// get right.
if (!MBB->livein_empty()) {
DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n");
return false;
}
unsigned InstrCount = 0;
// Check all instructions, except the terminators. It is assumed that
// terminators never have side effects or define any used register values.
for (MachineBasicBlock::iterator I = MBB->begin(),
E = MBB->getFirstTerminator(); I != E; ++I) {
if (I->isDebugValue())
continue;
if (++InstrCount > BlockInstrLimit && !Stress) {
DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than "
<< BlockInstrLimit << " instructions.\n");
return false;
}
// There shouldn't normally be any phis in a single-predecessor block.
if (I->isPHI()) {
DEBUG(dbgs() << "Can't hoist: " << *I);
return false;
}
// Don't speculate loads. Note that it may be possible and desirable to
// speculate GOT or constant pool loads that are guaranteed not to trap,
// but we don't support that for now.
if (I->mayLoad()) {
DEBUG(dbgs() << "Won't speculate load: " << *I);
return false;
}
// We never speculate stores, so an AA pointer isn't necessary.
bool DontMoveAcrossStore = true;
if (!I->isSafeToMove(nullptr, DontMoveAcrossStore)) {
DEBUG(dbgs() << "Can't speculate: " << *I);
return false;
}
// Check for any dependencies on Head instructions.
for (const MachineOperand &MO : I->operands()) {
if (MO.isRegMask()) {
DEBUG(dbgs() << "Won't speculate regmask: " << *I);
return false;
}
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
// Remember clobbered regunits.
if (MO.isDef() && TargetRegisterInfo::isPhysicalRegister(Reg))
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
ClobberedRegUnits.set(*Units);
if (!MO.readsReg() || !TargetRegisterInfo::isVirtualRegister(Reg))
continue;
MachineInstr *DefMI = MRI->getVRegDef(Reg);
if (!DefMI || DefMI->getParent() != Head)
continue;
if (InsertAfter.insert(DefMI).second)
DEBUG(dbgs() << "BB#" << MBB->getNumber() << " depends on " << *DefMI);
if (DefMI->isTerminator()) {
DEBUG(dbgs() << "Can't insert instructions below terminator.\n");
return false;
}
}
}
return true;
}
/// Find an insertion point in Head for the speculated instructions. The
/// insertion point must be:
///
/// 1. Before any terminators.
/// 2. After any instructions in InsertAfter.
/// 3. Not have any clobbered regunits live.
///
/// This function sets InsertionPoint and returns true when successful, it
/// returns false if no valid insertion point could be found.
///
bool SSAIfConv::findInsertionPoint() {
// Keep track of live regunits before the current position.
// Only track RegUnits that are also in ClobberedRegUnits.
LiveRegUnits.clear();
SmallVector<unsigned, 8> Reads;
MachineBasicBlock::iterator FirstTerm = Head->getFirstTerminator();
MachineBasicBlock::iterator I = Head->end();
MachineBasicBlock::iterator B = Head->begin();
while (I != B) {
--I;
    // Some of the conditional code depends on I.
if (InsertAfter.count(I)) {
DEBUG(dbgs() << "Can't insert code after " << *I);
return false;
}
// Update live regunits.
for (const MachineOperand &MO : I->operands()) {
// We're ignoring regmask operands. That is conservatively correct.
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
// I clobbers Reg, so it isn't live before I.
if (MO.isDef())
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
LiveRegUnits.erase(*Units);
// Unless I reads Reg.
if (MO.readsReg())
Reads.push_back(Reg);
}
// Anything read by I is live before I.
while (!Reads.empty())
for (MCRegUnitIterator Units(Reads.pop_back_val(), TRI); Units.isValid();
++Units)
if (ClobberedRegUnits.test(*Units))
LiveRegUnits.insert(*Units);
// We can't insert before a terminator.
if (I != FirstTerm && I->isTerminator())
continue;
// Some of the clobbered registers are live before I, not a valid insertion
// point.
if (!LiveRegUnits.empty()) {
DEBUG({
dbgs() << "Would clobber";
for (SparseSet<unsigned>::const_iterator
i = LiveRegUnits.begin(), e = LiveRegUnits.end(); i != e; ++i)
dbgs() << ' ' << PrintRegUnit(*i, TRI);
dbgs() << " live before " << *I;
});
continue;
}
// This is a valid insertion point.
InsertionPoint = I;
DEBUG(dbgs() << "Can insert before " << *I);
return true;
}
DEBUG(dbgs() << "No legal insertion point found.\n");
return false;
}
/// canConvertIf - analyze the sub-cfg rooted in MBB, and return true if it is
/// a potential candidate for if-conversion. Fill out the internal state.
///
bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
Head = MBB;
TBB = FBB = Tail = nullptr;
if (Head->succ_size() != 2)
return false;
MachineBasicBlock *Succ0 = Head->succ_begin()[0];
MachineBasicBlock *Succ1 = Head->succ_begin()[1];
// Canonicalize so Succ0 has MBB as its single predecessor.
if (Succ0->pred_size() != 1)
std::swap(Succ0, Succ1);
if (Succ0->pred_size() != 1 || Succ0->succ_size() != 1)
return false;
Tail = Succ0->succ_begin()[0];
// This is not a triangle.
if (Tail != Succ1) {
// Check for a diamond. We won't deal with any critical edges.
if (Succ1->pred_size() != 1 || Succ1->succ_size() != 1 ||
Succ1->succ_begin()[0] != Tail)
return false;
DEBUG(dbgs() << "\nDiamond: BB#" << Head->getNumber()
<< " -> BB#" << Succ0->getNumber()
<< "/BB#" << Succ1->getNumber()
<< " -> BB#" << Tail->getNumber() << '\n');
// Live-in physregs are tricky to get right when speculating code.
if (!Tail->livein_empty()) {
DEBUG(dbgs() << "Tail has live-ins.\n");
return false;
}
} else {
DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber()
<< " -> BB#" << Succ0->getNumber()
<< " -> BB#" << Tail->getNumber() << '\n');
}
// This is a triangle or a diamond.
// If Tail doesn't have any phis, there must be side effects.
if (Tail->empty() || !Tail->front().isPHI()) {
DEBUG(dbgs() << "No phis in tail.\n");
return false;
}
// The branch we're looking to eliminate must be analyzable.
Cond.clear();
if (TII->AnalyzeBranch(*Head, TBB, FBB, Cond)) {
DEBUG(dbgs() << "Branch not analyzable.\n");
return false;
}
// This is weird, probably some sort of degenerate CFG.
if (!TBB) {
DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch.\n");
return false;
}
// AnalyzeBranch doesn't set FBB on a fall-through branch.
// Make sure it is always set.
FBB = TBB == Succ0 ? Succ1 : Succ0;
// Any phis in the tail block must be convertible to selects.
PHIs.clear();
MachineBasicBlock *TPred = getTPred();
MachineBasicBlock *FPred = getFPred();
for (MachineBasicBlock::iterator I = Tail->begin(), E = Tail->end();
I != E && I->isPHI(); ++I) {
PHIs.push_back(&*I);
PHIInfo &PI = PHIs.back();
// Find PHI operands corresponding to TPred and FPred.
for (unsigned i = 1; i != PI.PHI->getNumOperands(); i += 2) {
if (PI.PHI->getOperand(i+1).getMBB() == TPred)
PI.TReg = PI.PHI->getOperand(i).getReg();
if (PI.PHI->getOperand(i+1).getMBB() == FPred)
PI.FReg = PI.PHI->getOperand(i).getReg();
}
assert(TargetRegisterInfo::isVirtualRegister(PI.TReg) && "Bad PHI");
assert(TargetRegisterInfo::isVirtualRegister(PI.FReg) && "Bad PHI");
// Get target information.
if (!TII->canInsertSelect(*Head, Cond, PI.TReg, PI.FReg,
PI.CondCycles, PI.TCycles, PI.FCycles)) {
DEBUG(dbgs() << "Can't convert: " << *PI.PHI);
return false;
}
}
// Check that the conditional instructions can be speculated.
InsertAfter.clear();
ClobberedRegUnits.reset();
if (TBB != Tail && !canSpeculateInstrs(TBB))
return false;
if (FBB != Tail && !canSpeculateInstrs(FBB))
return false;
// Try to find a valid insertion point for the speculated instructions in the
// head basic block.
if (!findInsertionPoint())
return false;
if (isTriangle())
++NumTrianglesSeen;
else
++NumDiamondsSeen;
return true;
}
/// replacePHIInstrs - Completely replace PHI instructions with selects.
/// This is possible when the only Tail predecessors are the if-converted
/// blocks.
void SSAIfConv::replacePHIInstrs() {
assert(Tail->pred_size() == 2 && "Cannot replace PHIs");
MachineBasicBlock::iterator FirstTerm = Head->getFirstTerminator();
assert(FirstTerm != Head->end() && "No terminators");
DebugLoc HeadDL = FirstTerm->getDebugLoc();
// Convert all PHIs to select instructions inserted before FirstTerm.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
PHIInfo &PI = PHIs[i];
DEBUG(dbgs() << "If-converting " << *PI.PHI);
unsigned DstReg = PI.PHI->getOperand(0).getReg();
TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
PI.PHI->eraseFromParent();
PI.PHI = nullptr;
}
}
/// rewritePHIOperands - When there are additional Tail predecessors, insert
/// select instructions in Head and rewrite PHI operands to use the selects.
/// Keep the PHI instructions in Tail to handle the other predecessors.
void SSAIfConv::rewritePHIOperands() {
MachineBasicBlock::iterator FirstTerm = Head->getFirstTerminator();
assert(FirstTerm != Head->end() && "No terminators");
DebugLoc HeadDL = FirstTerm->getDebugLoc();
// Convert all PHIs to select instructions inserted before FirstTerm.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
PHIInfo &PI = PHIs[i];
unsigned DstReg = 0;
DEBUG(dbgs() << "If-converting " << *PI.PHI);
if (PI.TReg == PI.FReg) {
// We do not need the select instruction if both incoming values are
// equal.
DstReg = PI.TReg;
} else {
unsigned PHIDst = PI.PHI->getOperand(0).getReg();
DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
TII->insertSelect(*Head, FirstTerm, HeadDL,
DstReg, Cond, PI.TReg, PI.FReg);
DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
}
// Rewrite PHI operands TPred -> (DstReg, Head), remove FPred.
for (unsigned i = PI.PHI->getNumOperands(); i != 1; i -= 2) {
MachineBasicBlock *MBB = PI.PHI->getOperand(i-1).getMBB();
if (MBB == getTPred()) {
PI.PHI->getOperand(i-1).setMBB(Head);
PI.PHI->getOperand(i-2).setReg(DstReg);
} else if (MBB == getFPred()) {
PI.PHI->RemoveOperand(i-1);
PI.PHI->RemoveOperand(i-2);
}
}
DEBUG(dbgs() << " --> " << *PI.PHI);
}
}
/// convertIf - Execute the if conversion after canConvertIf has determined the
/// feasibility.
///
/// Any basic blocks erased will be added to RemovedBlocks.
///
void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
assert(Head && Tail && TBB && FBB && "Call canConvertIf first.");
// Update statistics.
if (isTriangle())
++NumTrianglesConv;
else
++NumDiamondsConv;
// Move all instructions into Head, except for the terminators.
if (TBB != Tail)
Head->splice(InsertionPoint, TBB, TBB->begin(), TBB->getFirstTerminator());
if (FBB != Tail)
Head->splice(InsertionPoint, FBB, FBB->begin(), FBB->getFirstTerminator());
// Are there extra Tail predecessors?
bool ExtraPreds = Tail->pred_size() != 2;
if (ExtraPreds)
rewritePHIOperands();
else
replacePHIInstrs();
// Fix up the CFG, temporarily leave Head without any successors.
Head->removeSuccessor(TBB);
Head->removeSuccessor(FBB);
if (TBB != Tail)
TBB->removeSuccessor(Tail);
if (FBB != Tail)
FBB->removeSuccessor(Tail);
// Fix up Head's terminators.
// It should become a single branch or a fallthrough.
DebugLoc HeadDL = Head->getFirstTerminator()->getDebugLoc();
TII->RemoveBranch(*Head);
// Erase the now empty conditional blocks. It is likely that Head can fall
// through to Tail, and we can join the two blocks.
if (TBB != Tail) {
RemovedBlocks.push_back(TBB);
TBB->eraseFromParent();
}
if (FBB != Tail) {
RemovedBlocks.push_back(FBB);
FBB->eraseFromParent();
}
assert(Head->succ_empty() && "Additional head successors?");
if (!ExtraPreds && Head->isLayoutSuccessor(Tail)) {
// Splice Tail onto the end of Head.
DEBUG(dbgs() << "Joining tail BB#" << Tail->getNumber()
<< " into head BB#" << Head->getNumber() << '\n');
Head->splice(Head->end(), Tail,
Tail->begin(), Tail->end());
Head->transferSuccessorsAndUpdatePHIs(Tail);
RemovedBlocks.push_back(Tail);
Tail->eraseFromParent();
} else {
// We need a branch to Tail, let code placement work it out later.
DEBUG(dbgs() << "Converting to unconditional branch.\n");
SmallVector<MachineOperand, 0> EmptyCond;
TII->InsertBranch(*Head, Tail, nullptr, EmptyCond, HeadDL);
Head->addSuccessor(Tail);
}
DEBUG(dbgs() << *Head);
}
//===----------------------------------------------------------------------===//
// EarlyIfConverter Pass
//===----------------------------------------------------------------------===//
namespace {
class EarlyIfConverter : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MCSchedModel SchedModel;
MachineRegisterInfo *MRI;
MachineDominatorTree *DomTree;
MachineLoopInfo *Loops;
MachineTraceMetrics *Traces;
MachineTraceMetrics::Ensemble *MinInstr;
SSAIfConv IfConv;
public:
static char ID;
EarlyIfConverter() : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction &MF) override;
StringRef getPassName() const override { return "Early If-Conversion"; }
private:
bool tryConvertIf(MachineBasicBlock*);
void updateDomTree(ArrayRef<MachineBasicBlock*> Removed);
void updateLoops(ArrayRef<MachineBasicBlock*> Removed);
void invalidateTraces();
bool shouldConvertIf();
};
} // end anonymous namespace
char EarlyIfConverter::ID = 0;
char &llvm::EarlyIfConverterID = EarlyIfConverter::ID;
INITIALIZE_PASS_BEGIN(EarlyIfConverter,
"early-ifcvt", "Early If Converter", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(EarlyIfConverter,
"early-ifcvt", "Early If Converter", false, false)
void EarlyIfConverter::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<MachineTraceMetrics>();
AU.addPreserved<MachineTraceMetrics>();
MachineFunctionPass::getAnalysisUsage(AU);
}
/// Update the dominator tree after if-conversion erased some blocks.
void EarlyIfConverter::updateDomTree(ArrayRef<MachineBasicBlock*> Removed) {
// convertIf can remove TBB, FBB, and Tail can be merged into Head.
// TBB and FBB should not dominate any blocks.
// Tail children should be transferred to Head.
MachineDomTreeNode *HeadNode = DomTree->getNode(IfConv.Head);
for (unsigned i = 0, e = Removed.size(); i != e; ++i) {
MachineDomTreeNode *Node = DomTree->getNode(Removed[i]);
assert(Node != HeadNode && "Cannot erase the head node");
while (Node->getNumChildren()) {
assert(Node->getBlock() == IfConv.Tail && "Unexpected children");
DomTree->changeImmediateDominator(Node->getChildren().back(), HeadNode);
}
DomTree->eraseNode(Removed[i]);
}
}
/// Update LoopInfo after if-conversion.
void EarlyIfConverter::updateLoops(ArrayRef<MachineBasicBlock*> Removed) {
if (!Loops)
return;
// If-conversion doesn't change loop structure, and it doesn't mess with back
// edges, so updating LoopInfo is simply removing the dead blocks.
for (unsigned i = 0, e = Removed.size(); i != e; ++i)
Loops->removeBlock(Removed[i]);
}
/// Invalidate MachineTraceMetrics before if-conversion.
void EarlyIfConverter::invalidateTraces() {
Traces->verifyAnalysis();
Traces->invalidate(IfConv.Head);
Traces->invalidate(IfConv.Tail);
Traces->invalidate(IfConv.TBB);
Traces->invalidate(IfConv.FBB);
Traces->verifyAnalysis();
}
// Adjust cycles with downward saturation.
static unsigned adjCycles(unsigned Cyc, int Delta) {
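  // A negative Delta can wrap the unsigned sum below zero; the
  // Cyc + Delta > Cyc test detects that wrap so the result saturates at 0.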
if (Delta < 0 && Cyc + Delta > Cyc)
return 0;
return Cyc + Delta;
}
/// Apply cost model and heuristics to the if-conversion in IfConv.
/// Return true if the conversion is a good idea.
///
bool EarlyIfConverter::shouldConvertIf() {
// Stress testing mode disables all cost considerations.
if (Stress)
return true;
if (!MinInstr)
MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
MachineTraceMetrics::Trace TBBTrace = MinInstr->getTrace(IfConv.getTPred());
MachineTraceMetrics::Trace FBBTrace = MinInstr->getTrace(IfConv.getFPred());
DEBUG(dbgs() << "TBB: " << TBBTrace << "FBB: " << FBBTrace);
unsigned MinCrit = std::min(TBBTrace.getCriticalPath(),
FBBTrace.getCriticalPath());
// Set a somewhat arbitrary limit on the critical path extension we accept.
unsigned CritLimit = SchedModel.MispredictPenalty/2;
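  // The idea is that if-conversion trades an occasional misprediction
  // penalty for a longer dependence chain on every execution, so only a
  // fraction of that penalty is accepted as critical path growth.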
// If-conversion only makes sense when there is unexploited ILP. Compute the
// maximum-ILP resource length of the trace after if-conversion. Compare it
// to the shortest critical path.
SmallVector<const MachineBasicBlock*, 1> ExtraBlocks;
if (IfConv.TBB != IfConv.Tail)
ExtraBlocks.push_back(IfConv.TBB);
unsigned ResLength = FBBTrace.getResourceLength(ExtraBlocks);
DEBUG(dbgs() << "Resource length " << ResLength
<< ", minimal critical path " << MinCrit << '\n');
if (ResLength > MinCrit + CritLimit) {
DEBUG(dbgs() << "Not enough available ILP.\n");
return false;
}
// Assume that the depth of the first head terminator will also be the depth
// of the select instruction inserted, as determined by the flag dependency.
// TBB / FBB data dependencies may delay the select even more.
MachineTraceMetrics::Trace HeadTrace = MinInstr->getTrace(IfConv.Head);
unsigned BranchDepth =
HeadTrace.getInstrCycles(IfConv.Head->getFirstTerminator()).Depth;
DEBUG(dbgs() << "Branch depth: " << BranchDepth << '\n');
// Look at all the tail phis, and compute the critical path extension caused
// by inserting select instructions.
MachineTraceMetrics::Trace TailTrace = MinInstr->getTrace(IfConv.Tail);
for (unsigned i = 0, e = IfConv.PHIs.size(); i != e; ++i) {
SSAIfConv::PHIInfo &PI = IfConv.PHIs[i];
unsigned Slack = TailTrace.getInstrSlack(PI.PHI);
unsigned MaxDepth = Slack + TailTrace.getInstrCycles(PI.PHI).Depth;
DEBUG(dbgs() << "Slack " << Slack << ":\t" << *PI.PHI);
// The condition is pulled into the critical path.
unsigned CondDepth = adjCycles(BranchDepth, PI.CondCycles);
if (CondDepth > MaxDepth) {
unsigned Extra = CondDepth - MaxDepth;
DEBUG(dbgs() << "Condition adds " << Extra << " cycles.\n");
if (Extra > CritLimit) {
DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
return false;
}
}
// The TBB value is pulled into the critical path.
unsigned TDepth = adjCycles(TBBTrace.getPHIDepth(PI.PHI), PI.TCycles);
if (TDepth > MaxDepth) {
unsigned Extra = TDepth - MaxDepth;
DEBUG(dbgs() << "TBB data adds " << Extra << " cycles.\n");
if (Extra > CritLimit) {
DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
return false;
}
}
// The FBB value is pulled into the critical path.
unsigned FDepth = adjCycles(FBBTrace.getPHIDepth(PI.PHI), PI.FCycles);
if (FDepth > MaxDepth) {
unsigned Extra = FDepth - MaxDepth;
DEBUG(dbgs() << "FBB data adds " << Extra << " cycles.\n");
if (Extra > CritLimit) {
DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
return false;
}
}
}
return true;
}
/// Attempt repeated if-conversion on MBB, return true if successful.
///
bool EarlyIfConverter::tryConvertIf(MachineBasicBlock *MBB) {
bool Changed = false;
while (IfConv.canConvertIf(MBB) && shouldConvertIf()) {
// If-convert MBB and update analyses.
invalidateTraces();
SmallVector<MachineBasicBlock*, 4> RemovedBlocks;
IfConv.convertIf(RemovedBlocks);
Changed = true;
updateDomTree(RemovedBlocks);
updateLoops(RemovedBlocks);
}
return Changed;
}
bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
<< "********** Function: " << MF.getName() << '\n');
// Only run if conversion if the target wants it.
const TargetSubtargetInfo &STI = MF.getSubtarget();
if (!STI.enableEarlyIfConversion())
return false;
TII = STI.getInstrInfo();
TRI = STI.getRegisterInfo();
SchedModel = STI.getSchedModel();
MRI = &MF.getRegInfo();
DomTree = &getAnalysis<MachineDominatorTree>();
Loops = getAnalysisIfAvailable<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
bool Changed = false;
IfConv.runOnMachineFunction(MF);
// Visit blocks in dominator tree post-order. The post-order enables nested
// if-conversion in a single pass. The tryConvertIf() function may erase
// blocks, but only blocks dominated by the head block. This makes it safe to
// update the dominator tree while the post-order iterator is still active.
for (auto DomNode : post_order(DomTree))
if (tryConvertIf(DomNode->getBlock()))
Changed = true;
return Changed;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineInstrBundle.cpp | //===-- lib/CodeGen/MachineInstrBundle.cpp --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
namespace {
class UnpackMachineBundles : public MachineFunctionPass {
public:
static char ID; // Pass identification
UnpackMachineBundles(std::function<bool(const Function &)> Ftor = nullptr)
: MachineFunctionPass(ID), PredicateFtor(Ftor) {
initializeUnpackMachineBundlesPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
private:
std::function<bool(const Function &)> PredicateFtor;
};
} // end anonymous namespace
char UnpackMachineBundles::ID = 0;
char &llvm::UnpackMachineBundlesID = UnpackMachineBundles::ID;
INITIALIZE_PASS(UnpackMachineBundles, "unpack-mi-bundles",
"Unpack machine instruction bundles", false, false)
bool UnpackMachineBundles::runOnMachineFunction(MachineFunction &MF) {
if (PredicateFtor && !PredicateFtor(*MF.getFunction()))
return false;
bool Changed = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
for (MachineBasicBlock::instr_iterator MII = MBB->instr_begin(),
MIE = MBB->instr_end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
// Remove BUNDLE instruction and the InsideBundle flags from bundled
// instructions.
if (MI->isBundle()) {
while (++MII != MIE && MII->isBundledWithPred()) {
MII->unbundleFromPred();
for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MII->getOperand(i);
if (MO.isReg() && MO.isInternalRead())
MO.setIsInternalRead(false);
}
}
MI->eraseFromParent();
Changed = true;
continue;
}
++MII;
}
}
return Changed;
}
FunctionPass *
llvm::createUnpackMachineBundles(std::function<bool(const Function &)> Ftor) {
return new UnpackMachineBundles(Ftor);
}
namespace {
class FinalizeMachineBundles : public MachineFunctionPass {
public:
static char ID; // Pass identification
FinalizeMachineBundles() : MachineFunctionPass(ID) {
initializeFinalizeMachineBundlesPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
};
} // end anonymous namespace
char FinalizeMachineBundles::ID = 0;
char &llvm::FinalizeMachineBundlesID = FinalizeMachineBundles::ID;
INITIALIZE_PASS(FinalizeMachineBundles, "finalize-mi-bundles",
"Finalize machine instruction bundles", false, false)
bool FinalizeMachineBundles::runOnMachineFunction(MachineFunction &MF) {
return llvm::finalizeBundles(MF);
}
/// finalizeBundle - Finalize a machine instruction bundle which includes
/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
/// This routine adds a BUNDLE instruction to represent the bundle, it adds
/// IsInternalRead markers to MachineOperands which are defined inside the
/// bundle, and it copies externally visible defs and uses to the BUNDLE
/// instruction.
void llvm::finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI,
MachineBasicBlock::instr_iterator LastMI) {
assert(FirstMI != LastMI && "Empty bundle?");
MIBundleBuilder Bundle(MBB, FirstMI, LastMI);
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
MachineInstrBuilder MIB =
BuildMI(MF, FirstMI->getDebugLoc(), TII->get(TargetOpcode::BUNDLE));
Bundle.prepend(MIB);
SmallVector<unsigned, 32> LocalDefs;
SmallSet<unsigned, 32> LocalDefSet;
SmallSet<unsigned, 8> DeadDefSet;
SmallSet<unsigned, 16> KilledDefSet;
SmallVector<unsigned, 8> ExternUses;
SmallSet<unsigned, 8> ExternUseSet;
SmallSet<unsigned, 8> KilledUseSet;
SmallSet<unsigned, 8> UndefUseSet;
SmallVector<MachineOperand*, 4> Defs;
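  // For each instruction, scan the uses first and defer the defs: a use of a
  // register already defined earlier in the bundle becomes an internal read,
  // while deferring the defs keeps an instruction's own defs from making its
  // own uses look internal.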
for (; FirstMI != LastMI; ++FirstMI) {
for (unsigned i = 0, e = FirstMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = FirstMI->getOperand(i);
if (!MO.isReg())
continue;
if (MO.isDef()) {
Defs.push_back(&MO);
continue;
}
unsigned Reg = MO.getReg();
if (!Reg)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
if (LocalDefSet.count(Reg)) {
MO.setIsInternalRead();
if (MO.isKill())
// Internal def is now killed.
KilledDefSet.insert(Reg);
} else {
if (ExternUseSet.insert(Reg).second) {
ExternUses.push_back(Reg);
if (MO.isUndef())
UndefUseSet.insert(Reg);
}
if (MO.isKill())
// External def is now killed.
KilledUseSet.insert(Reg);
}
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
MachineOperand &MO = *Defs[i];
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (LocalDefSet.insert(Reg).second) {
LocalDefs.push_back(Reg);
if (MO.isDead()) {
DeadDefSet.insert(Reg);
}
} else {
// Re-defined inside the bundle, it's no longer killed.
KilledDefSet.erase(Reg);
if (!MO.isDead())
// Previously defined but dead.
DeadDefSet.erase(Reg);
}
if (!MO.isDead()) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (LocalDefSet.insert(SubReg).second)
LocalDefs.push_back(SubReg);
}
}
}
Defs.clear();
}
SmallSet<unsigned, 32> Added;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
if (Added.insert(Reg).second) {
// If it's not live beyond end of the bundle, mark it dead.
bool isDead = DeadDefSet.count(Reg) || KilledDefSet.count(Reg);
MIB.addReg(Reg, getDefRegState(true) | getDeadRegState(isDead) |
getImplRegState(true));
}
}
for (unsigned i = 0, e = ExternUses.size(); i != e; ++i) {
unsigned Reg = ExternUses[i];
bool isKill = KilledUseSet.count(Reg);
bool isUndef = UndefUseSet.count(Reg);
MIB.addReg(Reg, getKillRegState(isKill) | getUndefRegState(isUndef) |
getImplRegState(true));
}
}
/// finalizeBundle - Same functionality as the previous finalizeBundle except
/// the last instruction in the bundle is not provided as an input. This is
/// used in cases where bundles are pre-determined by marking instructions
/// with 'InsideBundle' marker. It returns the MBB instruction iterator that
/// points to the end of the bundle.
MachineBasicBlock::instr_iterator
llvm::finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI) {
MachineBasicBlock::instr_iterator E = MBB.instr_end();
MachineBasicBlock::instr_iterator LastMI = std::next(FirstMI);
while (LastMI != E && LastMI->isInsideBundle())
++LastMI;
finalizeBundle(MBB, FirstMI, LastMI);
return LastMI;
}
/// finalizeBundles - Finalize instruction bundles in the specified
/// MachineFunction. Return true if any bundles are finalized.
bool llvm::finalizeBundles(MachineFunction &MF) {
bool Changed = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock &MBB = *I;
MachineBasicBlock::instr_iterator MII = MBB.instr_begin();
MachineBasicBlock::instr_iterator MIE = MBB.instr_end();
if (MII == MIE)
continue;
assert(!MII->isInsideBundle() &&
"First instr cannot be inside bundle before finalization!");
for (++MII; MII != MIE; ) {
if (!MII->isInsideBundle())
++MII;
else {
MII = finalizeBundle(MBB, std::prev(MII));
Changed = true;
}
}
}
return Changed;
}
//===----------------------------------------------------------------------===//
// MachineOperand iterator
//===----------------------------------------------------------------------===//
MachineOperandIteratorBase::VirtRegInfo
MachineOperandIteratorBase::analyzeVirtReg(unsigned Reg,
SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops) {
VirtRegInfo RI = { false, false, false };
for(; isValid(); ++*this) {
MachineOperand &MO = deref();
if (!MO.isReg() || MO.getReg() != Reg)
continue;
// Remember each (MI, OpNo) that refers to Reg.
if (Ops)
Ops->push_back(std::make_pair(MO.getParent(), getOperandNo()));
// Both defs and uses can read virtual registers.
if (MO.readsReg()) {
RI.Reads = true;
if (MO.isDef())
RI.Tied = true;
}
// Only defs can write.
if (MO.isDef())
RI.Writes = true;
else if (!RI.Tied && MO.getParent()->isRegTiedToDefOperand(getOperandNo()))
RI.Tied = true;
}
return RI;
}
MachineOperandIteratorBase::PhysRegInfo
MachineOperandIteratorBase::analyzePhysReg(unsigned Reg,
const TargetRegisterInfo *TRI) {
bool AllDefsDead = true;
PhysRegInfo PRI = {false, false, false, false, false, false};
assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
"analyzePhysReg not given a physical register!");
for (; isValid(); ++*this) {
MachineOperand &MO = deref();
if (MO.isRegMask() && MO.clobbersPhysReg(Reg))
PRI.Clobbers = true; // Regmask clobbers Reg.
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg || !TargetRegisterInfo::isPhysicalRegister(MOReg))
continue;
bool IsRegOrSuperReg = MOReg == Reg || TRI->isSubRegister(MOReg, Reg);
bool IsRegOrOverlapping = MOReg == Reg || TRI->regsOverlap(MOReg, Reg);
if (IsRegOrSuperReg && MO.readsReg()) {
// Reg or a super-reg is read, and perhaps killed also.
PRI.Reads = true;
PRI.Kills = MO.isKill();
}
if (IsRegOrOverlapping && MO.readsReg()) {
      PRI.ReadsOverlap = true; // Reg or an overlapping register is read.
}
if (!MO.isDef())
continue;
if (IsRegOrSuperReg) {
PRI.Defines = true; // Reg or a super-register is defined.
if (!MO.isDead())
AllDefsDead = false;
}
if (IsRegOrOverlapping)
PRI.Clobbers = true; // Reg or an overlapping reg is defined.
}
if (AllDefsDead && PRI.Defines)
PRI.DefinesDead = true; // Reg or super-register was defined and was dead.
return PRI;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ShrinkWrap.cpp | //===-- ShrinkWrap.cpp - Compute safe point for prolog/epilog insertion ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass looks for safe point where the prologue and epilogue can be
// inserted.
// The safe point for the prologue (resp. epilogue) is called Save
// (resp. Restore).
// A point is safe for the prologue (resp. epilogue) if and only if
// 1) it dominates (resp. post-dominates) all the frame-related operations,
// and 2) between two executions of the Save (resp. Restore) point there is
// an execution of the Restore (resp. Save) point.
//
// For instance, the following points are safe:
// for (int i = 0; i < 10; ++i) {
// Save
// ...
// Restore
// }
// Indeed, the execution looks like Save -> Restore -> Save -> Restore ...
// And the following points are not:
// for (int i = 0; i < 10; ++i) {
// Save
// ...
// }
// for (int i = 0; i < 10; ++i) {
// ...
// Restore
// }
// Indeed, the execution looks like Save -> Save -> ... -> Restore -> Restore.
//
// This pass also ensures that the safe points are 3) cheaper than the regular
// entry and exits blocks.
//
// Property #1 is ensured via the use of MachineDominatorTree and
// MachinePostDominatorTree.
// Property #2 is ensured via property #1 and MachineLoopInfo, i.e., both
// points must be in the same loop.
// Property #3 is ensured via the MachineBlockFrequencyInfo.
//
// If this pass finds points matching all these properties, then
// MachineFrameInfo is updated with that information.
//===----------------------------------------------------------------------===//
#include "llvm/ADT/Statistic.h"
// To check for profitability.
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
// For property #1 for Save.
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
// To record the result of the analysis.
#include "llvm/CodeGen/MachineFrameInfo.h"
// For property #2.
#include "llvm/CodeGen/MachineLoopInfo.h"
// For property #1 for Restore.
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/Passes.h"
// To know about callee-saved.
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Support/Debug.h"
// To query the target about frame lowering.
#include "llvm/Target/TargetFrameLowering.h"
// To know about frame setup operation.
#include "llvm/Target/TargetInstrInfo.h"
// To access TargetInstrInfo.
#include "llvm/Target/TargetSubtargetInfo.h"
#define DEBUG_TYPE "shrink-wrap"
using namespace llvm;
STATISTIC(NumFunc, "Number of functions");
STATISTIC(NumCandidates, "Number of shrink-wrapping candidates");
STATISTIC(NumCandidatesDropped,
"Number of shrink-wrapping candidates dropped because of frequency");
namespace {
/// \brief Class to determine where the safe point to insert the
/// prologue and epilogue are.
/// Unlike the paper from Fred C. Chow, PLDI'88, that introduces the
/// shrink-wrapping term for prologue/epilogue placement, this pass
/// does not rely on expensive data-flow analysis. Instead we use the
/// dominance properties and loop information to decide which point
/// are safe for such insertion.
class ShrinkWrap : public MachineFunctionPass {
/// Hold callee-saved information.
RegisterClassInfo RCI;
MachineDominatorTree *MDT;
MachinePostDominatorTree *MPDT;
/// Current safe point found for the prologue.
/// The prologue will be inserted before the first instruction
/// in this basic block.
MachineBasicBlock *Save;
/// Current safe point found for the epilogue.
/// The epilogue will be inserted before the first terminator instruction
/// in this basic block.
MachineBasicBlock *Restore;
/// Hold the information of the basic block frequency.
/// Use to check the profitability of the new points.
MachineBlockFrequencyInfo *MBFI;
/// Hold the loop information. Used to determine if Save and Restore
/// are in the same loop.
MachineLoopInfo *MLI;
/// Frequency of the Entry block.
uint64_t EntryFreq;
/// Current opcode for frame setup.
unsigned FrameSetupOpcode;
/// Current opcode for frame destroy.
unsigned FrameDestroyOpcode;
/// Entry block.
const MachineBasicBlock *Entry;
/// \brief Check if \p MI uses or defines a callee-saved register or
/// a frame index. If this is the case, this means \p MI must happen
/// after Save and before Restore.
bool useOrDefCSROrFI(const MachineInstr &MI) const;
/// \brief Update the Save and Restore points such that \p MBB is in
/// the region that is dominated by Save and post-dominated by Restore
/// and Save and Restore still match the safe point definition.
/// Such point may not exist and Save and/or Restore may be null after
/// this call.
void updateSaveRestorePoints(MachineBasicBlock &MBB);
/// \brief Initialize the pass for \p MF.
void init(MachineFunction &MF) {
RCI.runOnMachineFunction(MF);
MDT = &getAnalysis<MachineDominatorTree>();
MPDT = &getAnalysis<MachinePostDominatorTree>();
Save = nullptr;
Restore = nullptr;
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
MLI = &getAnalysis<MachineLoopInfo>();
EntryFreq = MBFI->getEntryFreq();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
FrameSetupOpcode = TII.getCallFrameSetupOpcode();
FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
Entry = &MF.front();
++NumFunc;
}
/// Check whether or not Save and Restore points are still interesting for
/// shrink-wrapping.
bool ArePointsInteresting() const { return Save != Entry && Save && Restore; }
public:
static char ID;
ShrinkWrap() : MachineFunctionPass(ID) {
initializeShrinkWrapPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachinePostDominatorTree>();
AU.addRequired<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
StringRef getPassName() const override {
return "Shrink Wrapping analysis";
}
/// \brief Perform the shrink-wrapping analysis and update
/// the MachineFrameInfo attached to \p MF with the results.
bool runOnMachineFunction(MachineFunction &MF) override;
};
} // End anonymous namespace.
char ShrinkWrap::ID = 0;
char &llvm::ShrinkWrapID = ShrinkWrap::ID;
INITIALIZE_PASS_BEGIN(ShrinkWrap, "shrink-wrap", "Shrink Wrap Pass", false,
false)
INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(ShrinkWrap, "shrink-wrap", "Shrink Wrap Pass", false, false)
bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI) const {
if (MI.getOpcode() == FrameSetupOpcode ||
MI.getOpcode() == FrameDestroyOpcode) {
DEBUG(dbgs() << "Frame instruction: " << MI << '\n');
return true;
}
for (const MachineOperand &MO : MI.operands()) {
bool UseCSR = false;
if (MO.isReg()) {
unsigned PhysReg = MO.getReg();
if (!PhysReg)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
"Unallocated register?!");
UseCSR = RCI.getLastCalleeSavedAlias(PhysReg);
}
// TODO: Handle regmask more accurately.
// For now, be conservative about them.
if (UseCSR || MO.isFI() || MO.isRegMask()) {
DEBUG(dbgs() << "Use or define CSR(" << UseCSR << ") or FI(" << MO.isFI()
<< "): " << MI << '\n');
return true;
}
}
return false;
}
/// \brief Helper function to find the immediate (post) dominator.
template <typename ListOfBBs, typename DominanceAnalysis>
MachineBasicBlock *FindIDom(MachineBasicBlock &Block, ListOfBBs BBs,
DominanceAnalysis &Dom) {
MachineBasicBlock *IDom = &Block;
for (MachineBasicBlock *BB : BBs) {
IDom = Dom.findNearestCommonDominator(IDom, BB);
if (!IDom)
break;
}
return IDom;
}
void ShrinkWrap::updateSaveRestorePoints(MachineBasicBlock &MBB) {
// Get rid of the easy cases first.
if (!Save)
Save = &MBB;
else
Save = MDT->findNearestCommonDominator(Save, &MBB);
if (!Save) {
DEBUG(dbgs() << "Found a block that is not reachable from Entry\n");
return;
}
if (!Restore)
Restore = &MBB;
else
Restore = MPDT->findNearestCommonDominator(Restore, &MBB);
// Make sure we would be able to insert the restore code before the
// terminator.
if (Restore == &MBB) {
for (const MachineInstr &Terminator : MBB.terminators()) {
if (!useOrDefCSROrFI(Terminator))
continue;
      // One of the terminators needs to happen before the restore point.
if (MBB.succ_empty()) {
Restore = nullptr;
break;
}
// Look for a restore point that post-dominates all the successors.
// The immediate post-dominator is what we are looking for.
Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
break;
}
}
if (!Restore) {
DEBUG(dbgs() << "Restore point needs to be spanned on several blocks\n");
return;
}
// Make sure Save and Restore are suitable for shrink-wrapping:
  // 1. all paths from Save need to lead to Restore before exiting.
  // 2. all paths to Restore need to go through Save from Entry.
// We achieve that by making sure that:
// A. Save dominates Restore.
// B. Restore post-dominates Save.
// C. Save and Restore are in the same loop.
bool SaveDominatesRestore = false;
bool RestorePostDominatesSave = false;
while (Save && Restore &&
(!(SaveDominatesRestore = MDT->dominates(Save, Restore)) ||
!(RestorePostDominatesSave = MPDT->dominates(Restore, Save)) ||
MLI->getLoopFor(Save) != MLI->getLoopFor(Restore))) {
// Fix (A).
if (!SaveDominatesRestore) {
Save = MDT->findNearestCommonDominator(Save, Restore);
continue;
}
// Fix (B).
if (!RestorePostDominatesSave)
Restore = MPDT->findNearestCommonDominator(Restore, Save);
// Fix (C).
if (Save && Restore && Save != Restore &&
MLI->getLoopFor(Save) != MLI->getLoopFor(Restore)) {
if (MLI->getLoopDepth(Save) > MLI->getLoopDepth(Restore))
// Push Save outside of this loop.
Save = FindIDom<>(*Save, Save->predecessors(), *MDT);
else
// Push Restore outside of this loop.
Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
}
}
}
bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
if (MF.empty())
return false;
DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');
init(MF);
for (MachineBasicBlock &MBB : MF) {
DEBUG(dbgs() << "Look into: " << MBB.getNumber() << ' ' << MBB.getName()
<< '\n');
for (const MachineInstr &MI : MBB) {
if (!useOrDefCSROrFI(MI))
continue;
// Save (resp. restore) point must dominate (resp. post dominate)
// MI. Look for the proper basic block for those.
updateSaveRestorePoints(MBB);
// If we are at a point where we cannot improve the placement of
// save/restore instructions, just give up.
if (!ArePointsInteresting()) {
DEBUG(dbgs() << "No Shrink wrap candidate found\n");
return false;
}
// No need to look for other instructions, this basic block
// will already be part of the handled region.
break;
}
}
if (!ArePointsInteresting()) {
// If the points are not interesting at this point, then they must be null
// because it means we did not encounter any frame/CSR related code.
// Otherwise, we would have returned from the previous loop.
assert(!Save && !Restore && "We miss a shrink-wrap opportunity?!");
DEBUG(dbgs() << "Nothing to shrink-wrap\n");
return false;
}
DEBUG(dbgs() << "\n ** Results **\nFrequency of the Entry: " << EntryFreq
<< '\n');
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
do {
DEBUG(dbgs() << "Shrink wrap candidates (#, Name, Freq):\nSave: "
<< Save->getNumber() << ' ' << Save->getName() << ' '
<< MBFI->getBlockFreq(Save).getFrequency() << "\nRestore: "
<< Restore->getNumber() << ' ' << Restore->getName() << ' '
<< MBFI->getBlockFreq(Restore).getFrequency() << '\n');
bool IsSaveCheap, TargetCanUseSaveAsPrologue = false;
if (((IsSaveCheap = EntryFreq >= MBFI->getBlockFreq(Save).getFrequency()) &&
EntryFreq >= MBFI->getBlockFreq(Restore).getFrequency()) &&
((TargetCanUseSaveAsPrologue = TFI->canUseAsPrologue(*Save)) &&
TFI->canUseAsEpilogue(*Restore)))
break;
DEBUG(dbgs() << "New points are too expensive or invalid for the target\n");
MachineBasicBlock *NewBB;
if (!IsSaveCheap || !TargetCanUseSaveAsPrologue) {
Save = FindIDom<>(*Save, Save->predecessors(), *MDT);
if (!Save)
break;
NewBB = Save;
} else {
// Restore is expensive.
Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
if (!Restore)
break;
NewBB = Restore;
}
updateSaveRestorePoints(*NewBB);
} while (Save && Restore);
if (!ArePointsInteresting()) {
++NumCandidatesDropped;
return false;
}
DEBUG(dbgs() << "Final shrink wrap candidates:\nSave: " << Save->getNumber()
<< ' ' << Save->getName() << "\nRestore: "
<< Restore->getNumber() << ' ' << Restore->getName() << '\n');
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setSavePoint(Save);
MFI->setRestorePoint(Restore);
++NumCandidates;
return false;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineBasicBlock.cpp | //===-- llvm/CodeGen/MachineBasicBlock.cpp ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Collect the sequence of machine instructions for a basic block.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "codegen"
MachineBasicBlock::MachineBasicBlock(MachineFunction &mf, const BasicBlock *bb)
: BB(bb), Number(-1), xParent(&mf), Alignment(0), IsLandingPad(false),
AddressTaken(false), CachedMCSymbol(nullptr) {
Insts.Parent = this;
}
MachineBasicBlock::~MachineBasicBlock() {
}
/// getSymbol - Return the MCSymbol for this basic block.
///
MCSymbol *MachineBasicBlock::getSymbol() const {
if (!CachedMCSymbol) {
const MachineFunction *MF = getParent();
MCContext &Ctx = MF->getContext();
const char *Prefix = Ctx.getAsmInfo()->getPrivateLabelPrefix();
CachedMCSymbol = Ctx.getOrCreateSymbol(Twine(Prefix) + "BB" +
Twine(MF->getFunctionNumber()) +
"_" + Twine(getNumber()));
}
return CachedMCSymbol;
}
raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineBasicBlock &MBB) {
MBB.print(OS);
return OS;
}
/// addNodeToList (MBB) - When an MBB is added to an MF, we need to update the
/// parent pointer of the MBB, the MBB numbering, and any instructions in the
/// MBB to be on the right operand list for registers.
///
/// MBBs start out as #-1. When an MBB is added to a MachineFunction, it
/// gets the next available unique MBB number. If it is removed from a
/// MachineFunction, it goes back to being #-1.
void ilist_traits<MachineBasicBlock>::addNodeToList(MachineBasicBlock *N) {
MachineFunction &MF = *N->getParent();
N->Number = MF.addToMBBNumbering(N);
// Make sure the instructions have their operands in the reginfo lists.
MachineRegisterInfo &RegInfo = MF.getRegInfo();
for (MachineBasicBlock::instr_iterator
I = N->instr_begin(), E = N->instr_end(); I != E; ++I)
I->AddRegOperandsToUseLists(RegInfo);
}
void ilist_traits<MachineBasicBlock>::removeNodeFromList(MachineBasicBlock *N) {
N->getParent()->removeFromMBBNumbering(N->Number);
N->Number = -1;
}
/// addNodeToList (MI) - When we add an instruction to a basic block
/// list, we update its parent pointer and add its operands from reg use/def
/// lists if appropriate.
void ilist_traits<MachineInstr>::addNodeToList(MachineInstr *N) {
assert(!N->getParent() && "machine instruction already in a basic block");
N->setParent(Parent);
// Add the instruction's register operands to their corresponding
// use/def lists.
MachineFunction *MF = Parent->getParent();
N->AddRegOperandsToUseLists(MF->getRegInfo());
}
/// removeNodeFromList (MI) - When we remove an instruction from a basic block
/// list, we update its parent pointer and remove its operands from reg use/def
/// lists if appropriate.
void ilist_traits<MachineInstr>::removeNodeFromList(MachineInstr *N) {
assert(N->getParent() && "machine instruction not in a basic block");
// Remove from the use/def lists.
if (MachineFunction *MF = N->getParent()->getParent())
N->RemoveRegOperandsFromUseLists(MF->getRegInfo());
N->setParent(nullptr);
}
/// transferNodesFromList (MI) - When moving a range of instructions from one
/// MBB list to another, we need to update the parent pointers and the use/def
/// lists.
void ilist_traits<MachineInstr>::
transferNodesFromList(ilist_traits<MachineInstr> &fromList,
ilist_iterator<MachineInstr> first,
ilist_iterator<MachineInstr> last) {
assert(Parent->getParent() == fromList.Parent->getParent() &&
"MachineInstr parent mismatch!");
// Splice within the same MBB -> no change.
if (Parent == fromList.Parent) return;
// If splicing between two blocks within the same function, just update the
// parent pointers.
for (; first != last; ++first)
first->setParent(Parent);
}
void ilist_traits<MachineInstr>::deleteNode(MachineInstr* MI) {
assert(!MI->getParent() && "MI is still in a block!");
Parent->getParent()->DeleteMachineInstr(MI);
}
MachineBasicBlock::iterator MachineBasicBlock::getFirstNonPHI() {
instr_iterator I = instr_begin(), E = instr_end();
while (I != E && I->isPHI())
++I;
assert((I == E || !I->isInsideBundle()) &&
"First non-phi MI cannot be inside a bundle!");
return I;
}
MachineBasicBlock::iterator
MachineBasicBlock::SkipPHIsAndLabels(MachineBasicBlock::iterator I) {
iterator E = end();
while (I != E && (I->isPHI() || I->isPosition() || I->isDebugValue()))
++I;
// FIXME: This needs to change if we wish to bundle labels / dbg_values
// inside the bundle.
assert((I == E || !I->isInsideBundle()) &&
"First non-phi / non-label instruction is inside a bundle!");
return I;
}
MachineBasicBlock::iterator MachineBasicBlock::getFirstTerminator() {
iterator B = begin(), E = end(), I = E;
while (I != B && ((--I)->isTerminator() || I->isDebugValue()))
; /*noop */
while (I != E && !I->isTerminator())
++I;
return I;
}
MachineBasicBlock::instr_iterator MachineBasicBlock::getFirstInstrTerminator() {
instr_iterator B = instr_begin(), E = instr_end(), I = E;
while (I != B && ((--I)->isTerminator() || I->isDebugValue()))
; /*noop */
while (I != E && !I->isTerminator())
++I;
return I;
}
MachineBasicBlock::iterator MachineBasicBlock::getFirstNonDebugInstr() {
// Skip over begin-of-block dbg_value instructions.
iterator I = begin(), E = end();
while (I != E && I->isDebugValue())
++I;
return I;
}
MachineBasicBlock::iterator MachineBasicBlock::getLastNonDebugInstr() {
// Skip over end-of-block dbg_value instructions.
instr_iterator B = instr_begin(), I = instr_end();
while (I != B) {
--I;
// Return instruction that starts a bundle.
if (I->isDebugValue() || I->isInsideBundle())
continue;
return I;
}
// The block is all debug values.
return end();
}
const MachineBasicBlock *MachineBasicBlock::getLandingPadSuccessor() const {
// A block with a landing pad successor only has one other successor.
if (succ_size() > 2)
return nullptr;
for (const_succ_iterator I = succ_begin(), E = succ_end(); I != E; ++I)
if ((*I)->isLandingPad())
return *I;
return nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineBasicBlock::dump() const {
print(dbgs());
}
#endif
StringRef MachineBasicBlock::getName() const {
if (const BasicBlock *LBB = getBasicBlock())
return LBB->getName();
else
return "(null)";
}
/// Return a hopefully unique identifier for this block.
std::string MachineBasicBlock::getFullName() const {
std::string Name;
if (getParent())
Name = (getParent()->getName() + ":").str();
if (getBasicBlock())
Name += getBasicBlock()->getName();
else
Name += ("BB" + Twine(getNumber())).str();
return Name;
}
void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
const MachineFunction *MF = getParent();
if (!MF) {
OS << "Can't print out MachineBasicBlock because parent MachineFunction"
<< " is null\n";
return;
}
const Function *F = MF->getFunction();
const Module *M = F ? F->getParent() : nullptr;
ModuleSlotTracker MST(M);
print(OS, MST, Indexes);
}
void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
SlotIndexes *Indexes) const {
const MachineFunction *MF = getParent();
if (!MF) {
OS << "Can't print out MachineBasicBlock because parent MachineFunction"
<< " is null\n";
return;
}
if (Indexes)
OS << Indexes->getMBBStartIdx(this) << '\t';
OS << "BB#" << getNumber() << ": ";
const char *Comma = "";
if (const BasicBlock *LBB = getBasicBlock()) {
OS << Comma << "derived from LLVM BB ";
LBB->printAsOperand(OS, /*PrintType=*/false, MST);
Comma = ", ";
}
if (isLandingPad()) { OS << Comma << "EH LANDING PAD"; Comma = ", "; }
if (hasAddressTaken()) { OS << Comma << "ADDRESS TAKEN"; Comma = ", "; }
if (Alignment)
OS << Comma << "Align " << Alignment << " (" << (1u << Alignment)
<< " bytes)";
OS << '\n';
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
if (!livein_empty()) {
if (Indexes) OS << '\t';
OS << " Live Ins:";
for (livein_iterator I = livein_begin(),E = livein_end(); I != E; ++I)
OS << ' ' << PrintReg(*I, TRI);
OS << '\n';
}
// Print the preds of this block according to the CFG.
if (!pred_empty()) {
if (Indexes) OS << '\t';
OS << " Predecessors according to CFG:";
for (const_pred_iterator PI = pred_begin(), E = pred_end(); PI != E; ++PI)
OS << " BB#" << (*PI)->getNumber();
OS << '\n';
}
for (const_instr_iterator I = instr_begin(); I != instr_end(); ++I) {
if (Indexes) {
if (Indexes->hasIndex(I))
OS << Indexes->getInstructionIndex(I);
OS << '\t';
}
OS << '\t';
if (I->isInsideBundle())
OS << " * ";
I->print(OS, MST);
}
// Print the successors of this block according to the CFG.
if (!succ_empty()) {
if (Indexes) OS << '\t';
OS << " Successors according to CFG:";
for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI) {
OS << " BB#" << (*SI)->getNumber();
if (!Weights.empty())
OS << '(' << *getWeightIterator(SI) << ')';
}
OS << '\n';
}
}
void MachineBasicBlock::printAsOperand(raw_ostream &OS, bool /*PrintType*/) const {
OS << "BB#" << getNumber();
}
void MachineBasicBlock::removeLiveIn(unsigned Reg) {
std::vector<unsigned>::iterator I =
std::find(LiveIns.begin(), LiveIns.end(), Reg);
if (I != LiveIns.end())
LiveIns.erase(I);
}
bool MachineBasicBlock::isLiveIn(unsigned Reg) const {
livein_iterator I = std::find(livein_begin(), livein_end(), Reg);
return I != livein_end();
}
unsigned
MachineBasicBlock::addLiveIn(unsigned PhysReg, const TargetRegisterClass *RC) {
assert(getParent() && "MBB must be inserted in function");
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) && "Expected physreg");
assert(RC && "Register class is required");
assert((isLandingPad() || this == &getParent()->front()) &&
"Only the entry block and landing pads can have physreg live ins");
bool LiveIn = isLiveIn(PhysReg);
iterator I = SkipPHIsAndLabels(begin()), E = end();
MachineRegisterInfo &MRI = getParent()->getRegInfo();
const TargetInstrInfo &TII = *getParent()->getSubtarget().getInstrInfo();
// Look for an existing copy.
if (LiveIn)
for (;I != E && I->isCopy(); ++I)
if (I->getOperand(1).getReg() == PhysReg) {
unsigned VirtReg = I->getOperand(0).getReg();
if (!MRI.constrainRegClass(VirtReg, RC))
llvm_unreachable("Incompatible live-in register class.");
return VirtReg;
}
// No luck, create a virtual register.
unsigned VirtReg = MRI.createVirtualRegister(RC);
BuildMI(*this, I, DebugLoc(), TII.get(TargetOpcode::COPY), VirtReg)
.addReg(PhysReg, RegState::Kill);
if (!LiveIn)
addLiveIn(PhysReg);
return VirtReg;
}
void MachineBasicBlock::moveBefore(MachineBasicBlock *NewAfter) {
getParent()->splice(NewAfter, this);
}
void MachineBasicBlock::moveAfter(MachineBasicBlock *NewBefore) {
MachineFunction::iterator BBI = NewBefore;
getParent()->splice(++BBI, this);
}
void MachineBasicBlock::updateTerminator() {
const TargetInstrInfo *TII = getParent()->getSubtarget().getInstrInfo();
// A block with no successors has no concerns with fall-through edges.
if (this->succ_empty()) return;
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
DebugLoc dl; // FIXME: this is nowhere
bool B = TII->AnalyzeBranch(*this, TBB, FBB, Cond);
(void) B;
assert(!B && "UpdateTerminators requires analyzable predecessors!");
if (Cond.empty()) {
if (TBB) {
// The block has an unconditional branch. If its successor is now
// its layout successor, delete the branch.
if (isLayoutSuccessor(TBB))
TII->RemoveBranch(*this);
} else {
// The block has an unconditional fallthrough. If its successor is not
// its layout successor, insert a branch. First we have to locate the
// only non-landing-pad successor, as that is the fallthrough block.
for (succ_iterator SI = succ_begin(), SE = succ_end(); SI != SE; ++SI) {
if ((*SI)->isLandingPad())
continue;
assert(!TBB && "Found more than one non-landing-pad successor!");
TBB = *SI;
}
// If there is no non-landing-pad successor, the block has no
// fall-through edges to be concerned with.
if (!TBB)
return;
// Finally update the unconditional successor to be reached via a branch
// if it would not be reached by fallthrough.
if (!isLayoutSuccessor(TBB))
TII->InsertBranch(*this, TBB, nullptr, Cond, dl);
}
} else {
if (FBB) {
// The block has a non-fallthrough conditional branch. If one of its
// successors is its layout successor, rewrite it to a fallthrough
// conditional branch.
if (isLayoutSuccessor(TBB)) {
if (TII->ReverseBranchCondition(Cond))
return;
TII->RemoveBranch(*this);
TII->InsertBranch(*this, FBB, nullptr, Cond, dl);
} else if (isLayoutSuccessor(FBB)) {
TII->RemoveBranch(*this);
TII->InsertBranch(*this, TBB, nullptr, Cond, dl);
}
} else {
// Walk through the successors and find the successor which is not
// a landing pad and is not the conditional branch destination (in TBB)
// as the fallthrough successor.
MachineBasicBlock *FallthroughBB = nullptr;
for (succ_iterator SI = succ_begin(), SE = succ_end(); SI != SE; ++SI) {
if ((*SI)->isLandingPad() || *SI == TBB)
continue;
assert(!FallthroughBB && "Found more than one fallthrough successor.");
FallthroughBB = *SI;
}
if (!FallthroughBB && canFallThrough()) {
// We fallthrough to the same basic block as the conditional jump
// targets. Remove the conditional jump, leaving unconditional
// fallthrough.
// FIXME: This does not seem like a reasonable pattern to support, but it
// has been seen in the wild coming out of degenerate ARM test cases.
TII->RemoveBranch(*this);
// Finally update the unconditional successor to be reached via a branch
// if it would not be reached by fallthrough.
if (!isLayoutSuccessor(TBB))
TII->InsertBranch(*this, TBB, nullptr, Cond, dl);
return;
}
// The block has a fallthrough conditional branch.
if (isLayoutSuccessor(TBB)) {
if (TII->ReverseBranchCondition(Cond)) {
// We can't reverse the condition, add an unconditional branch.
Cond.clear();
TII->InsertBranch(*this, FallthroughBB, nullptr, Cond, dl);
return;
}
TII->RemoveBranch(*this);
TII->InsertBranch(*this, FallthroughBB, nullptr, Cond, dl);
} else if (!isLayoutSuccessor(FallthroughBB)) {
TII->RemoveBranch(*this);
TII->InsertBranch(*this, TBB, FallthroughBB, Cond, dl);
}
}
}
}
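// Worked example (illustrative; the opcodes are hypothetical, not from the
// original source): suppose a target's AnalyzeBranch decomposes
//   beq %r0, <BB#2>; b <BB#3>
// into TBB=BB#2, FBB=BB#3, Cond={EQ,%r0}. If BB#2 becomes the layout
// successor, the conditional path above reverses the condition and
// rewrites the terminators to a single
//   bne %r0, <BB#3>
// so control falls through to BB#2.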
void MachineBasicBlock::addSuccessor(MachineBasicBlock *succ, uint32_t weight) {
// If we see a non-zero value for the first time, it means we actually use
// the Weights list, so we fill all Weights with 0's.
if (weight != 0 && Weights.empty())
Weights.resize(Successors.size());
if (weight != 0 || !Weights.empty())
Weights.push_back(weight);
Successors.push_back(succ);
succ->addPredecessor(this);
}
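// Example of the lazy weight list above (illustrative):
//   MBB->addSuccessor(A, 0); // Weights stays empty (weights unused so far).
//   MBB->addSuccessor(B, 8); // first non-zero weight: Weights is resized
//                            // to {0} for A, then 8 is appended -> {0, 8}.
// From then on every successor carries a parallel weight entry.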
void MachineBasicBlock::removeSuccessor(MachineBasicBlock *succ) {
succ->removePredecessor(this);
succ_iterator I = std::find(Successors.begin(), Successors.end(), succ);
assert(I != Successors.end() && "Not a current successor!");
// If Weight list is empty it means we don't use it (disabled optimization).
if (!Weights.empty()) {
weight_iterator WI = getWeightIterator(I);
Weights.erase(WI);
}
Successors.erase(I);
}
MachineBasicBlock::succ_iterator
MachineBasicBlock::removeSuccessor(succ_iterator I) {
assert(I != Successors.end() && "Not a current successor!");
// If Weight list is empty it means we don't use it (disabled optimization).
if (!Weights.empty()) {
weight_iterator WI = getWeightIterator(I);
Weights.erase(WI);
}
(*I)->removePredecessor(this);
return Successors.erase(I);
}
void MachineBasicBlock::replaceSuccessor(MachineBasicBlock *Old,
MachineBasicBlock *New) {
if (Old == New)
return;
succ_iterator E = succ_end();
succ_iterator NewI = E;
succ_iterator OldI = E;
for (succ_iterator I = succ_begin(); I != E; ++I) {
if (*I == Old) {
OldI = I;
if (NewI != E)
break;
}
if (*I == New) {
NewI = I;
if (OldI != E)
break;
}
}
assert(OldI != E && "Old is not a successor of this block");
Old->removePredecessor(this);
// If New isn't already a successor, let it take Old's place.
if (NewI == E) {
New->addPredecessor(this);
*OldI = New;
return;
}
// New is already a successor.
// Update its weight instead of adding a duplicate edge.
if (!Weights.empty()) {
weight_iterator OldWI = getWeightIterator(OldI);
*getWeightIterator(NewI) += *OldWI;
Weights.erase(OldWI);
}
Successors.erase(OldI);
}
void MachineBasicBlock::addPredecessor(MachineBasicBlock *pred) {
Predecessors.push_back(pred);
}
void MachineBasicBlock::removePredecessor(MachineBasicBlock *pred) {
pred_iterator I = std::find(Predecessors.begin(), Predecessors.end(), pred);
assert(I != Predecessors.end() && "Pred is not a predecessor of this block!");
Predecessors.erase(I);
}
void MachineBasicBlock::transferSuccessors(MachineBasicBlock *fromMBB) {
if (this == fromMBB)
return;
while (!fromMBB->succ_empty()) {
MachineBasicBlock *Succ = *fromMBB->succ_begin();
uint32_t Weight = 0;
// If Weight list is empty it means we don't use it (disabled optimization).
if (!fromMBB->Weights.empty())
Weight = *fromMBB->Weights.begin();
addSuccessor(Succ, Weight);
fromMBB->removeSuccessor(Succ);
}
}
void
MachineBasicBlock::transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB) {
if (this == fromMBB)
return;
while (!fromMBB->succ_empty()) {
MachineBasicBlock *Succ = *fromMBB->succ_begin();
uint32_t Weight = 0;
if (!fromMBB->Weights.empty())
Weight = *fromMBB->Weights.begin();
addSuccessor(Succ, Weight);
fromMBB->removeSuccessor(Succ);
// Fix up any PHI nodes in the successor.
for (MachineBasicBlock::instr_iterator MI = Succ->instr_begin(),
ME = Succ->instr_end(); MI != ME && MI->isPHI(); ++MI)
for (unsigned i = 2, e = MI->getNumOperands()+1; i != e; i += 2) {
MachineOperand &MO = MI->getOperand(i);
if (MO.getMBB() == fromMBB)
MO.setMBB(this);
}
}
}
bool MachineBasicBlock::isPredecessor(const MachineBasicBlock *MBB) const {
return std::find(pred_begin(), pred_end(), MBB) != pred_end();
}
bool MachineBasicBlock::isSuccessor(const MachineBasicBlock *MBB) const {
return std::find(succ_begin(), succ_end(), MBB) != succ_end();
}
bool MachineBasicBlock::isLayoutSuccessor(const MachineBasicBlock *MBB) const {
MachineFunction::const_iterator I(this);
return std::next(I) == MachineFunction::const_iterator(MBB);
}
bool MachineBasicBlock::canFallThrough() {
MachineFunction::iterator Fallthrough = this;
++Fallthrough;
// If FallthroughBlock is off the end of the function, it can't fall through.
if (Fallthrough == getParent()->end())
return false;
// If FallthroughBlock isn't a successor, no fallthrough is possible.
if (!isSuccessor(Fallthrough))
return false;
// Analyze the branches, if any, at the end of the block.
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
const TargetInstrInfo *TII = getParent()->getSubtarget().getInstrInfo();
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond)) {
// If we couldn't analyze the branch, examine the last instruction.
// If the block doesn't end in a known control barrier, assume fallthrough
// is possible. The isPredicated check is needed because this code can be
// called during IfConversion, where an instruction which is normally a
// Barrier is predicated and thus no longer an actual control barrier.
return empty() || !back().isBarrier() || TII->isPredicated(&back());
}
// If there is no branch, control always falls through.
if (!TBB) return true;
// If there is some explicit branch to the fallthrough block, it can obviously
// reach, even though the branch should get folded to fall through implicitly.
if (MachineFunction::iterator(TBB) == Fallthrough ||
MachineFunction::iterator(FBB) == Fallthrough)
return true;
// If it's an unconditional branch to some block not the fall through, it
// doesn't fall through.
if (Cond.empty()) return false;
// Otherwise, if it is conditional and has no explicit false block, it falls
// through.
return FBB == nullptr;
}
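// Summary of the cases above (illustrative; "b"/"beq" stand in for any
// target's branch opcodes):
//   <no terminator>          -> falls through
//   b <layout successor>     -> reachable via the explicit branch (true)
//   b <other block>          -> does not fall through
//   beq ..., no explicit FBB -> falls through on the false edge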
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
// Splitting the critical edge to a landing pad block is non-trivial. Don't do
// it in this generic function.
if (Succ->isLandingPad())
return nullptr;
MachineFunction *MF = getParent();
DebugLoc dl; // FIXME: this is nowhere
// Performance might be harmed on HW that implements branching using exec mask
// where both sides of the branches are always executed.
if (MF->getTarget().requiresStructuredCFG())
return nullptr;
// We may need to update this block's terminator, but we can't do that if
// AnalyzeBranch fails. If this block uses a jump table, we won't touch it.
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
return nullptr;
// Avoid bugpoint weirdness: A block may end with a conditional branch but
// jump to the same MBB in either case. That leaves duplicate CFG edges
// that we can't handle. Since this never happens in properly optimized
// code, just skip those edges.
if (TBB && TBB == FBB) {
DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
<< getNumber() << '\n');
return nullptr;
}
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
MF->insert(std::next(MachineFunction::iterator(this)), NMBB);
DEBUG(dbgs() << "Splitting critical edge:"
" BB#" << getNumber()
<< " -- BB#" << NMBB->getNumber()
<< " -- BB#" << Succ->getNumber() << '\n');
LiveIntervals *LIS = P->getAnalysisIfAvailable<LiveIntervals>();
SlotIndexes *Indexes = P->getAnalysisIfAvailable<SlotIndexes>();
if (LIS)
LIS->insertMBBInMaps(NMBB);
else if (Indexes)
Indexes->insertMBBInMaps(NMBB);
// On some targets like Mips, branches may kill virtual registers. Make sure
// that LiveVariables is properly updated after updateTerminator replaces the
// terminators.
LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();
// Collect a list of virtual registers killed by the terminators.
SmallVector<unsigned, 4> KilledRegs;
if (LV)
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I) {
MachineInstr *MI = I;
for (MachineInstr::mop_iterator OI = MI->operands_begin(),
OE = MI->operands_end(); OI != OE; ++OI) {
if (!OI->isReg() || OI->getReg() == 0 ||
!OI->isUse() || !OI->isKill() || OI->isUndef())
continue;
unsigned Reg = OI->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
LV->getVarInfo(Reg).removeKill(MI)) {
KilledRegs.push_back(Reg);
DEBUG(dbgs() << "Removing terminator kill: " << *MI);
OI->setIsKill(false);
}
}
}
SmallVector<unsigned, 4> UsedRegs;
if (LIS) {
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I) {
MachineInstr *MI = I;
for (MachineInstr::mop_iterator OI = MI->operands_begin(),
OE = MI->operands_end(); OI != OE; ++OI) {
if (!OI->isReg() || OI->getReg() == 0)
continue;
unsigned Reg = OI->getReg();
if (std::find(UsedRegs.begin(), UsedRegs.end(), Reg) == UsedRegs.end())
UsedRegs.push_back(Reg);
}
}
}
ReplaceUsesOfBlockWith(Succ, NMBB);
// If updateTerminator() removes instructions, we need to remove them from
// SlotIndexes.
SmallVector<MachineInstr*, 4> Terminators;
if (Indexes) {
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I)
Terminators.push_back(I);
}
updateTerminator();
if (Indexes) {
SmallVector<MachineInstr*, 4> NewTerminators;
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I)
NewTerminators.push_back(I);
for (SmallVectorImpl<MachineInstr*>::iterator I = Terminators.begin(),
E = Terminators.end(); I != E; ++I) {
if (std::find(NewTerminators.begin(), NewTerminators.end(), *I) ==
NewTerminators.end())
Indexes->removeMachineInstrFromMaps(*I);
}
}
// Insert unconditional "jump Succ" instruction in NMBB if necessary.
NMBB->addSuccessor(Succ);
if (!NMBB->isLayoutSuccessor(Succ)) {
Cond.clear();
MF->getSubtarget().getInstrInfo()->InsertBranch(*NMBB, Succ, nullptr, Cond,
dl);
if (Indexes) {
for (instr_iterator I = NMBB->instr_begin(), E = NMBB->instr_end();
I != E; ++I) {
// Some instructions may have been moved to NMBB by updateTerminator(),
// so we first remove any instruction that already has an index.
if (Indexes->hasIndex(I))
Indexes->removeMachineInstrFromMaps(I);
Indexes->insertMachineInstrInMaps(I);
}
}
}
// Fix PHI nodes in Succ so they refer to NMBB instead of this
for (MachineBasicBlock::instr_iterator
i = Succ->instr_begin(),e = Succ->instr_end();
i != e && i->isPHI(); ++i)
for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
if (i->getOperand(ni+1).getMBB() == this)
i->getOperand(ni+1).setMBB(NMBB);
// Inherit live-ins from the successor
for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
E = Succ->livein_end(); I != E; ++I)
NMBB->addLiveIn(*I);
// Update LiveVariables.
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
if (LV) {
// Restore kills of virtual registers that were killed by the terminators.
while (!KilledRegs.empty()) {
unsigned Reg = KilledRegs.pop_back_val();
for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
LV->getVarInfo(Reg).Kills.push_back(I);
DEBUG(dbgs() << "Restored terminator kill: " << *I);
break;
}
}
// Update relevant live-through information.
LV->addNewBlock(NMBB, this, Succ);
}
if (LIS) {
// After splitting the edge and updating SlotIndexes, live intervals may be
// in one of two situations, depending on whether this block was the last in
// the function. If the original block was the last in the function, all live
// intervals will end prior to the beginning of the new split block. If the
// original block was not at the end of the function, all live intervals will
// extend to the end of the new split block.
bool isLastMBB =
std::next(MachineFunction::iterator(NMBB)) == getParent()->end();
SlotIndex StartIndex = Indexes->getMBBEndIdx(this);
SlotIndex PrevIndex = StartIndex.getPrevSlot();
SlotIndex EndIndex = Indexes->getMBBEndIdx(NMBB);
// Find the registers used from NMBB in PHIs in Succ.
SmallSet<unsigned, 8> PHISrcRegs;
for (MachineBasicBlock::instr_iterator
I = Succ->instr_begin(), E = Succ->instr_end();
I != E && I->isPHI(); ++I) {
for (unsigned ni = 1, ne = I->getNumOperands(); ni != ne; ni += 2) {
if (I->getOperand(ni+1).getMBB() == NMBB) {
MachineOperand &MO = I->getOperand(ni);
unsigned Reg = MO.getReg();
PHISrcRegs.insert(Reg);
if (MO.isUndef())
continue;
LiveInterval &LI = LIS->getInterval(Reg);
VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
assert(VNI && "PHI sources should be live out of their predecessors.");
LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
}
}
}
MachineRegisterInfo *MRI = &getParent()->getRegInfo();
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (PHISrcRegs.count(Reg) || !LIS->hasInterval(Reg))
continue;
LiveInterval &LI = LIS->getInterval(Reg);
if (!LI.liveAt(PrevIndex))
continue;
bool isLiveOut = LI.liveAt(LIS->getMBBStartIdx(Succ));
if (isLiveOut && isLastMBB) {
VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
assert(VNI && "LiveInterval should have VNInfo where it is live.");
LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
} else if (!isLiveOut && !isLastMBB) {
LI.removeSegment(StartIndex, EndIndex);
}
}
// Update all intervals for registers whose uses may have been modified by
// updateTerminator().
LIS->repairIntervalsInRange(this, getFirstTerminator(), end(), UsedRegs);
}
if (MachineDominatorTree *MDT =
P->getAnalysisIfAvailable<MachineDominatorTree>())
MDT->recordSplitCriticalEdge(this, Succ, NMBB);
if (MachineLoopInfo *MLI = P->getAnalysisIfAvailable<MachineLoopInfo>())
if (MachineLoop *TIL = MLI->getLoopFor(this)) {
// If one or the other blocks were not in a loop, the new block is not
// either, and thus LI doesn't need to be updated.
if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
if (TIL == DestLoop) {
// Both in the same loop, the NMBB joins loop.
DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
} else if (TIL->contains(DestLoop)) {
// Edge from an outer loop to an inner loop. Add to the outer loop.
TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
} else if (DestLoop->contains(TIL)) {
// Edge from an inner loop to an outer loop. Add to the outer loop.
DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
} else {
// Edge from two loops with no containment relation. Because these
// are natural loops, we know that the destination block must be the
// header of its loop (adding a branch into a loop elsewhere would
// create an irreducible loop).
assert(DestLoop->getHeader() == Succ &&
"Should not create irreducible loops!");
if (MachineLoop *P = DestLoop->getParentLoop())
P->addBasicBlockToLoop(NMBB, MLI->getBase());
}
}
}
return NMBB;
}
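// Minimal usage sketch (assumed caller, not from the original source):
//   if (MachineBasicBlock *NMBB = MBB->SplitCriticalEdge(Succ, P))
//     WorkList.push_back(NMBB); // hypothetical caller bookkeeping
// A null result means the edge could not be split: landing-pad successor,
// a target that requires a structured CFG, or an unanalyzable terminator.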
/// Prepare MI to be removed from its bundle. This fixes bundle flags on MI's
/// neighboring instructions so the bundle won't be broken by removing MI.
static void unbundleSingleMI(MachineInstr *MI) {
// Removing the first instruction in a bundle.
if (MI->isBundledWithSucc() && !MI->isBundledWithPred())
MI->unbundleFromSucc();
// Removing the last instruction in a bundle.
if (MI->isBundledWithPred() && !MI->isBundledWithSucc())
MI->unbundleFromPred();
// If MI is not bundled, or if it is internal to a bundle, the neighbor flags
// are already fine.
}
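// Bundle-flag example (illustrative): in a three-instruction bundle
//   A [BundledSucc] - B [BundledPred|BundledSucc] - C [BundledPred]
// removing A clears B's BundledPred, removing C clears B's BundledSucc,
// and removing the internal B leaves A and C bundled with each other,
// which is exactly the state the two unbundle calls above preserve.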
MachineBasicBlock::instr_iterator
MachineBasicBlock::erase(MachineBasicBlock::instr_iterator I) {
unbundleSingleMI(I);
return Insts.erase(I);
}
MachineInstr *MachineBasicBlock::remove_instr(MachineInstr *MI) {
unbundleSingleMI(MI);
MI->clearFlag(MachineInstr::BundledPred);
MI->clearFlag(MachineInstr::BundledSucc);
return Insts.remove(MI);
}
MachineBasicBlock::instr_iterator
MachineBasicBlock::insert(instr_iterator I, MachineInstr *MI) {
assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
"Cannot insert instruction with bundle flags");
// Set the bundle flags when inserting inside a bundle.
if (I != instr_end() && I->isBundledWithPred()) {
MI->setFlag(MachineInstr::BundledPred);
MI->setFlag(MachineInstr::BundledSucc);
}
return Insts.insert(I, MI);
}
/// removeFromParent - This method unlinks 'this' from the containing function,
/// and returns it, but does not delete it.
MachineBasicBlock *MachineBasicBlock::removeFromParent() {
assert(getParent() && "Not embedded in a function!");
getParent()->remove(this);
return this;
}
/// eraseFromParent - This method unlinks 'this' from the containing function,
/// and deletes it.
void MachineBasicBlock::eraseFromParent() {
assert(getParent() && "Not embedded in a function!");
getParent()->erase(this);
}
/// ReplaceUsesOfBlockWith - Given a machine basic block that branched to
/// 'Old', change the code and CFG so that it branches to 'New' instead.
void MachineBasicBlock::ReplaceUsesOfBlockWith(MachineBasicBlock *Old,
MachineBasicBlock *New) {
assert(Old != New && "Cannot replace self with self!");
MachineBasicBlock::instr_iterator I = instr_end();
while (I != instr_begin()) {
--I;
if (!I->isTerminator()) break;
// Scan the operands of this machine instruction, replacing any uses of Old
// with New.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (I->getOperand(i).isMBB() &&
I->getOperand(i).getMBB() == Old)
I->getOperand(i).setMBB(New);
}
// Update the successor information.
replaceSuccessor(Old, New);
}
/// CorrectExtraCFGEdges - Various pieces of code can cause excess edges in the
/// CFG to be inserted. If we have proven that MBB can only branch to DestA and
/// DestB, remove any other MBB successors from the CFG. DestA and DestB can be
/// null.
///
/// Besides DestA and DestB, retain other edges leading to LandingPads
/// (currently there can be only one; we don't check or require that here).
/// Note it is possible that DestA and/or DestB are LandingPads.
bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA,
MachineBasicBlock *DestB,
bool isCond) {
// The values of DestA and DestB frequently come from a call to the
// 'TargetInstrInfo::AnalyzeBranch' method. We take our meaning of the initial
// values from there.
//
// 1. If both DestA and DestB are null, then the block ends with no branches
// (it falls through to its successor).
// 2. If DestA is set, DestB is null, and isCond is false, then the block ends
// with only an unconditional branch.
// 3. If DestA is set, DestB is null, and isCond is true, then the block ends
// with a conditional branch that falls through to a successor (DestB).
// 4. If DestA and DestB are set and isCond is true, then the block ends with a
// conditional branch followed by an unconditional branch. DestA is the
// 'true' destination and DestB is the 'false' destination.
bool Changed = false;
MachineFunction::iterator FallThru =
std::next(MachineFunction::iterator(this));
if (!DestA && !DestB) {
// Block falls through to successor.
DestA = FallThru;
DestB = FallThru;
} else if (DestA && !DestB) {
if (isCond)
// Block ends in conditional jump that falls through to successor.
DestB = FallThru;
} else {
assert(DestA && DestB && isCond &&
"CFG in a bad state. Cannot correct CFG edges");
}
// Remove superfluous edges. I.e., those which aren't destinations of this
// basic block, duplicate edges, or landing pads.
SmallPtrSet<const MachineBasicBlock*, 8> SeenMBBs;
MachineBasicBlock::succ_iterator SI = succ_begin();
while (SI != succ_end()) {
const MachineBasicBlock *MBB = *SI;
if (!SeenMBBs.insert(MBB).second ||
(MBB != DestA && MBB != DestB && !MBB->isLandingPad())) {
// This is a superfluous edge, remove it.
SI = removeSuccessor(SI);
Changed = true;
} else {
++SI;
}
}
return Changed;
}
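// Example (illustrative): if AnalyzeBranch proved this block conditionally
// branches to DestA=BB#4 and otherwise falls through (case 3 above, so
// DestB becomes the layout successor BB#5), then successors
// {BB#4, BB#5, BB#7, BB#9 (landing pad)} are corrected to
// {BB#4, BB#5, BB#9}: only the superfluous BB#7 edge is removed.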
/// findDebugLoc - find the next valid DebugLoc starting at MBBI, skipping
/// any DBG_VALUE instructions. Return UnknownLoc if there is none.
DebugLoc
MachineBasicBlock::findDebugLoc(instr_iterator MBBI) {
DebugLoc DL;
instr_iterator E = instr_end();
if (MBBI == E)
return DL;
// Skip debug declarations, we don't want a DebugLoc from them.
while (MBBI != E && MBBI->isDebugValue())
MBBI++;
if (MBBI != E)
DL = MBBI->getDebugLoc();
return DL;
}
/// getSuccWeight - Return the weight of the edge from this block to MBB.
///
uint32_t MachineBasicBlock::getSuccWeight(const_succ_iterator Succ) const {
if (Weights.empty())
return 0;
return *getWeightIterator(Succ);
}
/// Set successor weight of a given iterator.
void MachineBasicBlock::setSuccWeight(succ_iterator I, uint32_t weight) {
if (Weights.empty())
return;
*getWeightIterator(I) = weight;
}
/// getWeightIterator - Return the weight iterator corresponding to the
/// successor iterator I.
MachineBasicBlock::weight_iterator MachineBasicBlock::
getWeightIterator(MachineBasicBlock::succ_iterator I) {
assert(Weights.size() == Successors.size() && "Async weight list!");
size_t index = std::distance(Successors.begin(), I);
assert(index < Weights.size() && "Not a current successor!");
return Weights.begin() + index;
}
/// getWeightIterator - Return the weight iterator corresponding to the
/// successor iterator I.
MachineBasicBlock::const_weight_iterator MachineBasicBlock::
getWeightIterator(MachineBasicBlock::const_succ_iterator I) const {
assert(Weights.size() == Successors.size() && "Async weight list!");
const size_t index = std::distance(Successors.begin(), I);
assert(index < Weights.size() && "Not a current successor!");
return Weights.begin() + index;
}
/// Return whether (physical) register "Reg" has been <def>ined and not
/// <kill>ed as of just before "Before".
///
/// The search is localised to a neighborhood of Neighborhood instructions
/// before (searching for defs or kills) and Neighborhood instructions after
/// (searching just for defs) "Before".
MachineBasicBlock::LivenessQueryResult
MachineBasicBlock::computeRegisterLiveness(const TargetRegisterInfo *TRI,
unsigned Reg, const_iterator Before,
unsigned Neighborhood) const {
unsigned N = Neighborhood;
// Start by searching backwards from Before, looking for kills, reads or defs.
const_iterator I(Before);
// If this is the first insn in the block, don't search backwards.
if (I != begin()) {
do {
--I;
MachineOperandIteratorBase::PhysRegInfo Analysis =
ConstMIOperands(I).analyzePhysReg(Reg, TRI);
if (Analysis.Defines)
// Outputs happen after inputs so they take precedence if both are
// present.
return Analysis.DefinesDead ? LQR_Dead : LQR_Live;
if (Analysis.Kills || Analysis.Clobbers)
// Register killed, so isn't live.
return LQR_Dead;
else if (Analysis.ReadsOverlap)
// Defined or read without a previous kill - live.
return Analysis.Reads ? LQR_Live : LQR_OverlappingLive;
} while (I != begin() && --N > 0);
}
// Did we get to the start of the block?
if (I == begin()) {
// If so, the register's state is definitely defined by the live-in state.
for (MCRegAliasIterator RAI(Reg, TRI, /*IncludeSelf=*/true);
RAI.isValid(); ++RAI) {
if (isLiveIn(*RAI))
return (*RAI == Reg) ? LQR_Live : LQR_OverlappingLive;
}
return LQR_Dead;
}
N = Neighborhood;
// Try searching forwards from Before, looking for reads or defs.
I = const_iterator(Before);
// If this is the last insn in the block, don't search forwards.
if (I != end()) {
for (++I; I != end() && N > 0; ++I, --N) {
MachineOperandIteratorBase::PhysRegInfo Analysis =
ConstMIOperands(I).analyzePhysReg(Reg, TRI);
if (Analysis.ReadsOverlap)
// Used, therefore must have been live.
return (Analysis.Reads) ?
LQR_Live : LQR_OverlappingLive;
else if (Analysis.Clobbers || Analysis.Defines)
// Defined (but not read) therefore cannot have been live.
return LQR_Dead;
}
}
// At this point we have no idea of the liveness of the register.
return LQR_Unknown;
}
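// Result sketch (illustrative, hypothetical x86 register names): scanning
// backwards for Reg=%ax, a read of the wider %eax yields
// LQR_OverlappingLive (ReadsOverlap without Reads), a read of %ax itself
// yields LQR_Live, and a kill or clobber of %ax yields LQR_Dead.
// LQR_Unknown means both Neighborhood windows were exhausted without
// reaching the block boundaries.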
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MIRPrinter.cpp | //===- MIRPrinter.cpp - MIR serialization format printer ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the class that prints out the LLVM IR and machine
// functions using the MIR serialization format.
//
//===----------------------------------------------------------------------===//
#include "MIRPrinter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
namespace {
/// This class prints out the machine functions using the MIR serialization
/// format.
class MIRPrinter {
raw_ostream &OS;
DenseMap<const uint32_t *, unsigned> RegisterMaskIds;
public:
MIRPrinter(raw_ostream &OS) : OS(OS) {}
void print(const MachineFunction &MF);
void convert(yaml::MachineFunction &MF, const MachineRegisterInfo &RegInfo,
const TargetRegisterInfo *TRI);
void convert(yaml::MachineFrameInfo &YamlMFI, const MachineFrameInfo &MFI);
void convert(ModuleSlotTracker &MST, yaml::MachineBasicBlock &YamlMBB,
const MachineBasicBlock &MBB);
void convertStackObjects(yaml::MachineFunction &MF,
const MachineFrameInfo &MFI);
private:
void initRegisterMaskIds(const MachineFunction &MF);
};
/// This class prints out the machine instructions using the MIR serialization
/// format.
class MIPrinter {
raw_ostream &OS;
ModuleSlotTracker &MST;
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds;
public:
MIPrinter(raw_ostream &OS, ModuleSlotTracker &MST,
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds)
: OS(OS), MST(MST), RegisterMaskIds(RegisterMaskIds) {}
void print(const MachineInstr &MI);
void printMBBReference(const MachineBasicBlock &MBB);
void print(const MachineOperand &Op, const TargetRegisterInfo *TRI);
};
} // end anonymous namespace
namespace llvm {
namespace yaml {
/// This struct serializes the LLVM IR module.
template <> struct BlockScalarTraits<Module> {
static void output(const Module &Mod, void *Ctxt, raw_ostream &OS) {
Mod.print(OS, nullptr);
}
static StringRef input(StringRef Str, void *Ctxt, Module &Mod) {
llvm_unreachable("LLVM Module is supposed to be parsed separately");
return "";
}
};
} // end namespace yaml
} // end namespace llvm
static void printReg(unsigned Reg, raw_ostream &OS,
const TargetRegisterInfo *TRI) {
// TODO: Print Stack Slots.
if (!Reg)
OS << '_';
else if (TargetRegisterInfo::isVirtualRegister(Reg))
OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
else if (Reg < TRI->getNumRegs())
OS << '%' << StringRef(TRI->getName(Reg)).lower();
else
llvm_unreachable("Can't print this kind of register yet");
}
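// Output sketch (illustrative): virtual register 0 prints as "%0", a
// physical register such as x86's EAX prints as "%eax", and register 0
// (no register) prints as "_".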
void MIRPrinter::print(const MachineFunction &MF) {
initRegisterMaskIds(MF);
yaml::MachineFunction YamlMF;
YamlMF.Name = MF.getName();
YamlMF.Alignment = MF.getAlignment();
YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
YamlMF.HasInlineAsm = MF.hasInlineAsm();
convert(YamlMF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
convert(YamlMF.FrameInfo, *MF.getFrameInfo());
convertStackObjects(YamlMF, *MF.getFrameInfo());
int I = 0;
ModuleSlotTracker MST(MF.getFunction()->getParent());
for (const auto &MBB : MF) {
// TODO: Allow printing of non-sequentially numbered MBBs.
// This is currently needed as the basic block references get their index
// from MBB.getNumber(), thus it should be sequential so that the parser can
// map back to the correct MBBs when parsing the output.
assert(MBB.getNumber() == I++ &&
"Can't print MBBs that aren't sequentially numbered");
(void)I;
yaml::MachineBasicBlock YamlMBB;
convert(MST, YamlMBB, MBB);
YamlMF.BasicBlocks.push_back(YamlMBB);
}
yaml::Output Out(OS);
Out << YamlMF;
}
void MIRPrinter::convert(yaml::MachineFunction &MF,
const MachineRegisterInfo &RegInfo,
const TargetRegisterInfo *TRI) {
MF.IsSSA = RegInfo.isSSA();
MF.TracksRegLiveness = RegInfo.tracksLiveness();
MF.TracksSubRegLiveness = RegInfo.subRegLivenessEnabled();
// Print the virtual register definitions.
for (unsigned I = 0, E = RegInfo.getNumVirtRegs(); I < E; ++I) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
yaml::VirtualRegisterDefinition VReg;
VReg.ID = I;
VReg.Class =
StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
MF.VirtualRegisters.push_back(VReg);
}
}
void MIRPrinter::convert(yaml::MachineFrameInfo &YamlMFI,
const MachineFrameInfo &MFI) {
YamlMFI.IsFrameAddressTaken = MFI.isFrameAddressTaken();
YamlMFI.IsReturnAddressTaken = MFI.isReturnAddressTaken();
YamlMFI.HasStackMap = MFI.hasStackMap();
YamlMFI.HasPatchPoint = MFI.hasPatchPoint();
YamlMFI.StackSize = MFI.getStackSize();
YamlMFI.OffsetAdjustment = MFI.getOffsetAdjustment();
YamlMFI.MaxAlignment = MFI.getMaxAlignment();
YamlMFI.AdjustsStack = MFI.adjustsStack();
YamlMFI.HasCalls = MFI.hasCalls();
YamlMFI.MaxCallFrameSize = MFI.getMaxCallFrameSize();
YamlMFI.HasOpaqueSPAdjustment = MFI.hasOpaqueSPAdjustment();
YamlMFI.HasVAStart = MFI.hasVAStart();
YamlMFI.HasMustTailInVarArgFunc = MFI.hasMustTailInVarArgFunc();
}
void MIRPrinter::convertStackObjects(yaml::MachineFunction &MF,
const MachineFrameInfo &MFI) {
// Process fixed stack objects.
unsigned ID = 0;
for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
if (MFI.isDeadObjectIndex(I))
continue;
yaml::FixedMachineStackObject YamlObject;
YamlObject.ID = ID++;
YamlObject.Type = MFI.isSpillSlotObjectIndex(I)
? yaml::FixedMachineStackObject::SpillSlot
: yaml::FixedMachineStackObject::DefaultType;
YamlObject.Offset = MFI.getObjectOffset(I);
YamlObject.Size = MFI.getObjectSize(I);
YamlObject.Alignment = MFI.getObjectAlignment(I);
YamlObject.IsImmutable = MFI.isImmutableObjectIndex(I);
YamlObject.IsAliased = MFI.isAliasedObjectIndex(I);
MF.FixedStackObjects.push_back(YamlObject);
// TODO: Store the mapping between fixed object IDs and object indices to
// print the fixed stack object references correctly.
}
// Process ordinary stack objects.
ID = 0;
for (int I = 0, E = MFI.getObjectIndexEnd(); I < E; ++I) {
if (MFI.isDeadObjectIndex(I))
continue;
yaml::MachineStackObject YamlObject;
YamlObject.ID = ID++;
YamlObject.Type = MFI.isSpillSlotObjectIndex(I)
? yaml::MachineStackObject::SpillSlot
: MFI.isVariableSizedObjectIndex(I)
? yaml::MachineStackObject::VariableSized
: yaml::MachineStackObject::DefaultType;
YamlObject.Offset = MFI.getObjectOffset(I);
YamlObject.Size = MFI.getObjectSize(I);
YamlObject.Alignment = MFI.getObjectAlignment(I);
MF.StackObjects.push_back(YamlObject);
// TODO: Store the mapping between object IDs and object indices to print
// the stack object references correctly.
}
}
void MIRPrinter::convert(ModuleSlotTracker &MST,
yaml::MachineBasicBlock &YamlMBB,
const MachineBasicBlock &MBB) {
assert(MBB.getNumber() >= 0 && "Invalid MBB number");
YamlMBB.ID = (unsigned)MBB.getNumber();
// TODO: Serialize unnamed BB references.
if (const auto *BB = MBB.getBasicBlock())
YamlMBB.Name.Value = BB->hasName() ? BB->getName() : "<unnamed bb>";
else
YamlMBB.Name.Value = "";
YamlMBB.Alignment = MBB.getAlignment();
YamlMBB.AddressTaken = MBB.hasAddressTaken();
YamlMBB.IsLandingPad = MBB.isLandingPad();
for (const auto *SuccMBB : MBB.successors()) {
std::string Str;
raw_string_ostream StrOS(Str);
MIPrinter(StrOS, MST, RegisterMaskIds).printMBBReference(*SuccMBB);
YamlMBB.Successors.push_back(StrOS.str());
}
// Print the live in registers.
const auto *TRI = MBB.getParent()->getSubtarget().getRegisterInfo();
assert(TRI && "Expected target register info");
for (auto I = MBB.livein_begin(), E = MBB.livein_end(); I != E; ++I) {
std::string Str;
raw_string_ostream StrOS(Str);
printReg(*I, StrOS, TRI);
YamlMBB.LiveIns.push_back(StrOS.str());
}
// Print the machine instructions.
YamlMBB.Instructions.reserve(MBB.size());
std::string Str;
for (const auto &MI : MBB) {
raw_string_ostream StrOS(Str);
MIPrinter(StrOS, MST, RegisterMaskIds).print(MI);
YamlMBB.Instructions.push_back(StrOS.str());
Str.clear();
}
}
void MIRPrinter::initRegisterMaskIds(const MachineFunction &MF) {
const auto *TRI = MF.getSubtarget().getRegisterInfo();
unsigned I = 0;
for (const uint32_t *Mask : TRI->getRegMasks())
RegisterMaskIds.insert(std::make_pair(Mask, I++));
}
void MIPrinter::print(const MachineInstr &MI) {
const auto &SubTarget = MI.getParent()->getParent()->getSubtarget();
const auto *TRI = SubTarget.getRegisterInfo();
assert(TRI && "Expected target register info");
const auto *TII = SubTarget.getInstrInfo();
assert(TII && "Expected target instruction info");
unsigned I = 0, E = MI.getNumOperands();
for (; I < E && MI.getOperand(I).isReg() && MI.getOperand(I).isDef() &&
!MI.getOperand(I).isImplicit();
++I) {
if (I)
OS << ", ";
print(MI.getOperand(I), TRI);
}
if (I)
OS << " = ";
OS << TII->getName(MI.getOpcode());
// TODO: Print the instruction flags, machine mem operands.
if (I < E)
OS << ' ';
bool NeedComma = false;
for (; I < E; ++I) {
if (NeedComma)
OS << ", ";
print(MI.getOperand(I), TRI);
NeedComma = true;
}
}
void MIPrinter::printMBBReference(const MachineBasicBlock &MBB) {
OS << "%bb." << MBB.getNumber();
if (const auto *BB = MBB.getBasicBlock()) {
if (BB->hasName())
OS << '.' << BB->getName();
}
}
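// Output sketch (illustrative): machine basic block 0 derived from an IR
// block named "entry" prints as "%bb.0.entry"; without an IR name it
// prints as "%bb.0".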
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
switch (Op.getType()) {
case MachineOperand::MO_Register:
// TODO: Print the other register flags.
if (Op.isImplicit())
OS << (Op.isDef() ? "implicit-def " : "implicit ");
if (Op.isDead())
OS << "dead ";
if (Op.isKill())
OS << "killed ";
if (Op.isUndef())
OS << "undef ";
printReg(Op.getReg(), OS, TRI);
// Print the sub register.
if (Op.getSubReg() != 0)
OS << ':' << TRI->getSubRegIndexName(Op.getSubReg());
break;
case MachineOperand::MO_Immediate:
OS << Op.getImm();
break;
case MachineOperand::MO_MachineBasicBlock:
printMBBReference(*Op.getMBB());
break;
case MachineOperand::MO_GlobalAddress:
Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
// TODO: Print offset and target flags.
break;
case MachineOperand::MO_RegisterMask: {
auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
if (RegMaskInfo != RegisterMaskIds.end())
OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
else
llvm_unreachable("Can't print this machine register mask yet.");
break;
}
default:
// TODO: Print the other machine operands.
llvm_unreachable("Can't print this machine operand at the moment");
}
}
void llvm::printMIR(raw_ostream &OS, const Module &M) {
yaml::Output Out(OS);
Out << const_cast<Module &>(M);
}
void llvm::printMIR(raw_ostream &OS, const MachineFunction &MF) {
MIRPrinter Printer(OS);
Printer.print(MF);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/TargetFrameLoweringImpl.cpp | //===----- TargetFrameLoweringImpl.cpp - Implement target frame interface --==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implements the layout of a stack frame on the target machine.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/BitVector.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cstdlib>
using namespace llvm;
TargetFrameLowering::~TargetFrameLowering() {
}
/// The default implementation just looks at attribute "no-frame-pointer-elim".
bool TargetFrameLowering::noFramePointerElim(const MachineFunction &MF) const {
auto Attr = MF.getFunction()->getFnAttribute("no-frame-pointer-elim");
return Attr.getValueAsString() == "true";
}
/// getFrameIndexOffset - Returns the displacement from the frame register to
/// the stack frame of the specified index. This is the default implementation
/// which is overridden for some targets.
int TargetFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
int FI) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
return MFI->getObjectOffset(FI) + MFI->getStackSize() -
getOffsetOfLocalArea() + MFI->getOffsetAdjustment();
}
int TargetFrameLowering::getFrameIndexReference(const MachineFunction &MF,
int FI, unsigned &FrameReg) const {
const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
// By default, assume all frame indices are referenced via whatever
// getFrameRegister() says. The target can override this if it's doing
// something different.
FrameReg = RI->getFrameRegister(MF);
return getFrameIndexOffset(MF, FI);
}
bool TargetFrameLowering::needsFrameIndexResolution(
const MachineFunction &MF) const {
return MF.getFrameInfo()->hasStackObjects();
}
void TargetFrameLowering::determineCalleeSaves(MachineFunction &MF,
BitVector &SavedRegs,
RegScavenger *RS) const {
// Get the callee saved register list...
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
// Early exit if there are no callee saved registers.
if (!CSRegs || CSRegs[0] == 0)
return;
SavedRegs.resize(TRI.getNumRegs());
// In Naked functions we aren't going to save any registers.
if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
return;
// Functions which call __builtin_unwind_init get all their registers saved.
bool CallsUnwindInit = MF.getMMI().callsUnwindInit();
const MachineRegisterInfo &MRI = MF.getRegInfo();
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
if (CallsUnwindInit || MRI.isPhysRegModified(Reg))
SavedRegs.set(Reg);
}
}
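// Example (illustrative, hypothetical register names): on a target whose
// callee-saved list is {R4, R5, LR}, a function that modifies only R4 and
// LR gets SavedRegs = {R4, LR}; if it calls __builtin_unwind_init, all
// three are saved regardless of whether they are modified.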
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveVariables.cpp | //===-- LiveVariables.cpp - Live Variable Analysis for Machine Code -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveVariable analysis pass. For each machine
// instruction in the function, this pass calculates the set of registers that
// are immediately dead after the instruction (i.e., the instruction calculates
// the value, but it is never used) and the set of registers that are used by
// the instruction, but are never used after the instruction (i.e., they are
// killed).
//
// This class computes live variables using a sparse implementation based on
// the machine code SSA form. This class computes live variable information for
// each virtual and _register allocatable_ physical register in a function. It
// uses the dominance properties of SSA form to efficiently compute live
// variables for virtual registers, and assumes that physical registers are only
// live within a single basic block (allowing it to do a single local analysis
// to resolve physical register lifetimes in each basic block). If a physical
// register is not register allocatable, it is not tracked. This is useful for
// things like the stack pointer and condition codes.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <algorithm>
using namespace llvm;
char LiveVariables::ID = 0;
char &llvm::LiveVariablesID = LiveVariables::ID;
INITIALIZE_PASS_BEGIN(LiveVariables, "livevars",
"Live Variable Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(UnreachableMachineBlockElim)
INITIALIZE_PASS_END(LiveVariables, "livevars",
"Live Variable Analysis", false, false)
void LiveVariables::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(UnreachableMachineBlockElimID);
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
MachineInstr *
LiveVariables::VarInfo::findKill(const MachineBasicBlock *MBB) const {
for (unsigned i = 0, e = Kills.size(); i != e; ++i)
if (Kills[i]->getParent() == MBB)
return Kills[i];
return nullptr;
}
void LiveVariables::VarInfo::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dbgs() << " Alive in blocks: ";
for (SparseBitVector<>::iterator I = AliveBlocks.begin(),
E = AliveBlocks.end(); I != E; ++I)
dbgs() << *I << ", ";
dbgs() << "\n Killed by:";
if (Kills.empty())
dbgs() << " No instructions.\n";
else {
for (unsigned i = 0, e = Kills.size(); i != e; ++i)
dbgs() << "\n #" << i << ": " << *Kills[i];
dbgs() << "\n";
}
#endif
}
/// getVarInfo - Get (possibly creating) a VarInfo object for the given vreg.
LiveVariables::VarInfo &LiveVariables::getVarInfo(unsigned RegIdx) {
assert(TargetRegisterInfo::isVirtualRegister(RegIdx) &&
"getVarInfo: not a virtual register!");
VirtRegInfo.grow(RegIdx);
return VirtRegInfo[RegIdx];
}
void LiveVariables::MarkVirtRegAliveInBlock(VarInfo& VRInfo,
MachineBasicBlock *DefBlock,
MachineBasicBlock *MBB,
std::vector<MachineBasicBlock*> &WorkList) {
unsigned BBNum = MBB->getNumber();
// Check to see if this basic block is one of the killing blocks. If so,
// remove it.
for (unsigned i = 0, e = VRInfo.Kills.size(); i != e; ++i)
if (VRInfo.Kills[i]->getParent() == MBB) {
VRInfo.Kills.erase(VRInfo.Kills.begin()+i); // Erase entry
break;
}
if (MBB == DefBlock) return; // Reached the defining block; stop the walk.
if (VRInfo.AliveBlocks.test(BBNum))
return; // We already know the block is live
// Mark the variable known alive in this bb
VRInfo.AliveBlocks.set(BBNum);
assert(MBB != &MF->front() && "Can't find reaching def for virtreg");
WorkList.insert(WorkList.end(), MBB->pred_rbegin(), MBB->pred_rend());
}
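/// MarkVirtRegAliveInBlock - Iterative driver for the helper above. Using an
/// explicit worklist rather than recursion keeps stack usage bounded on
/// functions with very deep CFGs.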
void LiveVariables::MarkVirtRegAliveInBlock(VarInfo &VRInfo,
MachineBasicBlock *DefBlock,
MachineBasicBlock *MBB) {
std::vector<MachineBasicBlock*> WorkList;
MarkVirtRegAliveInBlock(VRInfo, DefBlock, MBB, WorkList);
while (!WorkList.empty()) {
MachineBasicBlock *Pred = WorkList.back();
WorkList.pop_back();
MarkVirtRegAliveInBlock(VRInfo, DefBlock, Pred, WorkList);
}
}
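/// HandleVirtRegUse - Record a use of virtual register reg by MI in MBB. If
/// the block already kills the register, the kill is simply moved forward to
/// MI; otherwise a new kill entry is added and liveness is propagated
/// backwards through all predecessors up to the defining block.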
void LiveVariables::HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB,
MachineInstr *MI) {
assert(MRI->getVRegDef(reg) && "Register use before def!");
unsigned BBNum = MBB->getNumber();
VarInfo& VRInfo = getVarInfo(reg);
// Check to see if this basic block is already a kill block.
if (!VRInfo.Kills.empty() && VRInfo.Kills.back()->getParent() == MBB) {
// Yes, this register is killed in this basic block already. Increase the
// live range by updating the kill instruction.
VRInfo.Kills.back() = MI;
return;
}
#ifndef NDEBUG
for (unsigned i = 0, e = VRInfo.Kills.size(); i != e; ++i)
assert(VRInfo.Kills[i]->getParent() != MBB && "entry should be at end!");
#endif
// This situation can occur:
//
// ,------.
// | |
// | v
// | t2 = phi ... t1 ...
// | |
// | v
// | t1 = ...
// | ... = ... t1 ...
// | |
// `------'
//
// where there is a use in a PHI node that's a predecessor to the defining
// block. We don't want to mark all predecessors as having the value "alive"
// in this case.
if (MBB == MRI->getVRegDef(reg)->getParent()) return;
  // Add a new kill entry for this basic block. If this virtual register is
  // already marked as alive in this basic block, that means it is alive in at
  // least one of the successor blocks, so this use is not a kill.
if (!VRInfo.AliveBlocks.test(BBNum))
VRInfo.Kills.push_back(MI);
// Update all dominating blocks to mark them as "known live".
for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
E = MBB->pred_end(); PI != E; ++PI)
MarkVirtRegAliveInBlock(VRInfo, MRI->getVRegDef(reg)->getParent(), *PI);
}
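/// HandleVirtRegDef - Record a def of virtual register Reg by MI. If the
/// register is not yet known to be alive in any block, the def is tentatively
/// recorded as its own kill (a dead def); later uses will update this.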
void LiveVariables::HandleVirtRegDef(unsigned Reg, MachineInstr *MI) {
VarInfo &VRInfo = getVarInfo(Reg);
if (VRInfo.AliveBlocks.empty())
    // If vr is not yet alive in any block, it defaults to dead.
VRInfo.Kills.push_back(MI);
}
/// FindLastPartialDef - Return the last partial def of the specified register.
/// Also returns the sub-registers that are defined by the instruction.
MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg,
SmallSet<unsigned,4> &PartDefRegs) {
unsigned LastDefReg = 0;
unsigned LastDefDist = 0;
MachineInstr *LastDef = nullptr;
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
MachineInstr *Def = PhysRegDef[SubReg];
if (!Def)
continue;
unsigned Dist = DistanceMap[Def];
if (Dist > LastDefDist) {
LastDefReg = SubReg;
LastDef = Def;
LastDefDist = Dist;
}
}
if (!LastDef)
return nullptr;
PartDefRegs.insert(LastDefReg);
for (unsigned i = 0, e = LastDef->getNumOperands(); i != e; ++i) {
MachineOperand &MO = LastDef->getOperand(i);
if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0)
continue;
unsigned DefReg = MO.getReg();
if (TRI->isSubRegister(Reg, DefReg)) {
for (MCSubRegIterator SubRegs(DefReg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
PartDefRegs.insert(*SubRegs);
}
}
return LastDef;
}
/// HandlePhysRegUse - Turn previous partial def's into read/mod/writes. Add
/// implicit defs to a machine instruction if there was an earlier def of its
/// super-register.
void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr *MI) {
MachineInstr *LastDef = PhysRegDef[Reg];
// If there was a previous use or a "full" def all is well.
if (!LastDef && !PhysRegUse[Reg]) {
// Otherwise, the last sub-register def implicitly defines this register.
// e.g.
// AH =
// AL = ... <imp-def EAX>, <imp-kill AH>
// = AH
// ...
// = EAX
// All of the sub-registers must have been defined before the use of Reg!
SmallSet<unsigned, 4> PartDefRegs;
MachineInstr *LastPartialDef = FindLastPartialDef(Reg, PartDefRegs);
// If LastPartialDef is NULL, it must be using a livein register.
if (LastPartialDef) {
LastPartialDef->addOperand(MachineOperand::CreateReg(Reg, true/*IsDef*/,
true/*IsImp*/));
PhysRegDef[Reg] = LastPartialDef;
SmallSet<unsigned, 8> Processed;
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (Processed.count(SubReg))
continue;
if (PartDefRegs.count(SubReg))
continue;
// This part of Reg was defined before the last partial def. It's killed
// here.
LastPartialDef->addOperand(MachineOperand::CreateReg(SubReg,
false/*IsDef*/,
true/*IsImp*/));
PhysRegDef[SubReg] = LastPartialDef;
for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
Processed.insert(*SS);
}
}
} else if (LastDef && !PhysRegUse[Reg] &&
!LastDef->findRegisterDefOperand(Reg))
// Last def defines the super register, add an implicit def of reg.
LastDef->addOperand(MachineOperand::CreateReg(Reg, true/*IsDef*/,
true/*IsImp*/));
// Remember this use.
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
PhysRegUse[*SubRegs] = MI;
}
/// FindLastRefOrPartRef - Return the last reference or partial reference of
/// the specified register.
MachineInstr *LiveVariables::FindLastRefOrPartRef(unsigned Reg) {
MachineInstr *LastDef = PhysRegDef[Reg];
MachineInstr *LastUse = PhysRegUse[Reg];
if (!LastDef && !LastUse)
return nullptr;
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
unsigned LastPartDefDist = 0;
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
MachineInstr *Def = PhysRegDef[SubReg];
if (Def && Def != LastDef) {
// There was a def of this sub-register in between. This is a partial
// def, keep track of the last one.
unsigned Dist = DistanceMap[Def];
if (Dist > LastPartDefDist)
LastPartDefDist = Dist;
} else if (MachineInstr *Use = PhysRegUse[SubReg]) {
unsigned Dist = DistanceMap[Use];
if (Dist > LastRefOrPartRefDist) {
LastRefOrPartRefDist = Dist;
LastRefOrPartRef = Use;
}
}
}
return LastRefOrPartRef;
}
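/// HandlePhysRegKill - Add kill and dead markers for physical register Reg at
/// the point where it stops being live, accounting for partial (sub-register)
/// defs and uses. Returns false if Reg was not live at all.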
bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
MachineInstr *LastDef = PhysRegDef[Reg];
MachineInstr *LastUse = PhysRegUse[Reg];
if (!LastDef && !LastUse)
return false;
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
// The whole register is used.
// AL =
// AH =
//
// = AX
// = AL, AX<imp-use, kill>
// AX =
//
// Or whole register is defined, but not used at all.
// AX<dead> =
// ...
// AX =
//
// Or whole register is defined, but only partly used.
// AX<dead> = AL<imp-def>
// = AL<kill>
// AX =
MachineInstr *LastPartDef = nullptr;
unsigned LastPartDefDist = 0;
SmallSet<unsigned, 8> PartUses;
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
MachineInstr *Def = PhysRegDef[SubReg];
if (Def && Def != LastDef) {
// There was a def of this sub-register in between. This is a partial
// def, keep track of the last one.
unsigned Dist = DistanceMap[Def];
if (Dist > LastPartDefDist) {
LastPartDefDist = Dist;
LastPartDef = Def;
}
continue;
}
if (MachineInstr *Use = PhysRegUse[SubReg]) {
for (MCSubRegIterator SS(SubReg, TRI, /*IncludeSelf=*/true); SS.isValid();
++SS)
PartUses.insert(*SS);
unsigned Dist = DistanceMap[Use];
if (Dist > LastRefOrPartRefDist) {
LastRefOrPartRefDist = Dist;
LastRefOrPartRef = Use;
}
}
}
if (!PhysRegUse[Reg]) {
// Partial uses. Mark register def dead and add implicit def of
// sub-registers which are used.
// EAX<dead> = op AL<imp-def>
    // That is, the EAX def is dead but the AL def extends past it.
PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (!PartUses.count(SubReg))
continue;
bool NeedDef = true;
if (PhysRegDef[Reg] == PhysRegDef[SubReg]) {
MachineOperand *MO = PhysRegDef[Reg]->findRegisterDefOperand(SubReg);
if (MO) {
NeedDef = false;
assert(!MO->isDead());
}
}
if (NeedDef)
PhysRegDef[Reg]->addOperand(MachineOperand::CreateReg(SubReg,
true/*IsDef*/, true/*IsImp*/));
MachineInstr *LastSubRef = FindLastRefOrPartRef(SubReg);
if (LastSubRef)
LastSubRef->addRegisterKilled(SubReg, TRI, true);
else {
LastRefOrPartRef->addRegisterKilled(SubReg, TRI, true);
for (MCSubRegIterator SS(SubReg, TRI, /*IncludeSelf=*/true);
SS.isValid(); ++SS)
PhysRegUse[*SS] = LastRefOrPartRef;
}
for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
PartUses.erase(*SS);
}
} else if (LastRefOrPartRef == PhysRegDef[Reg] && LastRefOrPartRef != MI) {
if (LastPartDef)
// The last partial def kills the register.
LastPartDef->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
true/*IsImp*/, true/*IsKill*/));
else {
MachineOperand *MO =
LastRefOrPartRef->findRegisterDefOperand(Reg, false, TRI);
bool NeedEC = MO->isEarlyClobber() && MO->getReg() != Reg;
// If the last reference is the last def, then it's not used at all.
// That is, unless we are currently processing the last reference itself.
LastRefOrPartRef->addRegisterDead(Reg, TRI, true);
if (NeedEC) {
// If we are adding a subreg def and the superreg def is marked early
// clobber, add an early clobber marker to the subreg def.
MO = LastRefOrPartRef->findRegisterDefOperand(Reg);
if (MO)
MO->setIsEarlyClobber();
}
}
} else
LastRefOrPartRef->addRegisterKilled(Reg, TRI, true);
return true;
}
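/// HandleRegMask - Process a register mask operand (e.g. the clobber list of
/// a call). For each live register clobbered by the mask, only the largest
/// clobbered super-register is killed; that implicitly covers all of its
/// sub-registers.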
void LiveVariables::HandleRegMask(const MachineOperand &MO) {
// Call HandlePhysRegKill() for all live registers clobbered by Mask.
// Clobbered registers are always dead, sp there is no need to use
// HandlePhysRegDef().
for (unsigned Reg = 1, NumRegs = TRI->getNumRegs(); Reg != NumRegs; ++Reg) {
// Skip dead regs.
if (!PhysRegDef[Reg] && !PhysRegUse[Reg])
continue;
// Skip mask-preserved regs.
if (!MO.clobbersPhysReg(Reg))
continue;
// Kill the largest clobbered super-register.
// This avoids needless implicit operands.
unsigned Super = Reg;
for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
if ((PhysRegDef[*SR] || PhysRegUse[*SR]) && MO.clobbersPhysReg(*SR))
Super = *SR;
HandlePhysRegKill(Super, nullptr);
}
}
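/// HandlePhysRegDef - Process a def of physical register Reg, either by MI or
/// at a block boundary when MI is null. Any live parts of Reg are killed
/// first, and the def is remembered in Defs so that UpdatePhysRegDefs can
/// commit it once all operands of the instruction have been processed.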
void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
SmallVectorImpl<unsigned> &Defs) {
// What parts of the register are previously defined?
SmallSet<unsigned, 32> Live;
if (PhysRegDef[Reg] || PhysRegUse[Reg]) {
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
Live.insert(*SubRegs);
} else {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
      // If a register isn't itself defined, but all of the parts that make it
      // up are defined, then consider it defined as well.
// e.g.
// AL =
// AH =
// = AX
if (Live.count(SubReg))
continue;
if (PhysRegDef[SubReg] || PhysRegUse[SubReg]) {
for (MCSubRegIterator SS(SubReg, TRI, /*IncludeSelf=*/true);
SS.isValid(); ++SS)
Live.insert(*SS);
}
}
}
// Start from the largest piece, find the last time any part of the register
// is referenced.
HandlePhysRegKill(Reg, MI);
// Only some of the sub-registers are used.
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (!Live.count(SubReg))
// Skip if this sub-register isn't defined.
continue;
HandlePhysRegKill(SubReg, MI);
}
if (MI)
Defs.push_back(Reg); // Remember this def.
}
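/// UpdatePhysRegDefs - Commit the defs collected by HandlePhysRegDef: point
/// PhysRegDef at MI for each defined register and all of its sub-registers,
/// and clear the corresponding PhysRegUse entries.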
void LiveVariables::UpdatePhysRegDefs(MachineInstr *MI,
SmallVectorImpl<unsigned> &Defs) {
while (!Defs.empty()) {
unsigned Reg = Defs.back();
Defs.pop_back();
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
PhysRegDef[SubReg] = MI;
PhysRegUse[SubReg] = nullptr;
}
}
}
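/// runOnInstr - Update liveness for a single (non-debug) instruction. Kill
/// and dead flags are cleared first and then recomputed: uses are processed
/// before regmask clobbers, which are processed before defs, so that uses see
/// the register state from just before the instruction's own defs.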
void LiveVariables::runOnInstr(MachineInstr *MI,
SmallVectorImpl<unsigned> &Defs) {
assert(!MI->isDebugValue());
// Process all of the operands of the instruction...
unsigned NumOperandsToProcess = MI->getNumOperands();
// Unless it is a PHI node. In this case, ONLY process the DEF, not any
// of the uses. They will be handled in other basic blocks.
if (MI->isPHI())
NumOperandsToProcess = 1;
// Clear kill and dead markers. LV will recompute them.
SmallVector<unsigned, 4> UseRegs;
SmallVector<unsigned, 4> DefRegs;
SmallVector<unsigned, 1> RegMasks;
for (unsigned i = 0; i != NumOperandsToProcess; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask()) {
RegMasks.push_back(i);
continue;
}
if (!MO.isReg() || MO.getReg() == 0)
continue;
unsigned MOReg = MO.getReg();
if (MO.isUse()) {
MO.setIsKill(false);
if (MO.readsReg())
UseRegs.push_back(MOReg);
} else /*MO.isDef()*/ {
MO.setIsDead(false);
DefRegs.push_back(MOReg);
}
}
MachineBasicBlock *MBB = MI->getParent();
// Process all uses.
for (unsigned i = 0, e = UseRegs.size(); i != e; ++i) {
unsigned MOReg = UseRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
HandleVirtRegUse(MOReg, MBB, MI);
else if (!MRI->isReserved(MOReg))
HandlePhysRegUse(MOReg, MI);
}
// Process all masked registers. (Call clobbers).
for (unsigned i = 0, e = RegMasks.size(); i != e; ++i)
HandleRegMask(MI->getOperand(RegMasks[i]));
// Process all defs.
for (unsigned i = 0, e = DefRegs.size(); i != e; ++i) {
unsigned MOReg = DefRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
HandleVirtRegDef(MOReg, MI);
else if (!MRI->isReserved(MOReg))
HandlePhysRegDef(MOReg, MI, Defs);
}
UpdatePhysRegDefs(MI, Defs);
}
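/// runOnBlock - Compute liveness for a single basic block: treat live-in
/// physical registers as defs, walk the instructions while recording their
/// distances, handle virtual registers read by PHI nodes in successors as if
/// they were used at the end of this block, and finally kill every physical
/// register still live that is not live out into a successor.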
void LiveVariables::runOnBlock(MachineBasicBlock *MBB, const unsigned NumRegs) {
// Mark live-in registers as live-in.
SmallVector<unsigned, 4> Defs;
for (MachineBasicBlock::livein_iterator II = MBB->livein_begin(),
EE = MBB->livein_end(); II != EE; ++II) {
assert(TargetRegisterInfo::isPhysicalRegister(*II) &&
"Cannot have a live-in virtual register!");
HandlePhysRegDef(*II, nullptr, Defs);
}
// Loop over all of the instructions, processing them.
DistanceMap.clear();
unsigned Dist = 0;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
I != E; ++I) {
MachineInstr *MI = I;
if (MI->isDebugValue())
continue;
DistanceMap.insert(std::make_pair(MI, Dist++));
runOnInstr(MI, Defs);
}
// Handle any virtual assignments from PHI nodes which might be at the
// bottom of this basic block. We check all of our successor blocks to see
// if they have PHI nodes, and if so, we simulate an assignment at the end
// of the current block.
if (!PHIVarInfo[MBB->getNumber()].empty()) {
SmallVectorImpl<unsigned> &VarInfoVec = PHIVarInfo[MBB->getNumber()];
for (SmallVectorImpl<unsigned>::iterator I = VarInfoVec.begin(),
E = VarInfoVec.end(); I != E; ++I)
// Mark it alive only in the block we are representing.
MarkVirtRegAliveInBlock(getVarInfo(*I),MRI->getVRegDef(*I)->getParent(),
MBB);
}
// MachineCSE may CSE instructions which write to non-allocatable physical
// registers across MBBs. Remember if any reserved register is liveout.
SmallSet<unsigned, 4> LiveOuts;
for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI) {
MachineBasicBlock *SuccMBB = *SI;
if (SuccMBB->isLandingPad())
continue;
for (MachineBasicBlock::livein_iterator LI = SuccMBB->livein_begin(),
LE = SuccMBB->livein_end(); LI != LE; ++LI) {
unsigned LReg = *LI;
if (!TRI->isInAllocatableClass(LReg))
        // Only remember non-allocatable live-ins; allocatable physical
        // registers are assumed to be live only within a single block.
LiveOuts.insert(LReg);
}
}
// Loop over PhysRegDef / PhysRegUse, killing any registers that are
// available at the end of the basic block.
for (unsigned i = 0; i != NumRegs; ++i)
if ((PhysRegDef[i] || PhysRegUse[i]) && !LiveOuts.count(i))
HandlePhysRegDef(i, nullptr, Defs);
}
bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
MRI = &mf.getRegInfo();
TRI = MF->getSubtarget().getRegisterInfo();
const unsigned NumRegs = TRI->getNumRegs();
PhysRegDef.assign(NumRegs, nullptr);
PhysRegUse.assign(NumRegs, nullptr);
PHIVarInfo.resize(MF->getNumBlockIDs());
PHIJoins.clear();
// FIXME: LiveIntervals will be updated to remove its dependence on
// LiveVariables to improve compilation time and eliminate bizarre pass
// dependencies. Until then, we can't change much in -O0.
if (!MRI->isSSA())
report_fatal_error("regalloc=... not currently supported with -O0");
analyzePHINodes(mf);
// Calculate live variable information in depth first order on the CFG of the
// function. This guarantees that we will see the definition of a virtual
// register before its uses due to dominance properties of SSA (except for PHI
// nodes, which are treated as a special case).
MachineBasicBlock *Entry = MF->begin();
SmallPtrSet<MachineBasicBlock*,16> Visited;
for (MachineBasicBlock *MBB : depth_first_ext(Entry, Visited)) {
runOnBlock(MBB, NumRegs);
PhysRegDef.assign(NumRegs, nullptr);
PhysRegUse.assign(NumRegs, nullptr);
}
// Convert and transfer the dead / killed information we have gathered into
// VirtRegInfo onto MI's.
for (unsigned i = 0, e1 = VirtRegInfo.size(); i != e1; ++i) {
const unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
for (unsigned j = 0, e2 = VirtRegInfo[Reg].Kills.size(); j != e2; ++j)
if (VirtRegInfo[Reg].Kills[j] == MRI->getVRegDef(Reg))
VirtRegInfo[Reg].Kills[j]->addRegisterDead(Reg, TRI);
else
VirtRegInfo[Reg].Kills[j]->addRegisterKilled(Reg, TRI);
}
  // Check to make sure there are no unreachable blocks in the MC CFG for the
  // function. If there are, it is due to a bug in the instruction selector or
  // some other part of the code generator.
#ifndef NDEBUG
for(MachineFunction::iterator i = MF->begin(), e = MF->end(); i != e; ++i)
assert(Visited.count(&*i) != 0 && "unreachable basic block found");
#endif
PhysRegDef.clear();
PhysRegUse.clear();
PHIVarInfo.clear();
return false;
}
/// replaceKillInstruction - Update register kill info by replacing a kill
/// instruction with a new one.
void LiveVariables::replaceKillInstruction(unsigned Reg, MachineInstr *OldMI,
MachineInstr *NewMI) {
VarInfo &VI = getVarInfo(Reg);
std::replace(VI.Kills.begin(), VI.Kills.end(), OldMI, NewMI);
}
/// removeVirtualRegistersKilled - Remove all killed info for the specified
/// instruction.
void LiveVariables::removeVirtualRegistersKilled(MachineInstr *MI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isKill()) {
MO.setIsKill(false);
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
bool removed = getVarInfo(Reg).removeKill(MI);
assert(removed && "kill not in register's VarInfo?");
(void)removed;
}
}
}
}
/// analyzePHINodes - Gather information about the PHI nodes in this function.
/// In particular, for each virtual register used by a PHI node we record the
/// basic block the incoming value comes from.
///
void LiveVariables::analyzePHINodes(const MachineFunction& Fn) {
for (const auto &MBB : Fn)
for (const auto &BBI : MBB) {
if (!BBI.isPHI())
break;
for (unsigned i = 1, e = BBI.getNumOperands(); i != e; i += 2)
if (BBI.getOperand(i).readsReg())
PHIVarInfo[BBI.getOperand(i + 1).getMBB()->getNumber()]
.push_back(BBI.getOperand(i).getReg());
}
}
bool LiveVariables::VarInfo::isLiveIn(const MachineBasicBlock &MBB,
unsigned Reg,
MachineRegisterInfo &MRI) {
unsigned Num = MBB.getNumber();
// Reg is live-through.
if (AliveBlocks.test(Num))
return true;
// Registers defined in MBB cannot be live in.
const MachineInstr *Def = MRI.getVRegDef(Reg);
if (Def && Def->getParent() == &MBB)
return false;
// Reg was not defined in MBB, was it killed here?
return findKill(&MBB);
}
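/// isLiveOut - Return true if virtual register Reg is live out of MBB, i.e.
/// it is either live through at least one successor or killed inside one.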
bool LiveVariables::isLiveOut(unsigned Reg, const MachineBasicBlock &MBB) {
LiveVariables::VarInfo &VI = getVarInfo(Reg);
SmallPtrSet<const MachineBasicBlock *, 8> Kills;
for (unsigned i = 0, e = VI.Kills.size(); i != e; ++i)
Kills.insert(VI.Kills[i]->getParent());
// Loop over all of the successors of the basic block, checking to see if
// the value is either live in the block, or if it is killed in the block.
for (const MachineBasicBlock *SuccMBB : MBB.successors()) {
// Is it alive in this successor?
unsigned SuccIdx = SuccMBB->getNumber();
if (VI.AliveBlocks.test(SuccIdx))
return true;
// Or is it live because there is a use in a successor that kills it?
if (Kills.count(SuccMBB))
return true;
}
return false;
}
/// addNewBlock - Add a new basic block BB as an empty successor to DomBB. All
/// variables that are live out of DomBB will be marked as passing live through
/// BB.
void LiveVariables::addNewBlock(MachineBasicBlock *BB,
MachineBasicBlock *DomBB,
MachineBasicBlock *SuccBB) {
const unsigned NumNew = BB->getNumber();
SmallSet<unsigned, 16> Defs, Kills;
MachineBasicBlock::iterator BBI = SuccBB->begin(), BBE = SuccBB->end();
for (; BBI != BBE && BBI->isPHI(); ++BBI) {
// Record the def of the PHI node.
Defs.insert(BBI->getOperand(0).getReg());
// All registers used by PHI nodes in SuccBB must be live through BB.
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2)
if (BBI->getOperand(i+1).getMBB() == BB)
getVarInfo(BBI->getOperand(i).getReg()).AliveBlocks.set(NumNew);
}
// Record all vreg defs and kills of all instructions in SuccBB.
for (; BBI != BBE; ++BBI) {
for (MachineInstr::mop_iterator I = BBI->operands_begin(),
E = BBI->operands_end(); I != E; ++I) {
if (I->isReg() && TargetRegisterInfo::isVirtualRegister(I->getReg())) {
if (I->isDef())
Defs.insert(I->getReg());
else if (I->isKill())
Kills.insert(I->getReg());
}
}
}
// Update info for all live variables
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    // If the register is defined in the successor, it can't be live in BB.
if (Defs.count(Reg))
continue;
// If the register is either killed in or live through SuccBB it's also live
// through BB.
VarInfo &VI = getVarInfo(Reg);
if (Kills.count(Reg) || VI.AliveBlocks.test(SuccBB->getNumber()))
VI.AliveBlocks.set(NumNew);
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineBlockPlacement.cpp | //===-- MachineBlockPlacement.cpp - Basic Block Code Layout optimization --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements basic block placement transformations using the CFG
// structure and branch probability estimates.
//
// The pass strives to preserve the structure of the CFG (that is, retain
// a topological ordering of basic blocks) in the absence of a *strong* signal
// to the contrary from probabilities. However, within the CFG structure, it
// attempts to choose an ordering which favors placing more likely sequences of
// blocks adjacent to each other.
//
// The algorithm works from the inner-most loop within a function outward, and
// at each stage walks through the basic blocks, trying to coalesce them into
// sequential chains where allowed by the CFG (or demanded by heavy
// probabilities). Finally, it walks the blocks in topological order, and the
// first time it reaches a chain of basic blocks, it schedules them in the
// function in-order.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "block-placement"
STATISTIC(NumCondBranches, "Number of conditional branches");
STATISTIC(NumUncondBranches, "Number of uncondittional branches");
STATISTIC(CondBranchTakenFreq,
"Potential frequency of taking conditional branches");
STATISTIC(UncondBranchTakenFreq,
"Potential frequency of taking unconditional branches");
static cl::opt<unsigned> AlignAllBlock("align-all-blocks",
cl::desc("Force the alignment of all "
"blocks in the function."),
cl::init(0), cl::Hidden);
// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned> ExitBlockBias(
"block-placement-exit-block-bias",
cl::desc("Block frequency percentage a loop exit block needs "
"over the original exit to be considered the new exit."),
cl::init(0), cl::Hidden);
static cl::opt<bool> OutlineOptionalBranches(
"outline-optional-branches",
cl::desc("Put completely optional branches, i.e. branches with a common "
"post dominator, out of line."),
cl::init(false), cl::Hidden);
static cl::opt<unsigned> OutlineOptionalThreshold(
"outline-optional-threshold",
cl::desc("Don't outline optional branches that are a single block with an "
"instruction count below this threshold"),
cl::init(4), cl::Hidden);
namespace {
class BlockChain;
/// \brief Type for our function-wide basic block -> block chain mapping.
typedef DenseMap<MachineBasicBlock *, BlockChain *> BlockToChainMapType;
}
namespace {
/// \brief A chain of blocks which will be laid out contiguously.
///
/// This is the data structure representing a chain of consecutive blocks that
/// are profitable to layout together in order to maximize fallthrough
/// probabilities and code locality. We also can use a block chain to represent
/// a sequence of basic blocks which have some external (correctness)
/// requirement for sequential layout.
///
/// Chains can be built around a single basic block and can be merged to grow
/// them. They participate in a block-to-chain mapping, which is updated
/// automatically as chains are merged together.
class BlockChain {
/// \brief The sequence of blocks belonging to this chain.
///
/// This is the sequence of blocks for a particular chain. These will be laid
/// out in-order within the function.
SmallVector<MachineBasicBlock *, 4> Blocks;
/// \brief A handle to the function-wide basic block to block chain mapping.
///
/// This is retained in each block chain to simplify the computation of child
/// block chains for SCC-formation and iteration. We store the edges to child
/// basic blocks, and map them back to their associated chains using this
/// structure.
BlockToChainMapType &BlockToChain;
public:
/// \brief Construct a new BlockChain.
///
/// This builds a new block chain representing a single basic block in the
/// function. It also registers itself as the chain that block participates
/// in with the BlockToChain mapping.
BlockChain(BlockToChainMapType &BlockToChain, MachineBasicBlock *BB)
: Blocks(1, BB), BlockToChain(BlockToChain), LoopPredecessors(0) {
assert(BB && "Cannot create a chain with a null basic block");
BlockToChain[BB] = this;
}
/// \brief Iterator over blocks within the chain.
typedef SmallVectorImpl<MachineBasicBlock *>::iterator iterator;
/// \brief Beginning of blocks within the chain.
iterator begin() { return Blocks.begin(); }
/// \brief End of blocks within the chain.
iterator end() { return Blocks.end(); }
/// \brief Merge a block chain into this one.
///
/// This routine merges a block chain into this one. It takes care of forming
/// a contiguous sequence of basic blocks, updating the edge list, and
/// updating the block -> chain mapping. It does not free or tear down the
/// old chain, but the old chain's block list is no longer valid.
void merge(MachineBasicBlock *BB, BlockChain *Chain) {
assert(BB);
assert(!Blocks.empty());
// Fast path in case we don't have a chain already.
if (!Chain) {
assert(!BlockToChain[BB]);
Blocks.push_back(BB);
BlockToChain[BB] = this;
return;
}
assert(BB == *Chain->begin());
assert(Chain->begin() != Chain->end());
// Update the incoming blocks to point to this chain, and add them to the
// chain structure.
for (MachineBasicBlock *ChainBB : *Chain) {
Blocks.push_back(ChainBB);
assert(BlockToChain[ChainBB] == Chain && "Incoming blocks not in chain");
BlockToChain[ChainBB] = this;
}
}
#ifndef NDEBUG
/// \brief Dump the blocks in this chain.
LLVM_DUMP_METHOD void dump() {
for (MachineBasicBlock *MBB : *this)
MBB->dump();
}
#endif // NDEBUG
/// \brief Count of predecessors within the loop currently being processed.
///
/// This count is updated at each loop we process to represent the number of
/// in-loop predecessors of this chain.
unsigned LoopPredecessors;
};
}
namespace {
class MachineBlockPlacement : public MachineFunctionPass {
/// \brief A typedef for a block filter set.
typedef SmallPtrSet<MachineBasicBlock *, 16> BlockFilterSet;
/// \brief A handle to the branch probability pass.
const MachineBranchProbabilityInfo *MBPI;
/// \brief A handle to the function-wide block frequency pass.
const MachineBlockFrequencyInfo *MBFI;
/// \brief A handle to the loop info.
const MachineLoopInfo *MLI;
/// \brief A handle to the target's instruction info.
const TargetInstrInfo *TII;
/// \brief A handle to the target's lowering info.
const TargetLoweringBase *TLI;
  /// \brief A handle to the dominator tree.
MachineDominatorTree *MDT;
  /// \brief A set of blocks that are unavoidably executed, i.e. they dominate
/// all terminators of the MachineFunction.
SmallPtrSet<MachineBasicBlock *, 4> UnavoidableBlocks;
/// \brief Allocator and owner of BlockChain structures.
///
/// We build BlockChains lazily while processing the loop structure of
/// a function. To reduce malloc traffic, we allocate them using this
/// slab-like allocator, and destroy them after the pass completes. An
/// important guarantee is that this allocator produces stable pointers to
/// the chains.
SpecificBumpPtrAllocator<BlockChain> ChainAllocator;
/// \brief Function wide BasicBlock to BlockChain mapping.
///
/// This mapping allows efficiently moving from any given basic block to the
/// BlockChain it participates in, if any. We use it to, among other things,
/// allow implicitly defining edges between chains as the existing edges
/// between basic blocks.
DenseMap<MachineBasicBlock *, BlockChain *> BlockToChain;
void markChainSuccessors(BlockChain &Chain, MachineBasicBlock *LoopHeaderBB,
SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
const BlockFilterSet *BlockFilter = nullptr);
MachineBasicBlock *selectBestSuccessor(MachineBasicBlock *BB,
BlockChain &Chain,
const BlockFilterSet *BlockFilter);
MachineBasicBlock *
selectBestCandidateBlock(BlockChain &Chain,
SmallVectorImpl<MachineBasicBlock *> &WorkList,
const BlockFilterSet *BlockFilter);
MachineBasicBlock *
getFirstUnplacedBlock(MachineFunction &F, const BlockChain &PlacedChain,
MachineFunction::iterator &PrevUnplacedBlockIt,
const BlockFilterSet *BlockFilter);
void buildChain(MachineBasicBlock *BB, BlockChain &Chain,
SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
const BlockFilterSet *BlockFilter = nullptr);
MachineBasicBlock *findBestLoopTop(MachineLoop &L,
const BlockFilterSet &LoopBlockSet);
MachineBasicBlock *findBestLoopExit(MachineFunction &F, MachineLoop &L,
const BlockFilterSet &LoopBlockSet);
void buildLoopChains(MachineFunction &F, MachineLoop &L);
void rotateLoop(BlockChain &LoopChain, MachineBasicBlock *ExitingBB,
const BlockFilterSet &LoopBlockSet);
void buildCFGChains(MachineFunction &F);
public:
static char ID; // Pass identification, replacement for typeid
MachineBlockPlacement() : MachineFunctionPass(ID) {
initializeMachineBlockPlacementPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
}
char MachineBlockPlacement::ID = 0;
char &llvm::MachineBlockPlacementID = MachineBlockPlacement::ID;
INITIALIZE_PASS_BEGIN(MachineBlockPlacement, "block-placement",
"Branch Probability Basic Block Placement", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(MachineBlockPlacement, "block-placement",
"Branch Probability Basic Block Placement", false, false)
#ifndef NDEBUG
/// \brief Helper to print the name of a MBB.
///
/// Only used by debug logging.
static std::string getBlockName(MachineBasicBlock *BB) {
std::string Result;
raw_string_ostream OS(Result);
OS << "BB#" << BB->getNumber();
OS << " (derived from LLVM BB '" << BB->getName() << "')";
OS.flush();
return Result;
}
/// \brief Helper to print the number of a MBB.
///
/// Only used by debug logging.
static std::string getBlockNum(MachineBasicBlock *BB) {
std::string Result;
raw_string_ostream OS(Result);
OS << "BB#" << BB->getNumber();
OS.flush();
return Result;
}
#endif
/// \brief Mark a chain's successors as having one fewer preds.
///
/// When a chain is being merged into the "placed" chain, this routine will
/// quickly walk the successors of each block in the chain and mark them as
/// having one fewer active predecessor. It also adds any successors of this
/// chain which reach the zero-predecessor state to the worklist passed in.
void MachineBlockPlacement::markChainSuccessors(
BlockChain &Chain, MachineBasicBlock *LoopHeaderBB,
SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
const BlockFilterSet *BlockFilter) {
// Walk all the blocks in this chain, marking their successors as having
// a predecessor placed.
for (MachineBasicBlock *MBB : Chain) {
// Add any successors for which this is the only un-placed in-loop
// predecessor to the worklist as a viable candidate for CFG-neutral
// placement. No subsequent placement of this block will violate the CFG
// shape, so we get to use heuristics to choose a favorable placement.
for (MachineBasicBlock *Succ : MBB->successors()) {
if (BlockFilter && !BlockFilter->count(Succ))
continue;
BlockChain &SuccChain = *BlockToChain[Succ];
// Disregard edges within a fixed chain, or edges to the loop header.
if (&Chain == &SuccChain || Succ == LoopHeaderBB)
continue;
// This is a cross-chain edge that is within the loop, so decrement the
// loop predecessor count of the destination chain.
if (SuccChain.LoopPredecessors > 0 && --SuccChain.LoopPredecessors == 0)
BlockWorkList.push_back(*SuccChain.begin());
}
}
}
/// \brief Select the best successor for a block.
///
/// This looks across all successors of a particular block and attempts to
/// select the "best" one to be the layout successor. It only considers direct
/// successors which also pass the block filter. It will attempt to avoid
/// breaking CFG structure, but cave and break such structures in the case of
/// very hot successor edges.
///
/// \returns The best successor block found, or null if none are viable.
MachineBasicBlock *
MachineBlockPlacement::selectBestSuccessor(MachineBasicBlock *BB,
BlockChain &Chain,
const BlockFilterSet *BlockFilter) {
const BranchProbability HotProb(4, 5); // 80%
MachineBasicBlock *BestSucc = nullptr;
// FIXME: Due to the performance of the probability and weight routines in
// the MBPI analysis, we manually compute probabilities using the edge
// weights. This is suboptimal as it means that the somewhat subtle
// definition of edge weight semantics is encoded here as well. We should
// improve the MBPI interface to efficiently support query patterns such as
// this.
uint32_t BestWeight = 0;
uint32_t WeightScale = 0;
uint32_t SumWeight = MBPI->getSumForBlock(BB, WeightScale);
DEBUG(dbgs() << "Attempting merge from: " << getBlockName(BB) << "\n");
for (MachineBasicBlock *Succ : BB->successors()) {
if (BlockFilter && !BlockFilter->count(Succ))
continue;
BlockChain &SuccChain = *BlockToChain[Succ];
if (&SuccChain == &Chain) {
DEBUG(dbgs() << " " << getBlockName(Succ) << " -> Already merged!\n");
continue;
}
if (Succ != *SuccChain.begin()) {
DEBUG(dbgs() << " " << getBlockName(Succ) << " -> Mid chain!\n");
continue;
}
uint32_t SuccWeight = MBPI->getEdgeWeight(BB, Succ);
BranchProbability SuccProb(SuccWeight / WeightScale, SumWeight);
// If we outline optional branches, look whether Succ is unavoidable, i.e.
// dominates all terminators of the MachineFunction. If it does, other
// successors must be optional. Don't do this for cold branches.
if (OutlineOptionalBranches && SuccProb > HotProb.getCompl() &&
UnavoidableBlocks.count(Succ) > 0) {
auto HasShortOptionalBranch = [&]() {
for (MachineBasicBlock *Pred : Succ->predecessors()) {
// Check whether there is an unplaced optional branch.
if (Pred == Succ || (BlockFilter && !BlockFilter->count(Pred)) ||
BlockToChain[Pred] == &Chain)
continue;
// Check whether the optional branch has exactly one BB.
if (Pred->pred_size() > 1 || *Pred->pred_begin() != BB)
continue;
// Check whether the optional branch is small.
if (Pred->size() < OutlineOptionalThreshold)
return true;
}
return false;
};
if (!HasShortOptionalBranch())
return Succ;
}
// Only consider successors which are either "hot", or wouldn't violate
// any CFG constraints.
if (SuccChain.LoopPredecessors != 0) {
if (SuccProb < HotProb) {
DEBUG(dbgs() << " " << getBlockName(Succ) << " -> " << SuccProb
<< " (prob) (CFG conflict)\n");
continue;
}
// Make sure that a hot successor doesn't have a globally more
// important predecessor.
BlockFrequency CandidateEdgeFreq =
MBFI->getBlockFreq(BB) * SuccProb * HotProb.getCompl();
bool BadCFGConflict = false;
for (MachineBasicBlock *Pred : Succ->predecessors()) {
if (Pred == Succ || (BlockFilter && !BlockFilter->count(Pred)) ||
BlockToChain[Pred] == &Chain)
continue;
BlockFrequency PredEdgeFreq =
MBFI->getBlockFreq(Pred) * MBPI->getEdgeProbability(Pred, Succ);
if (PredEdgeFreq >= CandidateEdgeFreq) {
BadCFGConflict = true;
break;
}
}
if (BadCFGConflict) {
DEBUG(dbgs() << " " << getBlockName(Succ) << " -> " << SuccProb
<< " (prob) (non-cold CFG conflict)\n");
continue;
}
}
DEBUG(dbgs() << " " << getBlockName(Succ) << " -> " << SuccProb
<< " (prob)"
<< (SuccChain.LoopPredecessors != 0 ? " (CFG break)" : "")
<< "\n");
if (BestSucc && BestWeight >= SuccWeight)
continue;
BestSucc = Succ;
BestWeight = SuccWeight;
}
return BestSucc;
}
/// \brief Select the best block from a worklist.
///
/// This looks through the provided worklist as a list of candidate basic
/// blocks and select the most profitable one to place. The definition of
/// profitable only really makes sense in the context of a loop. This returns
/// the most frequently visited block in the worklist, which in the case of
/// a loop, is the one most desirable to be physically close to the rest of the
/// loop body in order to improve icache behavior.
///
/// \returns The best block found, or null if none are viable.
MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock(
BlockChain &Chain, SmallVectorImpl<MachineBasicBlock *> &WorkList,
const BlockFilterSet *BlockFilter) {
// Once we need to walk the worklist looking for a candidate, cleanup the
// worklist of already placed entries.
// FIXME: If this shows up on profiles, it could be folded (at the cost of
// some code complexity) into the loop below.
WorkList.erase(std::remove_if(WorkList.begin(), WorkList.end(),
[&](MachineBasicBlock *BB) {
return BlockToChain.lookup(BB) == &Chain;
}),
WorkList.end());
MachineBasicBlock *BestBlock = nullptr;
BlockFrequency BestFreq;
for (MachineBasicBlock *MBB : WorkList) {
BlockChain &SuccChain = *BlockToChain[MBB];
if (&SuccChain == &Chain) {
DEBUG(dbgs() << " " << getBlockName(MBB) << " -> Already merged!\n");
continue;
}
assert(SuccChain.LoopPredecessors == 0 && "Found CFG-violating block");
BlockFrequency CandidateFreq = MBFI->getBlockFreq(MBB);
DEBUG(dbgs() << " " << getBlockName(MBB) << " -> ";
MBFI->printBlockFreq(dbgs(), CandidateFreq) << " (freq)\n");
if (BestBlock && BestFreq >= CandidateFreq)
continue;
BestBlock = MBB;
BestFreq = CandidateFreq;
}
return BestBlock;
}
/// \brief Retrieve the first unplaced basic block.
///
/// This routine is called when we are unable to use the CFG to walk through
/// all of the basic blocks and form a chain due to unnatural loops in the CFG.
/// We walk through the function's blocks in order, starting from
/// PrevUnplacedBlockIt. We update this iterator on each call to avoid
/// re-scanning the entire sequence on repeated calls to this routine.
MachineBasicBlock *MachineBlockPlacement::getFirstUnplacedBlock(
MachineFunction &F, const BlockChain &PlacedChain,
MachineFunction::iterator &PrevUnplacedBlockIt,
const BlockFilterSet *BlockFilter) {
for (MachineFunction::iterator I = PrevUnplacedBlockIt, E = F.end(); I != E;
++I) {
if (BlockFilter && !BlockFilter->count(I))
continue;
if (BlockToChain[I] != &PlacedChain) {
PrevUnplacedBlockIt = I;
// Now select the head of the chain to which the unplaced block belongs
// as the block to place. This will force the entire chain to be placed,
// and satisfies the requirements of merging chains.
return *BlockToChain[I]->begin();
}
}
return nullptr;
}
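/// \brief Grow a chain of blocks starting at BB.
///
/// Repeatedly appends the most profitable block: first the best viable
/// fallthrough successor, then the best frequency-ranked block from the
/// worklist, and as a last resort the first unplaced block in function order
/// (which only happens for unnatural CFGs).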
void MachineBlockPlacement::buildChain(
MachineBasicBlock *BB, BlockChain &Chain,
SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
const BlockFilterSet *BlockFilter) {
assert(BB);
assert(BlockToChain[BB] == &Chain);
MachineFunction &F = *BB->getParent();
MachineFunction::iterator PrevUnplacedBlockIt = F.begin();
MachineBasicBlock *LoopHeaderBB = BB;
markChainSuccessors(Chain, LoopHeaderBB, BlockWorkList, BlockFilter);
BB = *std::prev(Chain.end());
for (;;) {
assert(BB);
assert(BlockToChain[BB] == &Chain);
assert(*std::prev(Chain.end()) == BB);
// Look for the best viable successor if there is one to place immediately
// after this block.
MachineBasicBlock *BestSucc = selectBestSuccessor(BB, Chain, BlockFilter);
// If an immediate successor isn't available, look for the best viable
// block among those we've identified as not violating the loop's CFG at
// this point. This won't be a fallthrough, but it will increase locality.
if (!BestSucc)
BestSucc = selectBestCandidateBlock(Chain, BlockWorkList, BlockFilter);
if (!BestSucc) {
BestSucc =
getFirstUnplacedBlock(F, Chain, PrevUnplacedBlockIt, BlockFilter);
if (!BestSucc)
break;
DEBUG(dbgs() << "Unnatural loop CFG detected, forcibly merging the "
"layout successor until the CFG reduces\n");
}
    // Place this block, updating the data structures to reflect its placement.
BlockChain &SuccChain = *BlockToChain[BestSucc];
// Zero out LoopPredecessors for the successor we're about to merge in case
// we selected a successor that didn't fit naturally into the CFG.
SuccChain.LoopPredecessors = 0;
DEBUG(dbgs() << "Merging from " << getBlockNum(BB) << " to "
<< getBlockNum(BestSucc) << "\n");
markChainSuccessors(SuccChain, LoopHeaderBB, BlockWorkList, BlockFilter);
Chain.merge(BestSucc, &SuccChain);
BB = *std::prev(Chain.end());
}
DEBUG(dbgs() << "Finished forming chain for header block "
<< getBlockNum(*Chain.begin()) << "\n");
}
/// \brief Find the best loop top block for layout.
///
/// Look for a block which is strictly better than the loop header for laying
/// out at the top of the loop. This looks for one and only one pattern:
/// a latch block with no conditional exit. This block will cause a conditional
/// jump around it or will be the bottom of the loop if we lay it out in place,
/// but if it doesn't end up at the bottom of the loop for any reason,
/// rotation alone won't fix it. Because such a block will always result in an
/// unconditional jump (for the backedge), rotating it in front of the loop
/// header is always profitable.
MachineBasicBlock *
MachineBlockPlacement::findBestLoopTop(MachineLoop &L,
const BlockFilterSet &LoopBlockSet) {
// Check that the header hasn't been fused with a preheader block due to
// crazy branches. If it has, we need to start with the header at the top to
// prevent pulling the preheader into the loop body.
BlockChain &HeaderChain = *BlockToChain[L.getHeader()];
if (!LoopBlockSet.count(*HeaderChain.begin()))
return L.getHeader();
DEBUG(dbgs() << "Finding best loop top for: " << getBlockName(L.getHeader())
<< "\n");
BlockFrequency BestPredFreq;
MachineBasicBlock *BestPred = nullptr;
for (MachineBasicBlock *Pred : L.getHeader()->predecessors()) {
if (!LoopBlockSet.count(Pred))
continue;
DEBUG(dbgs() << " header pred: " << getBlockName(Pred) << ", "
<< Pred->succ_size() << " successors, ";
MBFI->printBlockFreq(dbgs(), Pred) << " freq\n");
if (Pred->succ_size() > 1)
continue;
BlockFrequency PredFreq = MBFI->getBlockFreq(Pred);
if (!BestPred || PredFreq > BestPredFreq ||
(!(PredFreq < BestPredFreq) &&
Pred->isLayoutSuccessor(L.getHeader()))) {
BestPred = Pred;
BestPredFreq = PredFreq;
}
}
// If no direct predecessor is fine, just use the loop header.
if (!BestPred)
return L.getHeader();
// Walk backwards through any straight line of predecessors.
while (BestPred->pred_size() == 1 &&
(*BestPred->pred_begin())->succ_size() == 1 &&
*BestPred->pred_begin() != L.getHeader())
BestPred = *BestPred->pred_begin();
DEBUG(dbgs() << " final top: " << getBlockName(BestPred) << "\n");
return BestPred;
}
/// \brief Find the best loop exiting block for layout.
///
/// This routine analyzes the loop looking for the best block to make the
/// exiting block, so that rotating the loop afterwards places that exit at
/// the bottom. Typically this is done to maximize fallthrough opportunities.
MachineBasicBlock *
MachineBlockPlacement::findBestLoopExit(MachineFunction &F, MachineLoop &L,
const BlockFilterSet &LoopBlockSet) {
// We don't want to layout the loop linearly in all cases. If the loop header
// is just a normal basic block in the loop, we want to look for what block
// within the loop is the best one to layout at the top. However, if the loop
/// header has been pre-merged into a chain due to predecessors not having
// analyzable branches, *and* the predecessor it is merged with is *not* part
// of the loop, rotating the header into the middle of the loop will create
// a non-contiguous range of blocks which is Very Bad. So start with the
// header and only rotate if safe.
BlockChain &HeaderChain = *BlockToChain[L.getHeader()];
if (!LoopBlockSet.count(*HeaderChain.begin()))
return nullptr;
BlockFrequency BestExitEdgeFreq;
unsigned BestExitLoopDepth = 0;
MachineBasicBlock *ExitingBB = nullptr;
// If there are exits to outer loops, loop rotation can severely limit
/// fallthrough opportunities unless it selects such an exit. Keep a set of
// blocks where rotating to exit with that block will reach an outer loop.
SmallPtrSet<MachineBasicBlock *, 4> BlocksExitingToOuterLoop;
DEBUG(dbgs() << "Finding best loop exit for: " << getBlockName(L.getHeader())
<< "\n");
for (MachineBasicBlock *MBB : L.getBlocks()) {
BlockChain &Chain = *BlockToChain[MBB];
// Ensure that this block is at the end of a chain; otherwise it could be
// mid-way through an inner loop or a successor of an unanalyzable branch.
if (MBB != *std::prev(Chain.end()))
continue;
// Now walk the successors. We need to establish whether this has a viable
// exiting successor and whether it has a viable non-exiting successor.
// We store the old exiting state and restore it if a viable looping
// successor isn't found.
MachineBasicBlock *OldExitingBB = ExitingBB;
BlockFrequency OldBestExitEdgeFreq = BestExitEdgeFreq;
bool HasLoopingSucc = false;
// FIXME: Due to the performance of the probability and weight routines in
// the MBPI analysis, we use the internal weights and manually compute the
// probabilities to avoid quadratic behavior.
uint32_t WeightScale = 0;
uint32_t SumWeight = MBPI->getSumForBlock(MBB, WeightScale);
for (MachineBasicBlock *Succ : MBB->successors()) {
if (Succ->isLandingPad())
continue;
if (Succ == MBB)
continue;
BlockChain &SuccChain = *BlockToChain[Succ];
// Don't split chains, either this chain or the successor's chain.
if (&Chain == &SuccChain) {
DEBUG(dbgs() << " exiting: " << getBlockName(MBB) << " -> "
<< getBlockName(Succ) << " (chain conflict)\n");
continue;
}
uint32_t SuccWeight = MBPI->getEdgeWeight(MBB, Succ);
if (LoopBlockSet.count(Succ)) {
DEBUG(dbgs() << " looping: " << getBlockName(MBB) << " -> "
<< getBlockName(Succ) << " (" << SuccWeight << ")\n");
HasLoopingSucc = true;
continue;
}
unsigned SuccLoopDepth = 0;
if (MachineLoop *ExitLoop = MLI->getLoopFor(Succ)) {
SuccLoopDepth = ExitLoop->getLoopDepth();
if (ExitLoop->contains(&L))
BlocksExitingToOuterLoop.insert(MBB);
}
BranchProbability SuccProb(SuccWeight / WeightScale, SumWeight);
BlockFrequency ExitEdgeFreq = MBFI->getBlockFreq(MBB) * SuccProb;
DEBUG(dbgs() << " exiting: " << getBlockName(MBB) << " -> "
<< getBlockName(Succ) << " [L:" << SuccLoopDepth << "] (";
MBFI->printBlockFreq(dbgs(), ExitEdgeFreq) << ")\n");
// Note that we bias this toward an existing layout successor to retain
// incoming order in the absence of better information. The exit must have
// a frequency higher than the current exit before we consider breaking
// the layout.
BranchProbability Bias(100 - ExitBlockBias, 100);
if (!ExitingBB || SuccLoopDepth > BestExitLoopDepth ||
ExitEdgeFreq > BestExitEdgeFreq ||
(MBB->isLayoutSuccessor(Succ) &&
!(ExitEdgeFreq < BestExitEdgeFreq * Bias))) {
BestExitEdgeFreq = ExitEdgeFreq;
ExitingBB = MBB;
}
}
if (!HasLoopingSucc) {
// Restore the old exiting state, no viable looping successor was found.
ExitingBB = OldExitingBB;
BestExitEdgeFreq = OldBestExitEdgeFreq;
continue;
}
}
// Without a candidate exiting block or with only a single block in the
// loop, just use the loop header to layout the loop.
if (!ExitingBB || L.getNumBlocks() == 1)
return nullptr;
// Also, if we have exit blocks which lead to outer loops but didn't select
// one of them as the exiting block we are rotating toward, disable loop
// rotation altogether.
if (!BlocksExitingToOuterLoop.empty() &&
!BlocksExitingToOuterLoop.count(ExitingBB))
return nullptr;
DEBUG(dbgs() << " Best exiting block: " << getBlockName(ExitingBB) << "\n");
return ExitingBB;
}
/// \brief Attempt to rotate an exiting block to the bottom of the loop.
///
/// Once we have built a chain, try to rotate it to line up the hot exit block
/// with fallthrough out of the loop if doing so doesn't introduce unnecessary
/// branches. For example, if the loop has fallthrough into its header and out
/// of its bottom already, don't rotate it.
void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
MachineBasicBlock *ExitingBB,
const BlockFilterSet &LoopBlockSet) {
if (!ExitingBB)
return;
MachineBasicBlock *Top = *LoopChain.begin();
bool ViableTopFallthrough = false;
for (MachineBasicBlock *Pred : Top->predecessors()) {
BlockChain *PredChain = BlockToChain[Pred];
if (!LoopBlockSet.count(Pred) &&
(!PredChain || Pred == *std::prev(PredChain->end()))) {
ViableTopFallthrough = true;
break;
}
}
// If the header has viable fallthrough, check whether the current loop
// bottom is a viable exiting block. If so, bail out as rotating will
// introduce an unnecessary branch.
if (ViableTopFallthrough) {
MachineBasicBlock *Bottom = *std::prev(LoopChain.end());
for (MachineBasicBlock *Succ : Bottom->successors()) {
BlockChain *SuccChain = BlockToChain[Succ];
if (!LoopBlockSet.count(Succ) &&
(!SuccChain || Succ == *SuccChain->begin()))
return;
}
}
BlockChain::iterator ExitIt =
std::find(LoopChain.begin(), LoopChain.end(), ExitingBB);
if (ExitIt == LoopChain.end())
return;
std::rotate(LoopChain.begin(), std::next(ExitIt), LoopChain.end());
}
/// \brief Forms basic block chains from the natural loop structures.
///
/// These chains are designed to preserve the existing *structure* of the code
/// as much as possible. We can then stitch the chains together in a way which
/// both preserves the topological structure and minimizes taken conditional
/// branches.
void MachineBlockPlacement::buildLoopChains(MachineFunction &F,
MachineLoop &L) {
// First recurse through any nested loops, building chains for those inner
// loops.
for (MachineLoop *InnerLoop : L)
buildLoopChains(F, *InnerLoop);
SmallVector<MachineBasicBlock *, 16> BlockWorkList;
BlockFilterSet LoopBlockSet(L.block_begin(), L.block_end());
// First check to see if there is an obviously preferable top block for the
// loop. This will default to the header, but may end up as one of the
// predecessors to the header if there is one which will result in strictly
// fewer branches in the loop body.
MachineBasicBlock *LoopTop = findBestLoopTop(L, LoopBlockSet);
// If we selected just the header for the loop top, look for a potentially
// profitable exit block in the event that rotating the loop can eliminate
// branches by placing an exit edge at the bottom.
MachineBasicBlock *ExitingBB = nullptr;
if (LoopTop == L.getHeader())
ExitingBB = findBestLoopExit(F, L, LoopBlockSet);
BlockChain &LoopChain = *BlockToChain[LoopTop];
// FIXME: This is a really lame way of walking the chains in the loop: we
// walk the blocks, and use a set to prevent visiting a particular chain
// twice.
SmallPtrSet<BlockChain *, 4> UpdatedPreds;
assert(LoopChain.LoopPredecessors == 0);
UpdatedPreds.insert(&LoopChain);
for (MachineBasicBlock *LoopBB : L.getBlocks()) {
BlockChain &Chain = *BlockToChain[LoopBB];
if (!UpdatedPreds.insert(&Chain).second)
continue;
assert(Chain.LoopPredecessors == 0);
for (MachineBasicBlock *ChainBB : Chain) {
assert(BlockToChain[ChainBB] == &Chain);
for (MachineBasicBlock *Pred : ChainBB->predecessors()) {
if (BlockToChain[Pred] == &Chain || !LoopBlockSet.count(Pred))
continue;
++Chain.LoopPredecessors;
}
}
if (Chain.LoopPredecessors == 0)
BlockWorkList.push_back(*Chain.begin());
}
buildChain(LoopTop, LoopChain, BlockWorkList, &LoopBlockSet);
rotateLoop(LoopChain, ExitingBB, LoopBlockSet);
DEBUG({
// Crash at the end so we get all of the debugging output first.
bool BadLoop = false;
if (LoopChain.LoopPredecessors) {
BadLoop = true;
dbgs() << "Loop chain contains a block without its preds placed!\n"
<< " Loop header: " << getBlockName(*L.block_begin()) << "\n"
<< " Chain header: " << getBlockName(*LoopChain.begin()) << "\n";
}
for (MachineBasicBlock *ChainBB : LoopChain) {
dbgs() << " ... " << getBlockName(ChainBB) << "\n";
if (!LoopBlockSet.erase(ChainBB)) {
// We don't mark the loop as bad here because there are real situations
// where this can occur. For example, with an unanalyzable fallthrough
// from a loop block to a non-loop block or vice versa.
dbgs() << "Loop chain contains a block not contained by the loop!\n"
<< " Loop header: " << getBlockName(*L.block_begin()) << "\n"
<< " Chain header: " << getBlockName(*LoopChain.begin()) << "\n"
<< " Bad block: " << getBlockName(ChainBB) << "\n";
}
}
if (!LoopBlockSet.empty()) {
BadLoop = true;
for (MachineBasicBlock *LoopBB : LoopBlockSet)
dbgs() << "Loop contains blocks never placed into a chain!\n"
<< " Loop header: " << getBlockName(*L.block_begin()) << "\n"
<< " Chain header: " << getBlockName(*LoopChain.begin()) << "\n"
<< " Bad block: " << getBlockName(LoopBB) << "\n";
}
assert(!BadLoop && "Detected problems with the placement of this loop.");
});
}
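/// \brief Build chains for the entire function and splice the blocks into
/// their final order.
///
/// Blocks with unanalyzable fallthrough are pre-merged so their behavior is
/// preserved, loop chains are formed innermost-first, the remaining blocks
/// are chained starting from the entry block, and finally the blocks are
/// spliced into place and the terminators of reordered blocks are updated.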
void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// Ensure that every BB in the function has an associated chain to simplify
// the assumptions of the remaining algorithm.
SmallVector<MachineOperand, 4> Cond; // For AnalyzeBranch.
for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
MachineBasicBlock *BB = FI;
BlockChain *Chain =
new (ChainAllocator.Allocate()) BlockChain(BlockToChain, BB);
// Also, merge any blocks which we cannot reason about and must preserve
// the exact fallthrough behavior for.
for (;;) {
Cond.clear();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr; // For AnalyzeBranch.
if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond) || !FI->canFallThrough())
break;
MachineFunction::iterator NextFI(std::next(FI));
MachineBasicBlock *NextBB = NextFI;
// Ensure that the layout successor is a viable block, as we know that
// fallthrough is a possibility.
assert(NextFI != FE && "Can't fallthrough past the last block.");
DEBUG(dbgs() << "Pre-merging due to unanalyzable fallthrough: "
<< getBlockName(BB) << " -> " << getBlockName(NextBB)
<< "\n");
Chain->merge(NextBB, nullptr);
FI = NextFI;
BB = NextBB;
}
}
if (OutlineOptionalBranches) {
// Find the nearest common dominator of all of F's terminators.
MachineBasicBlock *Terminator = nullptr;
for (MachineBasicBlock &MBB : F) {
if (MBB.succ_size() == 0) {
if (Terminator == nullptr)
Terminator = &MBB;
else
Terminator = MDT->findNearestCommonDominator(Terminator, &MBB);
}
}
// MBBs dominating this common dominator are unavoidable.
UnavoidableBlocks.clear();
for (MachineBasicBlock &MBB : F) {
if (MDT->dominates(&MBB, Terminator)) {
UnavoidableBlocks.insert(&MBB);
}
}
}
// Build any loop-based chains.
for (MachineLoop *L : *MLI)
buildLoopChains(F, *L);
SmallVector<MachineBasicBlock *, 16> BlockWorkList;
SmallPtrSet<BlockChain *, 4> UpdatedPreds;
for (MachineBasicBlock &MBB : F) {
BlockChain &Chain = *BlockToChain[&MBB];
if (!UpdatedPreds.insert(&Chain).second)
continue;
assert(Chain.LoopPredecessors == 0);
for (MachineBasicBlock *ChainBB : Chain) {
assert(BlockToChain[ChainBB] == &Chain);
for (MachineBasicBlock *Pred : ChainBB->predecessors()) {
if (BlockToChain[Pred] == &Chain)
continue;
++Chain.LoopPredecessors;
}
}
if (Chain.LoopPredecessors == 0)
BlockWorkList.push_back(*Chain.begin());
}
BlockChain &FunctionChain = *BlockToChain[&F.front()];
buildChain(&F.front(), FunctionChain, BlockWorkList);
#ifndef NDEBUG
typedef SmallPtrSet<MachineBasicBlock *, 16> FunctionBlockSetType;
#endif
DEBUG({
// Crash at the end so we get all of the debugging output first.
bool BadFunc = false;
FunctionBlockSetType FunctionBlockSet;
for (MachineBasicBlock &MBB : F)
FunctionBlockSet.insert(&MBB);
for (MachineBasicBlock *ChainBB : FunctionChain)
if (!FunctionBlockSet.erase(ChainBB)) {
BadFunc = true;
dbgs() << "Function chain contains a block not in the function!\n"
<< " Bad block: " << getBlockName(ChainBB) << "\n";
}
if (!FunctionBlockSet.empty()) {
BadFunc = true;
for (MachineBasicBlock *RemainingBB : FunctionBlockSet)
dbgs() << "Function contains blocks never placed into a chain!\n"
<< " Bad block: " << getBlockName(RemainingBB) << "\n";
}
assert(!BadFunc && "Detected problems with the block placement.");
});
// Splice the blocks into place.
MachineFunction::iterator InsertPos = F.begin();
for (MachineBasicBlock *ChainBB : FunctionChain) {
DEBUG(dbgs() << (ChainBB == *FunctionChain.begin() ? "Placing chain "
: " ... ")
<< getBlockName(ChainBB) << "\n");
if (InsertPos != MachineFunction::iterator(ChainBB))
F.splice(InsertPos, ChainBB);
else
++InsertPos;
// Update the terminator of the previous block.
if (ChainBB == *FunctionChain.begin())
continue;
MachineBasicBlock *PrevBB = std::prev(MachineFunction::iterator(ChainBB));
    // FIXME: It would be awesome if updateTerminator would just return rather
    // than assert when the branch cannot be analyzed in order to remove this
    // boilerplate.
Cond.clear();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr; // For AnalyzeBranch.
if (!TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond)) {
// The "PrevBB" is not yet updated to reflect current code layout, so,
// o. it may fall-through to a block without explict "goto" instruction
// before layout, and no longer fall-through it after layout; or
// o. just opposite.
//
// AnalyzeBranch() may return erroneous value for FBB when these two
// situations take place. For the first scenario FBB is mistakenly set
// NULL; for the 2nd scenario, the FBB, which is expected to be NULL,
// is mistakenly pointing to "*BI".
//
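      // Hedged sketch with hypothetical blocks: if PrevBB ended in
      // "bcc ThenBB" and fell through to ElseBB before layout, AnalyzeBranch
      // still reports FBB == NULL even after layout has moved ElseBB away and
      // an explicit branch is now required; the update below repairs that.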
bool needUpdateBr = true;
if (!Cond.empty() && (!FBB || FBB == ChainBB)) {
PrevBB->updateTerminator();
needUpdateBr = false;
Cond.clear();
TBB = FBB = nullptr;
if (TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond)) {
// FIXME: This should never take place.
TBB = FBB = nullptr;
}
}
// If PrevBB has a two-way branch, try to re-order the branches
// such that we branch to the successor with higher weight first.
if (TBB && !Cond.empty() && FBB &&
MBPI->getEdgeWeight(PrevBB, FBB) > MBPI->getEdgeWeight(PrevBB, TBB) &&
!TII->ReverseBranchCondition(Cond)) {
DEBUG(dbgs() << "Reverse order of the two branches: "
<< getBlockName(PrevBB) << "\n");
DEBUG(dbgs() << " Edge weight: " << MBPI->getEdgeWeight(PrevBB, FBB)
<< " vs " << MBPI->getEdgeWeight(PrevBB, TBB) << "\n");
DebugLoc dl; // FIXME: this is nowhere
TII->RemoveBranch(*PrevBB);
TII->InsertBranch(*PrevBB, FBB, TBB, Cond, dl);
needUpdateBr = true;
}
if (needUpdateBr)
PrevBB->updateTerminator();
}
}
// Fixup the last block.
Cond.clear();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr; // For AnalyzeBranch.
if (!TII->AnalyzeBranch(F.back(), TBB, FBB, Cond))
F.back().updateTerminator();
// Walk through the backedges of the function now that we have fully laid out
// the basic blocks and align the destination of each backedge. We don't rely
// exclusively on the loop info here so that we can align backedges in
// unnatural CFGs and backedges that were introduced purely because of the
// loop rotations done during this layout pass.
if (F.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
return;
if (FunctionChain.begin() == FunctionChain.end())
return; // Empty chain.
const BranchProbability ColdProb(1, 5); // 20%
BlockFrequency EntryFreq = MBFI->getBlockFreq(F.begin());
BlockFrequency WeightedEntryFreq = EntryFreq * ColdProb;
for (MachineBasicBlock *ChainBB : FunctionChain) {
if (ChainBB == *FunctionChain.begin())
continue;
// Don't align non-looping basic blocks. These are unlikely to execute
// enough times to matter in practice. Note that we'll still handle
// unnatural CFGs inside of a natural outer loop (the common case) and
// rotated loops.
MachineLoop *L = MLI->getLoopFor(ChainBB);
if (!L)
continue;
unsigned Align = TLI->getPrefLoopAlignment(L);
if (!Align)
continue; // Don't care about loop alignment.
// If the block is cold relative to the function entry don't waste space
// aligning it.
BlockFrequency Freq = MBFI->getBlockFreq(ChainBB);
if (Freq < WeightedEntryFreq)
continue;
// If the block is cold relative to its loop header, don't align it
// regardless of what edges into the block exist.
MachineBasicBlock *LoopHeader = L->getHeader();
BlockFrequency LoopHeaderFreq = MBFI->getBlockFreq(LoopHeader);
if (Freq < (LoopHeaderFreq * ColdProb))
continue;
// Check for the existence of a non-layout predecessor which would benefit
// from aligning this block.
MachineBasicBlock *LayoutPred =
&*std::prev(MachineFunction::iterator(ChainBB));
// Force alignment if all the predecessors are jumps. We already checked
// that the block isn't cold above.
if (!LayoutPred->isSuccessor(ChainBB)) {
ChainBB->setAlignment(Align);
continue;
}
// Align this block if the layout predecessor's edge into this block is
// cold relative to the block. When this is true, other predecessors make up
// all of the hot entries into the block and thus alignment is likely to be
// important.
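    // Numeric sketch (hypothetical frequencies): with Freq = 100 and a layout
    // edge contributing only 15, 15 <= 100 * 1/5 holds, so the hot entries
    // arrive via taken branches and alignment is worthwhile.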
BranchProbability LayoutProb =
MBPI->getEdgeProbability(LayoutPred, ChainBB);
BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
if (LayoutEdgeFreq <= (Freq * ColdProb))
ChainBB->setAlignment(Align);
}
}
bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) {
// Check for single-block functions and skip them.
if (std::next(F.begin()) == F.end())
return false;
if (skipOptnoneFunction(*F.getFunction()))
return false;
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
MLI = &getAnalysis<MachineLoopInfo>();
TII = F.getSubtarget().getInstrInfo();
TLI = F.getSubtarget().getTargetLowering();
MDT = &getAnalysis<MachineDominatorTree>();
assert(BlockToChain.empty());
buildCFGChains(F);
BlockToChain.clear();
ChainAllocator.DestroyAll();
if (AlignAllBlock)
// Align all of the blocks in the function to a specific alignment.
for (MachineBasicBlock &MBB : F)
MBB.setAlignment(AlignAllBlock);
// We always return true as we have no way to track whether the final order
// differs from the original order.
return true;
}
namespace {
/// \brief A pass to compute block placement statistics.
///
/// A separate pass to compute interesting statistics for evaluating block
/// placement. This is separate from the actual placement pass so that they can
/// be computed in the absence of any placement transformations or when using
/// alternative placement strategies.
class MachineBlockPlacementStats : public MachineFunctionPass {
/// \brief A handle to the branch probability pass.
const MachineBranchProbabilityInfo *MBPI;
/// \brief A handle to the function-wide block frequency pass.
const MachineBlockFrequencyInfo *MBFI;
public:
static char ID; // Pass identification, replacement for typeid
MachineBlockPlacementStats() : MachineFunctionPass(ID) {
initializeMachineBlockPlacementStatsPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
}
char MachineBlockPlacementStats::ID = 0;
char &llvm::MachineBlockPlacementStatsID = MachineBlockPlacementStats::ID;
INITIALIZE_PASS_BEGIN(MachineBlockPlacementStats, "block-placement-stats",
"Basic Block Placement Stats", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
INITIALIZE_PASS_END(MachineBlockPlacementStats, "block-placement-stats",
"Basic Block Placement Stats", false, false)
bool MachineBlockPlacementStats::runOnMachineFunction(MachineFunction &F) {
// Check for single-block functions and skip them.
if (std::next(F.begin()) == F.end())
return false;
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
for (MachineBasicBlock &MBB : F) {
BlockFrequency BlockFreq = MBFI->getBlockFreq(&MBB);
Statistic &NumBranches =
(MBB.succ_size() > 1) ? NumCondBranches : NumUncondBranches;
Statistic &BranchTakenFreq =
(MBB.succ_size() > 1) ? CondBranchTakenFreq : UncondBranchTakenFreq;
for (MachineBasicBlock *Succ : MBB.successors()) {
// Skip if this successor is a fallthrough.
if (MBB.isLayoutSuccessor(Succ))
continue;
BlockFrequency EdgeFreq =
BlockFreq * MBPI->getEdgeProbability(&MBB, Succ);
++NumBranches;
BranchTakenFreq += EdgeFreq.getFrequency();
}
}
return false;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/OcamlGC.cpp | //===-- OcamlGC.cpp - Ocaml frametable GC strategy ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering for the llvm.gc* intrinsics compatible with
// Objective Caml 3.10.0, which uses a liveness-accurate static stack map.
//
// The frametable emitter is in OcamlGCPrinter.cpp.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCs.h"
#include "llvm/CodeGen/GCStrategy.h"
using namespace llvm;
namespace {
class OcamlGC : public GCStrategy {
public:
OcamlGC();
};
}
static GCRegistry::Add<OcamlGC> X("ocaml", "ocaml 3.10-compatible GC");
void llvm::linkOcamlGC() {}
OcamlGC::OcamlGC() {
NeededSafePoints = 1 << GC::PostCall;
UsesMetadata = true;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/DwarfEHPrepare.cpp | //===-- DwarfEHPrepare - Prepare exception handling for code generation ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass mulches exception handling code into a form adapted to code
// generation. Required if using dwarf exception handling.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LibCallSemantics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
#define DEBUG_TYPE "dwarfehprepare"
STATISTIC(NumResumesLowered, "Number of resume calls lowered");
namespace {
class DwarfEHPrepare : public FunctionPass {
const TargetMachine *TM;
// RewindFunction - _Unwind_Resume or the target equivalent.
Constant *RewindFunction;
DominatorTree *DT;
const TargetLowering *TLI;
bool InsertUnwindResumeCalls(Function &Fn);
Value *GetExceptionObject(ResumeInst *RI);
size_t
pruneUnreachableResumes(Function &Fn,
SmallVectorImpl<ResumeInst *> &Resumes,
SmallVectorImpl<LandingPadInst *> &CleanupLPads);
public:
static char ID; // Pass identification, replacement for typeid.
// INITIALIZE_TM_PASS requires a default constructor, but it isn't used in
// practice.
DwarfEHPrepare()
: FunctionPass(ID), TM(nullptr), RewindFunction(nullptr), DT(nullptr),
TLI(nullptr) {}
DwarfEHPrepare(const TargetMachine *TM)
: FunctionPass(ID), TM(TM), RewindFunction(nullptr), DT(nullptr),
TLI(nullptr) {}
bool runOnFunction(Function &Fn) override;
bool doFinalization(Module &M) override {
RewindFunction = nullptr;
return false;
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
StringRef getPassName() const override {
return "Exception handling preparation";
}
};
} // end anonymous namespace
char DwarfEHPrepare::ID = 0;
INITIALIZE_TM_PASS_BEGIN(DwarfEHPrepare, "dwarfehprepare",
"Prepare DWARF exceptions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_TM_PASS_END(DwarfEHPrepare, "dwarfehprepare",
"Prepare DWARF exceptions", false, false)
FunctionPass *llvm::createDwarfEHPass(const TargetMachine *TM) {
return new DwarfEHPrepare(TM);
}
void DwarfEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
}
/// GetExceptionObject - Return the exception object from the value passed into
/// the 'resume' instruction (typically an aggregate). Clean up any dead
/// instructions, including the 'resume' instruction.
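/// For illustration (hedged IR sketch): if the operand was built as
///   %a = insertvalue { i8*, i32 } undef, i8* %exn, 0
///   %b = insertvalue { i8*, i32 } %a, i32 %sel, 1
/// then %exn is returned directly and the dead insertvalues are erased;
/// otherwise an extractvalue of field 0 is materialized before the resume.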
Value *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
Value *V = RI->getOperand(0);
Value *ExnObj = nullptr;
InsertValueInst *SelIVI = dyn_cast<InsertValueInst>(V);
LoadInst *SelLoad = nullptr;
InsertValueInst *ExcIVI = nullptr;
bool EraseIVIs = false;
if (SelIVI) {
if (SelIVI->getNumIndices() == 1 && *SelIVI->idx_begin() == 1) {
ExcIVI = dyn_cast<InsertValueInst>(SelIVI->getOperand(0));
if (ExcIVI && isa<UndefValue>(ExcIVI->getOperand(0)) &&
ExcIVI->getNumIndices() == 1 && *ExcIVI->idx_begin() == 0) {
ExnObj = ExcIVI->getOperand(1);
SelLoad = dyn_cast<LoadInst>(SelIVI->getOperand(1));
EraseIVIs = true;
}
}
}
if (!ExnObj)
ExnObj = ExtractValueInst::Create(RI->getOperand(0), 0, "exn.obj", RI);
RI->eraseFromParent();
if (EraseIVIs) {
if (SelIVI->use_empty())
SelIVI->eraseFromParent();
if (ExcIVI->use_empty())
ExcIVI->eraseFromParent();
if (SelLoad && SelLoad->use_empty())
SelLoad->eraseFromParent();
}
return ExnObj;
}
/// Replace resumes that are not reachable from a cleanup landing pad with
/// unreachable and then simplify those blocks.
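/// (Sketch: a resume that no cleanup landing pad can reach is dead EH code;
/// it is replaced by an 'unreachable' terminator and SimplifyCFG is run to
/// fold the block away.)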
size_t DwarfEHPrepare::pruneUnreachableResumes(
Function &Fn, SmallVectorImpl<ResumeInst *> &Resumes,
SmallVectorImpl<LandingPadInst *> &CleanupLPads) {
BitVector ResumeReachable(Resumes.size());
size_t ResumeIndex = 0;
for (auto *RI : Resumes) {
for (auto *LP : CleanupLPads) {
if (isPotentiallyReachable(LP, RI, DT)) {
ResumeReachable.set(ResumeIndex);
break;
}
}
++ResumeIndex;
}
// If everything is reachable, there is no change.
if (ResumeReachable.all())
return Resumes.size();
const TargetTransformInfo &TTI =
getAnalysis<TargetTransformInfoWrapperPass>().getTTI(Fn);
LLVMContext &Ctx = Fn.getContext();
// Otherwise, insert unreachable instructions and call simplifycfg.
size_t ResumesLeft = 0;
for (size_t I = 0, E = Resumes.size(); I < E; ++I) {
ResumeInst *RI = Resumes[I];
if (ResumeReachable[I]) {
Resumes[ResumesLeft++] = RI;
} else {
BasicBlock *BB = RI->getParent();
new UnreachableInst(Ctx, RI);
RI->eraseFromParent();
SimplifyCFG(BB, TTI, 1);
}
}
Resumes.resize(ResumesLeft);
return ResumesLeft;
}
/// InsertUnwindResumeCalls - Convert the ResumeInsts that are still present
/// into calls to the appropriate _Unwind_Resume function.
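/// A hedged sketch of the single-resume rewrite (illustrative IR):
///   resume { i8*, i32 } %lp
/// becomes roughly
///   %exn.obj = extractvalue { i8*, i32 } %lp, 0
///   call void @_Unwind_Resume(i8* %exn.obj)
///   unreachable
/// With multiple resumes, each one instead branches to a shared block whose
/// PHI node feeds a single _Unwind_Resume call.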
bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
SmallVector<ResumeInst*, 16> Resumes;
SmallVector<LandingPadInst*, 16> CleanupLPads;
for (BasicBlock &BB : Fn) {
if (auto *RI = dyn_cast<ResumeInst>(BB.getTerminator()))
Resumes.push_back(RI);
if (auto *LP = BB.getLandingPadInst())
if (LP->isCleanup())
CleanupLPads.push_back(LP);
}
if (Resumes.empty())
return false;
// Check the personality, don't do anything if it's for MSVC.
EHPersonality Pers = classifyEHPersonality(Fn.getPersonalityFn());
if (isMSVCEHPersonality(Pers))
return false;
LLVMContext &Ctx = Fn.getContext();
size_t ResumesLeft = pruneUnreachableResumes(Fn, Resumes, CleanupLPads);
if (ResumesLeft == 0)
return true; // We pruned them all.
// Find the rewind function if we didn't already.
if (!RewindFunction) {
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
Type::getInt8PtrTy(Ctx), false);
const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
RewindFunction = Fn.getParent()->getOrInsertFunction(RewindName, FTy);
}
// Create the basic block where the _Unwind_Resume call will live.
if (ResumesLeft == 1) {
// Instead of creating a new BB and PHI node, just append the call to
// _Unwind_Resume to the end of the single resume block.
ResumeInst *RI = Resumes.front();
BasicBlock *UnwindBB = RI->getParent();
Value *ExnObj = GetExceptionObject(RI);
// Call the _Unwind_Resume function.
CallInst *CI = CallInst::Create(RewindFunction, ExnObj, "", UnwindBB);
CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
// We never expect _Unwind_Resume to return.
new UnreachableInst(Ctx, UnwindBB);
return true;
}
BasicBlock *UnwindBB = BasicBlock::Create(Ctx, "unwind_resume", &Fn);
PHINode *PN = PHINode::Create(Type::getInt8PtrTy(Ctx), ResumesLeft,
"exn.obj", UnwindBB);
// Extract the exception object from the ResumeInst and add it to the PHI node
// that feeds the _Unwind_Resume call.
for (ResumeInst *RI : Resumes) {
BasicBlock *Parent = RI->getParent();
BranchInst::Create(UnwindBB, Parent);
Value *ExnObj = GetExceptionObject(RI);
PN->addIncoming(ExnObj, Parent);
++NumResumesLowered;
}
// Call the function.
CallInst *CI = CallInst::Create(RewindFunction, PN, "", UnwindBB);
CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
// We never expect _Unwind_Resume to return.
new UnreachableInst(Ctx, UnwindBB);
return true;
}
bool DwarfEHPrepare::runOnFunction(Function &Fn) {
assert(TM && "DWARF EH preparation requires a target machine");
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
bool Changed = InsertUnwindResumeCalls(Fn);
DT = nullptr;
TLI = nullptr;
return Changed;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveDebugVariables.cpp | //===- LiveDebugVariables.cpp - Tracking debug info variables -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveDebugVariables analysis.
//
// Remove all DBG_VALUE instructions referencing virtual registers and replace
// them with a data structure tracking where live user variables are kept - in a
// virtual register or in a stack slot.
//
// Allow the data structure to be updated during register allocation when values
// are moved between registers and stack slots. Finally emit new DBG_VALUE
// instructions after register allocation is complete.
//
//===----------------------------------------------------------------------===//
#include "LiveDebugVariables.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <memory>
using namespace llvm;
#define DEBUG_TYPE "livedebug"
static cl::opt<bool>
EnableLDV("live-debug-variables", cl::init(true),
cl::desc("Enable the live debug variables pass"), cl::Hidden);
STATISTIC(NumInsertedDebugValues, "Number of DBG_VALUEs inserted");
char LiveDebugVariables::ID = 0;
INITIALIZE_PASS_BEGIN(LiveDebugVariables, "livedebugvars",
"Debug Variable Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(LiveDebugVariables, "livedebugvars",
"Debug Variable Analysis", false, false)
void LiveDebugVariables::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineDominatorTree>();
AU.addRequiredTransitive<LiveIntervals>();
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
LiveDebugVariables::LiveDebugVariables()
    : MachineFunctionPass(ID), pImpl(nullptr) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
}
/// LocMap - Map of where a user value is live, and its location.
typedef IntervalMap<SlotIndex, unsigned, 4> LocMap;
namespace {
/// UserValueScopes - Keeps track of lexical scopes associated with a
/// user value's source location.
class UserValueScopes {
DebugLoc DL;
LexicalScopes &LS;
SmallPtrSet<const MachineBasicBlock *, 4> LBlocks;
public:
UserValueScopes(DebugLoc D, LexicalScopes &L) : DL(D), LS(L) {}
/// dominates - Return true if current scope dominates at least one machine
/// instruction in a given machine basic block.
bool dominates(MachineBasicBlock *MBB) {
if (LBlocks.empty())
LS.getMachineBasicBlocks(DL, LBlocks);
if (LBlocks.count(MBB) != 0 || LS.dominates(DL, MBB))
return true;
return false;
}
};
} // end anonymous namespace
/// UserValue - A user value is a part of a debug info user variable.
///
/// A DBG_VALUE instruction notes that (a sub-register of) a virtual register
/// holds part of a user variable. The part is identified by a byte offset.
///
/// UserValues are grouped into equivalence classes for easier searching. Two
/// user values are related if they refer to the same variable, or if they are
/// held by the same virtual register. The equivalence class is the transitive
/// closure of that relation.
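/// For example (hypothetical variables): DBG_VALUEs describing byte offsets 0
/// and 8 of one source variable yield two related UserValues, and a UserValue
/// for a different variable joins the same class if it happens to be held in
/// the same virtual register.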
namespace {
class LDVImpl;
class UserValue {
const MDNode *Variable; ///< The debug info variable we are part of.
const MDNode *Expression; ///< Any complex address expression.
unsigned offset; ///< Byte offset into variable.
bool IsIndirect; ///< true if this is a register-indirect+offset value.
DebugLoc dl; ///< The debug location for the variable. This is
///< used by dwarf writer to find lexical scope.
UserValue *leader; ///< Equivalence class leader.
UserValue *next; ///< Next value in equivalence class, or null.
/// Numbered locations referenced by locmap.
SmallVector<MachineOperand, 4> locations;
/// Map of slot indices where this value is live.
LocMap locInts;
/// coalesceLocation - After LocNo was changed, check if it has become
/// identical to another location, and coalesce them. This may cause LocNo or
/// a later location to be erased, but no earlier location will be erased.
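  /// Worked sketch (hypothetical locations): with locations = {%vreg1, fi#2,
  /// %vreg1} and LocNo = 2, Loc2 duplicates Loc0, so Loc2 is erased, interval
  /// values of 2 are rewritten to 0, and any value above 2 shifts down by one.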
void coalesceLocation(unsigned LocNo);
/// insertDebugValue - Insert a DBG_VALUE into MBB at Idx for LocNo.
void insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx, unsigned LocNo,
LiveIntervals &LIS, const TargetInstrInfo &TII);
/// splitLocation - Replace OldLocNo ranges with NewRegs ranges where NewRegs
/// is live. Returns true if any changes were made.
bool splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
LiveIntervals &LIS);
public:
/// UserValue - Create a new UserValue.
UserValue(const MDNode *var, const MDNode *expr, unsigned o, bool i,
DebugLoc L, LocMap::Allocator &alloc)
: Variable(var), Expression(expr), offset(o), IsIndirect(i), dl(L),
leader(this), next(nullptr), locInts(alloc) {}
/// getLeader - Get the leader of this value's equivalence class.
UserValue *getLeader() {
UserValue *l = leader;
while (l != l->leader)
l = l->leader;
return leader = l;
}
/// getNext - Return the next UserValue in the equivalence class.
UserValue *getNext() const { return next; }
/// match - Does this UserValue match the parameters?
bool match(const MDNode *Var, const MDNode *Expr, const DILocation *IA,
unsigned Offset, bool indirect) const {
return Var == Variable && Expr == Expression && dl->getInlinedAt() == IA &&
Offset == offset && indirect == IsIndirect;
}
/// merge - Merge equivalence classes.
static UserValue *merge(UserValue *L1, UserValue *L2) {
L2 = L2->getLeader();
if (!L1)
return L2;
L1 = L1->getLeader();
if (L1 == L2)
return L1;
// Splice L2 before L1's members.
UserValue *End = L2;
while (End->next)
End->leader = L1, End = End->next;
End->leader = L1;
End->next = L1->next;
L1->next = L2;
return L1;
}
/// getLocationNo - Return the location number that matches Loc.
unsigned getLocationNo(const MachineOperand &LocMO) {
if (LocMO.isReg()) {
if (LocMO.getReg() == 0)
return ~0u;
      // For register locations we don't care about use/def and other flags.
for (unsigned i = 0, e = locations.size(); i != e; ++i)
if (locations[i].isReg() &&
locations[i].getReg() == LocMO.getReg() &&
locations[i].getSubReg() == LocMO.getSubReg())
return i;
} else
for (unsigned i = 0, e = locations.size(); i != e; ++i)
if (LocMO.isIdenticalTo(locations[i]))
return i;
locations.push_back(LocMO);
// We are storing a MachineOperand outside a MachineInstr.
locations.back().clearParent();
// Don't store def operands.
if (locations.back().isReg())
locations.back().setIsUse();
return locations.size() - 1;
}
/// mapVirtRegs - Ensure that all virtual register locations are mapped.
void mapVirtRegs(LDVImpl *LDV);
/// addDef - Add a definition point to this value.
void addDef(SlotIndex Idx, const MachineOperand &LocMO) {
// Add a singular (Idx,Idx) -> Loc mapping.
LocMap::iterator I = locInts.find(Idx);
if (!I.valid() || I.start() != Idx)
I.insert(Idx, Idx.getNextSlot(), getLocationNo(LocMO));
else
// A later DBG_VALUE at the same SlotIndex overrides the old location.
I.setValue(getLocationNo(LocMO));
}
/// extendDef - Extend the current definition as far as possible down the
/// dominator tree. Stop when meeting an existing def or when leaving the live
/// range of VNI.
/// End points where VNI is no longer live are added to Kills.
/// @param Idx Starting point for the definition.
/// @param LocNo Location number to propagate.
/// @param LR Restrict liveness to where LR has the value VNI. May be null.
/// @param VNI When LR is not null, this is the value to restrict to.
/// @param Kills Append end points of VNI's live range to Kills.
/// @param LIS Live intervals analysis.
/// @param MDT Dominator tree.
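  /// Sketch with hypothetical slot indices: a def of LocNo at 16B whose
  /// vreg's live segment ends at 64r yields the mapping [16B;64r) -> LocNo,
  /// and 64r is appended to Kills so addDefsFromCopies can chase copies.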
void extendDef(SlotIndex Idx, unsigned LocNo,
LiveRange *LR, const VNInfo *VNI,
SmallVectorImpl<SlotIndex> *Kills,
LiveIntervals &LIS, MachineDominatorTree &MDT,
UserValueScopes &UVS);
  /// addDefsFromCopies - The value in LI/LocNo may be copied to other
/// registers. Determine if any of the copies are available at the kill
/// points, and add defs if possible.
/// @param LI Scan for copies of the value in LI->reg.
/// @param LocNo Location number of LI->reg.
/// @param Kills Points where the range of LocNo could be extended.
/// @param NewDefs Append (Idx, LocNo) of inserted defs here.
void addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
const SmallVectorImpl<SlotIndex> &Kills,
SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs,
MachineRegisterInfo &MRI,
LiveIntervals &LIS);
/// computeIntervals - Compute the live intervals of all locations after
/// collecting all their def points.
void computeIntervals(MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
LiveIntervals &LIS, MachineDominatorTree &MDT,
UserValueScopes &UVS);
/// splitRegister - Replace OldReg ranges with NewRegs ranges where NewRegs is
/// live. Returns true if any changes were made.
  bool splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
LiveIntervals &LIS);
/// rewriteLocations - Rewrite virtual register locations according to the
/// provided virtual register map.
void rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI);
/// emitDebugValues - Recreate DBG_VALUE instruction from data structures.
void emitDebugValues(VirtRegMap *VRM,
                       LiveIntervals &LIS, const TargetInstrInfo &TII);
/// getDebugLoc - Return DebugLoc of this UserValue.
DebugLoc getDebugLoc() { return dl;}
void print(raw_ostream &, const TargetRegisterInfo *);
};
} // namespace
/// LDVImpl - Implementation of the LiveDebugVariables pass.
namespace {
class LDVImpl {
LiveDebugVariables &pass;
LocMap::Allocator allocator;
MachineFunction *MF;
LiveIntervals *LIS;
LexicalScopes LS;
MachineDominatorTree *MDT;
const TargetRegisterInfo *TRI;
/// Whether emitDebugValues is called.
bool EmitDone;
/// Whether the machine function is modified during the pass.
bool ModifiedMF;
/// userValues - All allocated UserValue instances.
SmallVector<std::unique_ptr<UserValue>, 8> userValues;
/// Map virtual register to eq class leader.
typedef DenseMap<unsigned, UserValue*> VRMap;
VRMap virtRegToEqClass;
/// Map user variable to eq class leader.
typedef DenseMap<const MDNode *, UserValue*> UVMap;
UVMap userVarMap;
/// getUserValue - Find or create a UserValue.
UserValue *getUserValue(const MDNode *Var, const MDNode *Expr,
unsigned Offset, bool IsIndirect, DebugLoc DL);
/// lookupVirtReg - Find the EC leader for VirtReg or null.
UserValue *lookupVirtReg(unsigned VirtReg);
/// handleDebugValue - Add DBG_VALUE instruction to our maps.
/// @param MI DBG_VALUE instruction
  /// @param Idx Last valid SlotIndex before instruction.
/// @return True if the DBG_VALUE instruction should be deleted.
bool handleDebugValue(MachineInstr *MI, SlotIndex Idx);
/// collectDebugValues - Collect and erase all DBG_VALUE instructions, adding
/// a UserValue def for each instruction.
/// @param mf MachineFunction to be scanned.
/// @return True if any debug values were found.
bool collectDebugValues(MachineFunction &mf);
/// computeIntervals - Compute the live intervals of all user values after
/// collecting all their def points.
void computeIntervals();
public:
LDVImpl(LiveDebugVariables *ps)
: pass(*ps), MF(nullptr), EmitDone(false), ModifiedMF(false) {}
bool runOnMachineFunction(MachineFunction &mf);
/// clear - Release all memory.
void clear() {
MF = nullptr;
userValues.clear();
virtRegToEqClass.clear();
userVarMap.clear();
// Make sure we call emitDebugValues if the machine function was modified.
assert((!ModifiedMF || EmitDone) &&
"Dbg values are not emitted in LDV");
EmitDone = false;
ModifiedMF = false;
LS.reset();
}
/// mapVirtReg - Map virtual register to an equivalence class.
void mapVirtReg(unsigned VirtReg, UserValue *EC);
/// splitRegister - Replace all references to OldReg with NewRegs.
void splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs);
/// emitDebugValues - Recreate DBG_VALUE instruction from data structures.
void emitDebugValues(VirtRegMap *VRM);
void print(raw_ostream&);
};
} // namespace
static void printDebugLoc(DebugLoc DL, raw_ostream &CommentOS,
const LLVMContext &Ctx) {
if (!DL)
return;
auto *Scope = cast<DIScope>(DL.getScope());
// Omit the directory, because it's likely to be long and uninteresting.
CommentOS << Scope->getFilename();
CommentOS << ':' << DL.getLine();
if (DL.getCol() != 0)
CommentOS << ':' << DL.getCol();
DebugLoc InlinedAtDL = DL.getInlinedAt();
if (!InlinedAtDL)
return;
CommentOS << " @[ ";
printDebugLoc(InlinedAtDL, CommentOS, Ctx);
CommentOS << " ]";
}
static void printExtendedName(raw_ostream &OS, const DILocalVariable *V,
const DILocation *DL) {
const LLVMContext &Ctx = V->getContext();
StringRef Res = V->getName();
if (!Res.empty())
OS << Res << "," << V->getLine();
if (auto *InlinedAt = DL->getInlinedAt()) {
if (DebugLoc InlinedAtDL = InlinedAt) {
OS << " @[";
printDebugLoc(InlinedAtDL, OS, Ctx);
OS << "]";
}
}
}
void UserValue::print(raw_ostream &OS, const TargetRegisterInfo *TRI) {
auto *DV = cast<DILocalVariable>(Variable);
OS << "!\"";
printExtendedName(OS, DV, dl);
OS << "\"\t";
if (offset)
OS << '+' << offset;
for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I) {
OS << " [" << I.start() << ';' << I.stop() << "):";
if (I.value() == ~0u)
OS << "undef";
else
OS << I.value();
}
for (unsigned i = 0, e = locations.size(); i != e; ++i) {
OS << " Loc" << i << '=';
locations[i].print(OS, TRI);
}
OS << '\n';
}
void LDVImpl::print(raw_ostream &OS) {
OS << "********** DEBUG VARIABLES **********\n";
for (unsigned i = 0, e = userValues.size(); i != e; ++i)
userValues[i]->print(OS, TRI);
}
void UserValue::coalesceLocation(unsigned LocNo) {
unsigned KeepLoc = 0;
for (unsigned e = locations.size(); KeepLoc != e; ++KeepLoc) {
if (KeepLoc == LocNo)
continue;
if (locations[KeepLoc].isIdenticalTo(locations[LocNo]))
break;
}
// No matches.
if (KeepLoc == locations.size())
return;
// Keep the smaller location, erase the larger one.
unsigned EraseLoc = LocNo;
if (KeepLoc > EraseLoc)
std::swap(KeepLoc, EraseLoc);
locations.erase(locations.begin() + EraseLoc);
// Rewrite values.
for (LocMap::iterator I = locInts.begin(); I.valid(); ++I) {
unsigned v = I.value();
if (v == EraseLoc)
I.setValue(KeepLoc); // Coalesce when possible.
else if (v > EraseLoc)
I.setValueUnchecked(v-1); // Avoid coalescing with untransformed values.
}
}
void UserValue::mapVirtRegs(LDVImpl *LDV) {
for (unsigned i = 0, e = locations.size(); i != e; ++i)
if (locations[i].isReg() &&
TargetRegisterInfo::isVirtualRegister(locations[i].getReg()))
LDV->mapVirtReg(locations[i].getReg(), this);
}
UserValue *LDVImpl::getUserValue(const MDNode *Var, const MDNode *Expr,
unsigned Offset, bool IsIndirect,
DebugLoc DL) {
UserValue *&Leader = userVarMap[Var];
if (Leader) {
UserValue *UV = Leader->getLeader();
Leader = UV;
for (; UV; UV = UV->getNext())
if (UV->match(Var, Expr, DL->getInlinedAt(), Offset, IsIndirect))
return UV;
}
userValues.push_back(
make_unique<UserValue>(Var, Expr, Offset, IsIndirect, DL, allocator));
UserValue *UV = userValues.back().get();
Leader = UserValue::merge(Leader, UV);
return UV;
}
void LDVImpl::mapVirtReg(unsigned VirtReg, UserValue *EC) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Only map VirtRegs");
UserValue *&Leader = virtRegToEqClass[VirtReg];
Leader = UserValue::merge(Leader, EC);
}
UserValue *LDVImpl::lookupVirtReg(unsigned VirtReg) {
if (UserValue *UV = virtRegToEqClass.lookup(VirtReg))
return UV->getLeader();
return nullptr;
}
bool LDVImpl::handleDebugValue(MachineInstr *MI, SlotIndex Idx) {
// DBG_VALUE loc, offset, variable
if (MI->getNumOperands() != 4 ||
!(MI->getOperand(1).isReg() || MI->getOperand(1).isImm()) ||
!MI->getOperand(2).isMetadata()) {
DEBUG(dbgs() << "Can't handle " << *MI);
return false;
}
// Get or create the UserValue for (variable,offset).
bool IsIndirect = MI->isIndirectDebugValue();
unsigned Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
const MDNode *Var = MI->getDebugVariable();
const MDNode *Expr = MI->getDebugExpression();
UserValue *UV =
getUserValue(Var, Expr, Offset, IsIndirect, MI->getDebugLoc());
UV->addDef(Idx, MI->getOperand(0));
return true;
}
bool LDVImpl::collectDebugValues(MachineFunction &mf) {
bool Changed = false;
for (MachineFunction::iterator MFI = mf.begin(), MFE = mf.end(); MFI != MFE;
++MFI) {
MachineBasicBlock *MBB = MFI;
for (MachineBasicBlock::iterator MBBI = MBB->begin(), MBBE = MBB->end();
MBBI != MBBE;) {
if (!MBBI->isDebugValue()) {
++MBBI;
continue;
}
// DBG_VALUE has no slot index, use the previous instruction instead.
SlotIndex Idx = MBBI == MBB->begin() ?
LIS->getMBBStartIdx(MBB) :
LIS->getInstructionIndex(std::prev(MBBI)).getRegSlot();
// Handle consecutive DBG_VALUE instructions with the same slot index.
do {
if (handleDebugValue(MBBI, Idx)) {
MBBI = MBB->erase(MBBI);
Changed = true;
} else
++MBBI;
} while (MBBI != MBBE && MBBI->isDebugValue());
}
}
return Changed;
}
void UserValue::extendDef(SlotIndex Idx, unsigned LocNo,
LiveRange *LR, const VNInfo *VNI,
SmallVectorImpl<SlotIndex> *Kills,
LiveIntervals &LIS, MachineDominatorTree &MDT,
UserValueScopes &UVS) {
SmallVector<SlotIndex, 16> Todo;
Todo.push_back(Idx);
do {
SlotIndex Start = Todo.pop_back_val();
MachineBasicBlock *MBB = LIS.getMBBFromIndex(Start);
SlotIndex Stop = LIS.getMBBEndIdx(MBB);
LocMap::iterator I = locInts.find(Start);
// Limit to VNI's live range.
bool ToEnd = true;
if (LR && VNI) {
LiveInterval::Segment *Segment = LR->getSegmentContaining(Start);
if (!Segment || Segment->valno != VNI) {
if (Kills)
Kills->push_back(Start);
continue;
}
if (Segment->end < Stop)
Stop = Segment->end, ToEnd = false;
}
// There could already be a short def at Start.
if (I.valid() && I.start() <= Start) {
// Stop when meeting a different location or an already extended interval.
Start = Start.getNextSlot();
if (I.value() != LocNo || I.stop() != Start)
continue;
// This is a one-slot placeholder. Just skip it.
++I;
}
// Limited by the next def.
if (I.valid() && I.start() < Stop)
Stop = I.start(), ToEnd = false;
// Limited by VNI's live range.
else if (!ToEnd && Kills)
Kills->push_back(Stop);
if (Start >= Stop)
continue;
I.insert(Start, Stop, LocNo);
// If we extended to the MBB end, propagate down the dominator tree.
if (!ToEnd)
continue;
const std::vector<MachineDomTreeNode*> &Children =
MDT.getNode(MBB)->getChildren();
for (unsigned i = 0, e = Children.size(); i != e; ++i) {
MachineBasicBlock *MBB = Children[i]->getBlock();
if (UVS.dominates(MBB))
Todo.push_back(LIS.getMBBStartIdx(MBB));
}
} while (!Todo.empty());
}
void
UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
const SmallVectorImpl<SlotIndex> &Kills,
SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs,
MachineRegisterInfo &MRI, LiveIntervals &LIS) {
if (Kills.empty())
return;
// Don't track copies from physregs, there are too many uses.
if (!TargetRegisterInfo::isVirtualRegister(LI->reg))
return;
// Collect all the (vreg, valno) pairs that are copies of LI.
SmallVector<std::pair<LiveInterval*, const VNInfo*>, 8> CopyValues;
for (MachineOperand &MO : MRI.use_nodbg_operands(LI->reg)) {
MachineInstr *MI = MO.getParent();
// Copies of the full value.
if (MO.getSubReg() || !MI->isCopy())
continue;
unsigned DstReg = MI->getOperand(0).getReg();
// Don't follow copies to physregs. These are usually setting up call
// arguments, and the argument registers are always call clobbered. We are
// better off in the source register which could be a callee-saved register,
// or it could be spilled.
if (!TargetRegisterInfo::isVirtualRegister(DstReg))
continue;
// Is LocNo extended to reach this copy? If not, another def may be blocking
// it, or we are looking at a wrong value of LI.
SlotIndex Idx = LIS.getInstructionIndex(MI);
LocMap::iterator I = locInts.find(Idx.getRegSlot(true));
if (!I.valid() || I.value() != LocNo)
continue;
if (!LIS.hasInterval(DstReg))
continue;
LiveInterval *DstLI = &LIS.getInterval(DstReg);
const VNInfo *DstVNI = DstLI->getVNInfoAt(Idx.getRegSlot());
assert(DstVNI && DstVNI->def == Idx.getRegSlot() && "Bad copy value");
CopyValues.push_back(std::make_pair(DstLI, DstVNI));
}
if (CopyValues.empty())
return;
DEBUG(dbgs() << "Got " << CopyValues.size() << " copies of " << *LI << '\n');
// Try to add defs of the copied values for each kill point.
for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
SlotIndex Idx = Kills[i];
for (unsigned j = 0, e = CopyValues.size(); j != e; ++j) {
LiveInterval *DstLI = CopyValues[j].first;
const VNInfo *DstVNI = CopyValues[j].second;
if (DstLI->getVNInfoAt(Idx) != DstVNI)
continue;
// Check that there isn't already a def at Idx
LocMap::iterator I = locInts.find(Idx);
if (I.valid() && I.start() <= Idx)
continue;
DEBUG(dbgs() << "Kill at " << Idx << " covered by valno #"
<< DstVNI->id << " in " << *DstLI << '\n');
MachineInstr *CopyMI = LIS.getInstructionFromIndex(DstVNI->def);
assert(CopyMI && CopyMI->isCopy() && "Bad copy value");
unsigned LocNo = getLocationNo(CopyMI->getOperand(0));
I.insert(Idx, Idx.getNextSlot(), LocNo);
NewDefs.push_back(std::make_pair(Idx, LocNo));
break;
}
}
}
void
UserValue::computeIntervals(MachineRegisterInfo &MRI,
const TargetRegisterInfo &TRI,
LiveIntervals &LIS,
MachineDominatorTree &MDT,
UserValueScopes &UVS) {
SmallVector<std::pair<SlotIndex, unsigned>, 16> Defs;
  // Collect all defs to be extended (skipping undefs).
for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I)
if (I.value() != ~0u)
Defs.push_back(std::make_pair(I.start(), I.value()));
// Extend all defs, and possibly add new ones along the way.
for (unsigned i = 0; i != Defs.size(); ++i) {
SlotIndex Idx = Defs[i].first;
unsigned LocNo = Defs[i].second;
const MachineOperand &Loc = locations[LocNo];
if (!Loc.isReg()) {
extendDef(Idx, LocNo, nullptr, nullptr, nullptr, LIS, MDT, UVS);
continue;
}
// Register locations are constrained to where the register value is live.
if (TargetRegisterInfo::isVirtualRegister(Loc.getReg())) {
LiveInterval *LI = nullptr;
const VNInfo *VNI = nullptr;
if (LIS.hasInterval(Loc.getReg())) {
LI = &LIS.getInterval(Loc.getReg());
VNI = LI->getVNInfoAt(Idx);
}
SmallVector<SlotIndex, 16> Kills;
extendDef(Idx, LocNo, LI, VNI, &Kills, LIS, MDT, UVS);
if (LI)
addDefsFromCopies(LI, LocNo, Kills, Defs, MRI, LIS);
continue;
}
// For physregs, use the live range of the first regunit as a guide.
unsigned Unit = *MCRegUnitIterator(Loc.getReg(), &TRI);
LiveRange *LR = &LIS.getRegUnit(Unit);
const VNInfo *VNI = LR->getVNInfoAt(Idx);
// Don't track copies from physregs, it is too expensive.
extendDef(Idx, LocNo, LR, VNI, nullptr, LIS, MDT, UVS);
}
// Finally, erase all the undefs.
for (LocMap::iterator I = locInts.begin(); I.valid();)
if (I.value() == ~0u)
I.erase();
else
++I;
}
void LDVImpl::computeIntervals() {
for (unsigned i = 0, e = userValues.size(); i != e; ++i) {
UserValueScopes UVS(userValues[i]->getDebugLoc(), LS);
userValues[i]->computeIntervals(MF->getRegInfo(), *TRI, *LIS, *MDT, UVS);
userValues[i]->mapVirtRegs(this);
}
}
bool LDVImpl::runOnMachineFunction(MachineFunction &mf) {
clear();
MF = &mf;
LIS = &pass.getAnalysis<LiveIntervals>();
MDT = &pass.getAnalysis<MachineDominatorTree>();
TRI = mf.getSubtarget().getRegisterInfo();
LS.initialize(mf);
DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: "
<< mf.getName() << " **********\n");
bool Changed = collectDebugValues(mf);
computeIntervals();
DEBUG(print(dbgs()));
ModifiedMF = Changed;
return Changed;
}
static void removeDebugValues(MachineFunction &mf) {
for (MachineBasicBlock &MBB : mf) {
for (auto MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ) {
if (!MBBI->isDebugValue()) {
++MBBI;
continue;
}
MBBI = MBB.erase(MBBI);
}
}
}
bool LiveDebugVariables::runOnMachineFunction(MachineFunction &mf) {
if (!EnableLDV)
return false;
if (!FunctionDIs.count(mf.getFunction())) {
removeDebugValues(mf);
return false;
}
if (!pImpl)
pImpl = new LDVImpl(this);
return static_cast<LDVImpl*>(pImpl)->runOnMachineFunction(mf);
}
void LiveDebugVariables::releaseMemory() {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->clear();
}
LiveDebugVariables::~LiveDebugVariables() {
if (pImpl)
delete static_cast<LDVImpl*>(pImpl);
}
//===----------------------------------------------------------------------===//
// Live Range Splitting
//===----------------------------------------------------------------------===//
bool
UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
LiveIntervals& LIS) {
DEBUG({
dbgs() << "Splitting Loc" << OldLocNo << '\t';
print(dbgs(), nullptr);
});
bool DidChange = false;
LocMap::iterator LocMapI;
LocMapI.setMap(locInts);
for (unsigned i = 0; i != NewRegs.size(); ++i) {
LiveInterval *LI = &LIS.getInterval(NewRegs[i]);
if (LI->empty())
continue;
// Don't allocate the new LocNo until it is needed.
unsigned NewLocNo = ~0u;
// Iterate over the overlaps between locInts and LI.
LocMapI.find(LI->beginIndex());
if (!LocMapI.valid())
continue;
LiveInterval::iterator LII = LI->advanceTo(LI->begin(), LocMapI.start());
LiveInterval::iterator LIE = LI->end();
while (LocMapI.valid() && LII != LIE) {
// At this point, we know that LocMapI.stop() > LII->start.
LII = LI->advanceTo(LII, LocMapI.start());
if (LII == LIE)
break;
// Now LII->end > LocMapI.start(). Do we have an overlap?
if (LocMapI.value() == OldLocNo && LII->start < LocMapI.stop()) {
// Overlapping correct location. Allocate NewLocNo now.
if (NewLocNo == ~0u) {
MachineOperand MO = MachineOperand::CreateReg(LI->reg, false);
MO.setSubReg(locations[OldLocNo].getSubReg());
NewLocNo = getLocationNo(MO);
DidChange = true;
}
SlotIndex LStart = LocMapI.start();
SlotIndex LStop = LocMapI.stop();
// Trim LocMapI down to the LII overlap.
if (LStart < LII->start)
LocMapI.setStartUnchecked(LII->start);
if (LStop > LII->end)
LocMapI.setStopUnchecked(LII->end);
// Change the value in the overlap. This may trigger coalescing.
LocMapI.setValue(NewLocNo);
// Re-insert any removed OldLocNo ranges.
if (LStart < LocMapI.start()) {
LocMapI.insert(LStart, LocMapI.start(), OldLocNo);
++LocMapI;
assert(LocMapI.valid() && "Unexpected coalescing");
}
if (LStop > LocMapI.stop()) {
++LocMapI;
LocMapI.insert(LII->end, LStop, OldLocNo);
--LocMapI;
}
}
// Advance to the next overlap.
if (LII->end < LocMapI.stop()) {
if (++LII == LIE)
break;
LocMapI.advanceTo(LII->start);
} else {
++LocMapI;
if (!LocMapI.valid())
break;
LII = LI->advanceTo(LII, LocMapI.start());
}
}
}
// Finally, remove any remaining OldLocNo intervals and OldLocNo itself.
locations.erase(locations.begin() + OldLocNo);
LocMapI.goToBegin();
while (LocMapI.valid()) {
unsigned v = LocMapI.value();
if (v == OldLocNo) {
DEBUG(dbgs() << "Erasing [" << LocMapI.start() << ';'
<< LocMapI.stop() << ")\n");
LocMapI.erase();
} else {
if (v > OldLocNo)
LocMapI.setValueUnchecked(v-1);
++LocMapI;
}
}
DEBUG({dbgs() << "Split result: \t"; print(dbgs(), nullptr);});
return DidChange;
}
bool
UserValue::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
LiveIntervals &LIS) {
bool DidChange = false;
// Split locations referring to OldReg. Iterate backwards so splitLocation can
// safely erase unused locations.
for (unsigned i = locations.size(); i ; --i) {
unsigned LocNo = i-1;
const MachineOperand *Loc = &locations[LocNo];
if (!Loc->isReg() || Loc->getReg() != OldReg)
continue;
DidChange |= splitLocation(LocNo, NewRegs, LIS);
}
return DidChange;
}
void LDVImpl::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs) {
bool DidChange = false;
for (UserValue *UV = lookupVirtReg(OldReg); UV; UV = UV->getNext())
DidChange |= UV->splitRegister(OldReg, NewRegs, *LIS);
if (!DidChange)
return;
// Map all of the new virtual registers.
UserValue *UV = lookupVirtReg(OldReg);
for (unsigned i = 0; i != NewRegs.size(); ++i)
mapVirtReg(NewRegs[i], UV);
}
void LiveDebugVariables::
splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs, LiveIntervals &LIS) {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->splitRegister(OldReg, NewRegs);
}
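// rewriteLocations (below) maps each virtual-register location to its final
// home. Hedged example with hypothetical assignments: a vreg allocated to
// %EAX becomes a physreg operand, a vreg spilled to slot 3 becomes a
// frame-index operand, and an unallocated vreg degrades to %noreg (undef).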
void
UserValue::rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI) {
  // Iterating over locations in reverse makes it easier to handle coalescing.
for (unsigned i = locations.size(); i ; --i) {
unsigned LocNo = i-1;
MachineOperand &Loc = locations[LocNo];
// Only virtual registers are rewritten.
if (!Loc.isReg() || !Loc.getReg() ||
!TargetRegisterInfo::isVirtualRegister(Loc.getReg()))
continue;
unsigned VirtReg = Loc.getReg();
if (VRM.isAssignedReg(VirtReg) &&
TargetRegisterInfo::isPhysicalRegister(VRM.getPhys(VirtReg))) {
// This can create a %noreg operand in rare cases when the sub-register
// index is no longer available. That means the user value is in a
// non-existent sub-register, and %noreg is exactly what we want.
Loc.substPhysReg(VRM.getPhys(VirtReg), TRI);
} else if (VRM.getStackSlot(VirtReg) != VirtRegMap::NO_STACK_SLOT) {
// FIXME: Translate SubIdx to a stackslot offset.
Loc = MachineOperand::CreateFI(VRM.getStackSlot(VirtReg));
} else {
Loc.setReg(0);
Loc.setSubReg(0);
}
coalesceLocation(LocNo);
}
}
/// findInsertLocation - Find an iterator for inserting a DBG_VALUE
/// instruction.
static MachineBasicBlock::iterator
findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx,
LiveIntervals &LIS) {
SlotIndex Start = LIS.getMBBStartIdx(MBB);
Idx = Idx.getBaseIndex();
// Try to find an insert location by going backwards from Idx.
MachineInstr *MI;
while (!(MI = LIS.getInstructionFromIndex(Idx))) {
// We've reached the beginning of MBB.
if (Idx == Start) {
MachineBasicBlock::iterator I = MBB->SkipPHIsAndLabels(MBB->begin());
return I;
}
Idx = Idx.getPrevIndex();
}
// Don't insert anything after the first terminator, though.
return MI->isTerminator() ? MBB->getFirstTerminator() :
std::next(MachineBasicBlock::iterator(MI));
}
void UserValue::insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx,
unsigned LocNo,
LiveIntervals &LIS,
const TargetInstrInfo &TII) {
MachineBasicBlock::iterator I = findInsertLocation(MBB, Idx, LIS);
MachineOperand &Loc = locations[LocNo];
++NumInsertedDebugValues;
assert(cast<DILocalVariable>(Variable)
->isValidLocationForIntrinsic(getDebugLoc()) &&
"Expected inlined-at fields to agree");
if (Loc.isReg())
BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE),
IsIndirect, Loc.getReg(), offset, Variable, Expression);
else
BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE))
.addOperand(Loc)
.addImm(offset)
.addMetadata(Variable)
.addMetadata(Expression);
}
void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS,
const TargetInstrInfo &TII) {
MachineFunction::iterator MFEnd = VRM->getMachineFunction().end();
for (LocMap::const_iterator I = locInts.begin(); I.valid();) {
SlotIndex Start = I.start();
SlotIndex Stop = I.stop();
unsigned LocNo = I.value();
DEBUG(dbgs() << "\t[" << Start << ';' << Stop << "):" << LocNo);
MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start);
SlotIndex MBBEnd = LIS.getMBBEndIdx(MBB);
DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd);
insertDebugValue(MBB, Start, LocNo, LIS, TII);
// This interval may span multiple basic blocks.
// Insert a DBG_VALUE into each one.
while(Stop > MBBEnd) {
// Move to the next block.
Start = MBBEnd;
if (++MBB == MFEnd)
break;
MBBEnd = LIS.getMBBEndIdx(MBB);
DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd);
insertDebugValue(MBB, Start, LocNo, LIS, TII);
}
DEBUG(dbgs() << '\n');
if (MBB == MFEnd)
break;
++I;
}
}
void LDVImpl::emitDebugValues(VirtRegMap *VRM) {
DEBUG(dbgs() << "********** EMITTING LIVE DEBUG VARIABLES **********\n");
if (!MF)
return;
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
for (unsigned i = 0, e = userValues.size(); i != e; ++i) {
DEBUG(userValues[i]->print(dbgs(), TRI));
userValues[i]->rewriteLocations(*VRM, *TRI);
userValues[i]->emitDebugValues(VRM, *LIS, *TII);
}
EmitDone = true;
}
void LiveDebugVariables::emitDebugValues(VirtRegMap *VRM) {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->emitDebugValues(VRM);
}
bool LiveDebugVariables::doInitialization(Module &M) {
FunctionDIs = makeSubprogramMap(M);
return Pass::doInitialization(M);
}
#ifndef NDEBUG
void LiveDebugVariables::dump() {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->print(dbgs());
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveStackAnalysis.cpp | //===-- LiveStackAnalysis.cpp - Live Stack Slot Analysis ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the live stack slot analysis pass. It is analogous to
// live interval analysis except it's analyzing liveness of stack slots rather
// than registers.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <limits>
using namespace llvm;
#define DEBUG_TYPE "livestacks"
char LiveStacks::ID = 0;
INITIALIZE_PASS_BEGIN(LiveStacks, "livestacks",
"Live Stack Slot Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveStacks, "livestacks",
"Live Stack Slot Analysis", false, false)
char &llvm::LiveStacksID = LiveStacks::ID;
void LiveStacks::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addPreserved<SlotIndexes>();
AU.addRequiredTransitive<SlotIndexes>();
MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveStacks::releaseMemory() {
// Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
VNInfoAllocator.Reset();
S2IMap.clear();
S2RCMap.clear();
}
bool LiveStacks::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getSubtarget().getRegisterInfo();
// FIXME: No analysis is being done right now. We are relying on the
// register allocators to provide the information.
return false;
}
LiveInterval &
LiveStacks::getOrCreateInterval(int Slot, const TargetRegisterClass *RC) {
assert(Slot >= 0 && "Spill slot indice must be >= 0");
SS2IntervalMap::iterator I = S2IMap.find(Slot);
if (I == S2IMap.end()) {
I = S2IMap.emplace(std::piecewise_construct, std::forward_as_tuple(Slot),
std::forward_as_tuple(
TargetRegisterInfo::index2StackSlot(Slot), 0.0F))
.first;
S2RCMap.insert(std::make_pair(Slot, RC));
} else {
// Use the largest common subclass register class.
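    // E.g. (hypothetical classes): a slot first used as GR32 and later
    // requested as GR32_ABCD narrows to GR32_ABCD, the largest register class
    // compatible with both uses.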
const TargetRegisterClass *OldRC = S2RCMap[Slot];
S2RCMap[Slot] = TRI->getCommonSubClass(OldRC, RC);
}
return I->second;
}
/// print - Implement the dump method.
void LiveStacks::print(raw_ostream &OS, const Module*) const {
OS << "********** INTERVALS **********\n";
for (const_iterator I = begin(), E = end(); I != E; ++I) {
I->second.print(OS);
int Slot = I->first;
const TargetRegisterClass *RC = getIntervalRegClass(Slot);
if (RC)
OS << " [" << TRI->getRegClassName(RC) << "]\n";
else
OS << " [Unknown]\n";
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineLoopInfo.cpp | //===- MachineLoopInfo.cpp - Natural Loop Calculator ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineLoopInfo class that is used to identify natural
// loops and determine the loop depth of various nodes of the CFG. Note that
// the loops identified may actually be several natural loops that share the
// same header node... not just a single natural loop.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/Analysis/LoopInfoImpl.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
// Explicitly instantiate methods in LoopInfoImpl.h for MI-level Loops.
template class llvm::LoopBase<MachineBasicBlock, MachineLoop>;
template class llvm::LoopInfoBase<MachineBasicBlock, MachineLoop>;
char MachineLoopInfo::ID = 0;
INITIALIZE_PASS_BEGIN(MachineLoopInfo, "machine-loops",
"Machine Natural Loop Construction", true, true)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(MachineLoopInfo, "machine-loops",
"Machine Natural Loop Construction", true, true)
char &llvm::MachineLoopInfoID = MachineLoopInfo::ID;
bool MachineLoopInfo::runOnMachineFunction(MachineFunction &) {
releaseMemory();
LI.Analyze(getAnalysis<MachineDominatorTree>().getBase());
return false;
}
void MachineLoopInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
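// getTopBlock and getBottomBlock return the first and last loop blocks in
// function layout order. Sketch with a hypothetical layout [A B C D]: if the
// loop is {B, C, D} with header C, getTopBlock walks C -> B and returns B,
// while getBottomBlock walks C -> D and returns D.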
MachineBasicBlock *MachineLoop::getTopBlock() {
MachineBasicBlock *TopMBB = getHeader();
MachineFunction::iterator Begin = TopMBB->getParent()->begin();
if (TopMBB != Begin) {
MachineBasicBlock *PriorMBB = std::prev(MachineFunction::iterator(TopMBB));
while (contains(PriorMBB)) {
TopMBB = PriorMBB;
if (TopMBB == Begin) break;
PriorMBB = std::prev(MachineFunction::iterator(TopMBB));
}
}
return TopMBB;
}
MachineBasicBlock *MachineLoop::getBottomBlock() {
MachineBasicBlock *BotMBB = getHeader();
MachineFunction::iterator End = BotMBB->getParent()->end();
if (BotMBB != std::prev(End)) {
MachineBasicBlock *NextMBB = std::next(MachineFunction::iterator(BotMBB));
while (contains(NextMBB)) {
BotMBB = NextMBB;
      if (BotMBB == std::prev(End)) break;
NextMBB = std::next(MachineFunction::iterator(BotMBB));
}
}
return BotMBB;
}
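// Layout sketch for getTopBlock/getBottomBlock (block numbers hypothetical):
// with blocks laid out as BB#0 BB#1 BB#2 BB#3 and a loop containing
// {BB#1, BB#2} whose header is BB#2, getTopBlock() walks upward from the
// header and returns BB#1, while getBottomBlock() sees that BB#3 is not in
// the loop and returns BB#2.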
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineLoop::dump() const {
print(dbgs());
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachinePostDominators.cpp | //===- MachinePostDominators.cpp -Machine Post Dominator Calculation ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements simple dominator construction algorithms for finding
// post dominators on machine functions.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachinePostDominators.h"
using namespace llvm;
char MachinePostDominatorTree::ID = 0;
// Declare initializeMachinePostDominatorTreePass.
INITIALIZE_PASS(MachinePostDominatorTree, "machinepostdomtree",
"MachinePostDominator Tree Construction", true, true)
MachinePostDominatorTree::MachinePostDominatorTree() : MachineFunctionPass(ID) {
initializeMachinePostDominatorTreePass(*PassRegistry::getPassRegistry());
  DT = new DominatorTreeBase<MachineBasicBlock>(true); // true indicates a
                                                       // postdominator tree
}
FunctionPass *
MachinePostDominatorTree::createMachinePostDominatorTreePass() {
return new MachinePostDominatorTree();
}
bool
MachinePostDominatorTree::runOnMachineFunction(MachineFunction &F) {
DT->recalculate(F);
return false;
}
MachinePostDominatorTree::~MachinePostDominatorTree() {
delete DT;
}
void
MachinePostDominatorTree::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
void
MachinePostDominatorTree::print(llvm::raw_ostream &OS, const Module *M) const {
DT->print(OS);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineFunctionPass.cpp | //===-- MachineFunctionPass.cpp -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the definitions of the MachineFunctionPass members.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
using namespace llvm;
Pass *MachineFunctionPass::createPrinterPass(raw_ostream &O,
const std::string &Banner) const {
return createMachineFunctionPrinterPass(O, Banner);
}
bool MachineFunctionPass::runOnFunction(Function &F) {
// Do not codegen any 'available_externally' functions at all, they have
// definitions outside the translation unit.
if (F.hasAvailableExternallyLinkage())
return false;
MachineFunction &MF = getAnalysis<MachineFunctionAnalysis>().getMF();
return runOnMachineFunction(MF);
}
void MachineFunctionPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineFunctionAnalysis>();
AU.addPreserved<MachineFunctionAnalysis>();
// MachineFunctionPass preserves all LLVM IR passes, but there's no
// high-level way to express this. Instead, just list a bunch of
// passes explicitly. This does not include setPreservesCFG,
// because CodeGen overloads that to mean preserving the MachineBasicBlock
// CFG in addition to the LLVM IR CFG.
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<DominanceFrontier>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<IVUsers>();
AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreserved<MemoryDependenceAnalysis>();
AU.addPreserved<ScalarEvolution>();
AU.addPreserved<StackProtector>();
FunctionPass::getAnalysisUsage(AU);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/AllocationOrder.cpp | //===-- llvm/CodeGen/AllocationOrder.cpp - Allocation Order ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an allocation order for virtual registers.
//
// The preferred allocation order for a virtual register depends on allocation
// hints and target hooks. The AllocationOrder class encapsulates all of that.
//
//===----------------------------------------------------------------------===//
#include "AllocationOrder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
// Compare VirtRegMap::getRegAllocPref().
AllocationOrder::AllocationOrder(unsigned VirtReg,
const VirtRegMap &VRM,
const RegisterClassInfo &RegClassInfo)
: Pos(0) {
const MachineFunction &MF = VRM.getMachineFunction();
const TargetRegisterInfo *TRI = &VRM.getTargetRegInfo();
Order = RegClassInfo.getOrder(MF.getRegInfo().getRegClass(VirtReg));
TRI->getRegAllocationHints(VirtReg, Order, Hints, MF, &VRM);
rewind();
DEBUG({
if (!Hints.empty()) {
dbgs() << "hints:";
for (unsigned I = 0, E = Hints.size(); I != E; ++I)
dbgs() << ' ' << PrintReg(Hints[I], TRI);
dbgs() << '\n';
}
});
#ifndef NDEBUG
for (unsigned I = 0, E = Hints.size(); I != E; ++I)
assert(std::find(Order.begin(), Order.end(), Hints[I]) != Order.end() &&
"Target hint is outside allocation order.");
#endif
}
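// Usage sketch (illustrative): register allocators typically iterate the
// order like this, with hinted registers returned first after rewind():
//   AllocationOrder Order(VirtReg, VRM, RegClassInfo);
//   while (unsigned PhysReg = Order.next())
//     ...try to assign PhysReg...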
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/InterleavedAccessPass.cpp | //=----------------------- InterleavedAccessPass.cpp -----------------------==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Interleaved Access pass, which identifies
// interleaved memory accesses and transforms them into target specific
// intrinsics.
//
// An interleaved load reads data from memory into several vectors,
// DE-interleaving the data by a factor. An interleaved store writes several
// vectors to memory, RE-interleaving the data by a factor.
//
// Because interleaved accesses are hard to identify in CodeGen (mainly because
// the VECTOR_SHUFFLE DAG node is quite different from the shufflevector IR),
// we identify and transform them into intrinsics in this pass, so that the
// intrinsics can easily be matched to target specific instructions later in
// CodeGen.
//
// E.g. An interleaved load (Factor = 2):
// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
// %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6>
// %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7>
//
// It could be transformed into a ld2 intrinsic in AArch64 backend or a vld2
// intrinsic in ARM backend.
//
// E.g. An interleaved store (Factor = 3):
// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
// store <12 x i32> %i.vec, <12 x i32>* %ptr
//
// It could be transformed into a st3 intrinsic in AArch64 backend or a vst3
// intrinsic in ARM backend.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "interleaved-access"
static cl::opt<bool> LowerInterleavedAccesses(
"lower-interleaved-accesses",
cl::desc("Enable lowering interleaved accesses to intrinsics"),
cl::init(false), cl::Hidden);
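// Note: this pass is conservatively off by default. It only does work when a
// TargetMachine is supplied and the flag above is set, e.g. (hypothetical
// invocation):
//   llc -lower-interleaved-accesses -mtriple=aarch64 input.ll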
static unsigned MaxFactor; // The maximum supported interleave factor.
namespace llvm {
static void initializeInterleavedAccessPass(PassRegistry &);
}
namespace {
class InterleavedAccess : public FunctionPass {
public:
static char ID;
InterleavedAccess(const TargetMachine *TM = nullptr)
: FunctionPass(ID), TM(TM), TLI(nullptr) {
initializeInterleavedAccessPass(*PassRegistry::getPassRegistry());
}
StringRef getPassName() const override { return "Interleaved Access Pass"; }
bool runOnFunction(Function &F) override;
private:
const TargetMachine *TM;
const TargetLowering *TLI;
/// \brief Transform an interleaved load into target specific intrinsics.
bool lowerInterleavedLoad(LoadInst *LI,
SmallVector<Instruction *, 32> &DeadInsts);
/// \brief Transform an interleaved store into target specific intrinsics.
bool lowerInterleavedStore(StoreInst *SI,
SmallVector<Instruction *, 32> &DeadInsts);
};
} // end anonymous namespace.
char InterleavedAccess::ID = 0;
INITIALIZE_TM_PASS(InterleavedAccess, "interleaved-access",
"Lower interleaved memory accesses to target specific intrinsics",
false, false)
FunctionPass *llvm::createInterleavedAccessPass(const TargetMachine *TM) {
return new InterleavedAccess(TM);
}
/// \brief Check if the mask is a DE-interleave mask of the given factor
/// \p Factor like:
/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
unsigned &Index) {
// Check all potential start indices from 0 to (Factor - 1).
for (Index = 0; Index < Factor; Index++) {
unsigned i = 0;
// Check that elements are in ascending order by Factor. Ignore undef
// elements.
for (; i < Mask.size(); i++)
if (Mask[i] >= 0 && static_cast<unsigned>(Mask[i]) != Index + i * Factor)
break;
if (i == Mask.size())
return true;
}
return false;
}
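// Examples for isDeInterleaveMaskOfFactor (Factor = 2): <0, 2, 4, 6> matches
// with Index == 0 and <1, 3, 5, 7> matches with Index == 1, while
// <1, 3, 5, 6> matches no start index and yields false. Undef elements (-1)
// are skipped, so <0, -1, 4, 6> still matches with Index == 0.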
/// \brief Check if the mask is a DE-interleave mask for an interleaved load.
///
/// E.g. DE-interleave masks (Factor = 2) could be:
/// <0, 2, 4, 6> (mask of index 0 to extract even elements)
/// <1, 3, 5, 7> (mask of index 1 to extract odd elements)
static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
unsigned &Index) {
if (Mask.size() < 2)
return false;
// Check potential Factors.
for (Factor = 2; Factor <= MaxFactor; Factor++)
if (isDeInterleaveMaskOfFactor(Mask, Factor, Index))
return true;
return false;
}
/// \brief Check if the mask is a RE-interleave mask for an interleaved store.
///
/// I.e. <0, NumSubElts, ... , NumSubElts*(Factor - 1), 1, NumSubElts + 1, ...>
///
/// E.g. The RE-interleave mask (Factor = 2) could be:
/// <0, 4, 1, 5, 2, 6, 3, 7>
static bool isReInterleaveMask(ArrayRef<int> Mask, unsigned &Factor) {
unsigned NumElts = Mask.size();
if (NumElts < 4)
return false;
// Check potential Factors.
for (Factor = 2; Factor <= MaxFactor; Factor++) {
if (NumElts % Factor)
continue;
unsigned NumSubElts = NumElts / Factor;
if (!isPowerOf2_32(NumSubElts))
continue;
    // Check whether each element matches the RE-interleave rule. Ignore undef
// elements.
unsigned i = 0;
for (; i < NumElts; i++)
if (Mask[i] >= 0 &&
static_cast<unsigned>(Mask[i]) !=
(i % Factor) * NumSubElts + i / Factor)
break;
    // Found a RE-interleave mask of the current factor.
if (i == NumElts)
return true;
}
return false;
}
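// Worked example for isReInterleaveMask (assuming MaxFactor >= 3): for
// NumElts = 12, the mask <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> fails
// Factor == 2 because NumSubElts == 6 is not a power of 2, but satisfies
// Mask[i] == (i % 3) * 4 + i / 3 for every i, so the function returns true
// with Factor == 3.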
bool InterleavedAccess::lowerInterleavedLoad(
LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts) {
if (!LI->isSimple())
return false;
SmallVector<ShuffleVectorInst *, 4> Shuffles;
// Check if all users of this load are shufflevectors.
for (auto UI = LI->user_begin(), E = LI->user_end(); UI != E; UI++) {
ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(*UI);
if (!SVI || !isa<UndefValue>(SVI->getOperand(1)))
return false;
Shuffles.push_back(SVI);
}
if (Shuffles.empty())
return false;
unsigned Factor, Index;
  // Check if the first shufflevector is a DE-interleave shuffle.
if (!isDeInterleaveMask(Shuffles[0]->getShuffleMask(), Factor, Index))
return false;
// Holds the corresponding index for each DE-interleave shuffle.
SmallVector<unsigned, 4> Indices;
Indices.push_back(Index);
Type *VecTy = Shuffles[0]->getType();
  // Check if the other shufflevectors are also DE-interleave shuffles of the
  // same type and factor as the first shufflevector.
for (unsigned i = 1; i < Shuffles.size(); i++) {
if (Shuffles[i]->getType() != VecTy)
return false;
if (!isDeInterleaveMaskOfFactor(Shuffles[i]->getShuffleMask(), Factor,
Index))
return false;
Indices.push_back(Index);
}
DEBUG(dbgs() << "IA: Found an interleaved load: " << *LI << "\n");
// Try to create target specific intrinsics to replace the load and shuffles.
if (!TLI->lowerInterleavedLoad(LI, Shuffles, Indices, Factor))
return false;
for (auto SVI : Shuffles)
DeadInsts.push_back(SVI);
DeadInsts.push_back(LI);
return true;
}
bool InterleavedAccess::lowerInterleavedStore(
StoreInst *SI, SmallVector<Instruction *, 32> &DeadInsts) {
if (!SI->isSimple())
return false;
ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand());
if (!SVI || !SVI->hasOneUse())
return false;
  // Check if the shufflevector is a RE-interleave shuffle.
unsigned Factor;
if (!isReInterleaveMask(SVI->getShuffleMask(), Factor))
return false;
DEBUG(dbgs() << "IA: Found an interleaved store: " << *SI << "\n");
// Try to create target specific intrinsics to replace the store and shuffle.
if (!TLI->lowerInterleavedStore(SI, SVI, Factor))
return false;
// Already have a new target specific interleaved store. Erase the old store.
DeadInsts.push_back(SI);
DeadInsts.push_back(SVI);
return true;
}
bool InterleavedAccess::runOnFunction(Function &F) {
if (!TM || !LowerInterleavedAccesses)
return false;
DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");
TLI = TM->getSubtargetImpl(F)->getTargetLowering();
MaxFactor = TLI->getMaxSupportedInterleaveFactor();
// Holds dead instructions that will be erased later.
SmallVector<Instruction *, 32> DeadInsts;
bool Changed = false;
for (auto &I : inst_range(F)) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I))
Changed |= lowerInterleavedLoad(LI, DeadInsts);
if (StoreInst *SI = dyn_cast<StoreInst>(&I))
Changed |= lowerInterleavedStore(SI, DeadInsts);
}
for (auto I : DeadInsts)
I->eraseFromParent();
return Changed;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineSink.cpp | //===-- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "machine-sink"
static cl::opt<bool>
SplitEdges("machine-sink-split",
cl::desc("Split critical edges during machine sinking"),
cl::init(true), cl::Hidden);
static cl::opt<bool>
UseBlockFreqInfo("machine-sink-bfi",
cl::desc("Use block frequency info to find successors to sink"),
cl::init(true), cl::Hidden);
STATISTIC(NumSunk, "Number of machine instructions sunk");
STATISTIC(NumSplit, "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");
namespace {
class MachineSinking : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI; // Machine register information
MachineDominatorTree *DT; // Machine dominator tree
MachinePostDominatorTree *PDT; // Machine post dominator tree
MachineLoopInfo *LI;
const MachineBlockFrequencyInfo *MBFI;
AliasAnalysis *AA;
// Remember which edges have been considered for breaking.
SmallSet<std::pair<MachineBasicBlock*,MachineBasicBlock*>, 8>
CEBCandidates;
  // Remember which edges we are about to split. This is different from
  // CEBCandidates: these edges will actually be split, whereas CEBCandidates
  // only records edges that have been considered for splitting.
SetVector<std::pair<MachineBasicBlock*,MachineBasicBlock*> > ToSplit;
SparseBitVector<> RegsToClearKillFlags;
typedef std::map<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>
AllSuccsCache;
public:
static char ID; // Pass identification
MachineSinking() : MachineFunctionPass(ID) {
initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachinePostDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineDominatorTree>();
AU.addPreserved<MachinePostDominatorTree>();
AU.addPreserved<MachineLoopInfo>();
if (UseBlockFreqInfo)
AU.addRequired<MachineBlockFrequencyInfo>();
}
void releaseMemory() override {
CEBCandidates.clear();
}
private:
bool ProcessBlock(MachineBasicBlock &MBB);
bool isWorthBreakingCriticalEdge(MachineInstr *MI,
MachineBasicBlock *From,
MachineBasicBlock *To);
/// \brief Postpone the splitting of the given critical
/// edge (\p From, \p To).
///
/// We do not split the edges on the fly. Indeed, this invalidates
/// the dominance information and thus triggers a lot of updates
/// of that information underneath.
/// Instead, we postpone all the splits after each iteration of
/// the main loop. That way, the information is at least valid
/// for the lifetime of an iteration.
///
/// \return True if the edge is marked as toSplit, false otherwise.
/// False can be returned if, for instance, this is not profitable.
bool PostponeSplitCriticalEdge(MachineInstr *MI,
MachineBasicBlock *From,
MachineBasicBlock *To,
bool BreakPHIEdge);
bool SinkInstruction(MachineInstr *MI, bool &SawStore,
AllSuccsCache &AllSuccessors);
bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
MachineBasicBlock *DefMBB,
bool &BreakPHIEdge, bool &LocalUse) const;
MachineBasicBlock *FindSuccToSinkTo(MachineInstr *MI, MachineBasicBlock *MBB,
bool &BreakPHIEdge, AllSuccsCache &AllSuccessors);
bool isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
MachineBasicBlock *MBB,
MachineBasicBlock *SuccToSinkTo,
AllSuccsCache &AllSuccessors);
bool PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB);
SmallVector<MachineBasicBlock *, 4> &
GetAllSortedSuccessors(MachineInstr *MI, MachineBasicBlock *MBB,
AllSuccsCache &AllSuccessors) const;
};
} // end anonymous namespace
char MachineSinking::ID = 0;
char &llvm::MachineSinkingID = MachineSinking::ID;
INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
"Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineSinking, "machine-sink",
"Machine code sinking", false, false)
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB) {
if (!MI->isCopy())
return false;
unsigned SrcReg = MI->getOperand(1).getReg();
unsigned DstReg = MI->getOperand(0).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
!TargetRegisterInfo::isVirtualRegister(DstReg) ||
!MRI->hasOneNonDBGUse(SrcReg))
return false;
const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
if (SRC != DRC)
return false;
MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
if (DefMI->isCopyLike())
return false;
DEBUG(dbgs() << "Coalescing: " << *DefMI);
DEBUG(dbgs() << "*** to: " << *MI);
MRI->replaceRegWith(DstReg, SrcReg);
MI->eraseFromParent();
// Conservatively, clear any kill flags, since it's possible that they are no
// longer correct.
MRI->clearKillFlags(SrcReg);
++NumCoalesces;
return true;
}
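// Coalescing sketch for PerformTrivialForwardCoalescing (vreg numbers
// hypothetical, register classes assumed identical): given
//   %vreg5 = ADD32rr %vreg4, %vreg4   ; DefMI, not a copy
//   %vreg6 = COPY %vreg5              ; the only non-debug use of %vreg5
// the COPY is erased and every use of %vreg6 is rewritten to use %vreg5
// directly, with the kill flags on %vreg5 conservatively cleared.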
/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool
MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
MachineBasicBlock *MBB,
MachineBasicBlock *DefMBB,
bool &BreakPHIEdge,
bool &LocalUse) const {
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Only makes sense for vregs");
// Ignore debug uses because debug info doesn't affect the code.
if (MRI->use_nodbg_empty(Reg))
return true;
  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
// into and they are all PHI nodes. In this case, machine-sink must break
// the critical edge first. e.g.
//
// BB#1: derived from LLVM BB %bb4.preheader
// Predecessors according to CFG: BB#0
// ...
// %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
// ...
// JE_4 <BB#37>, %EFLAGS<imp-use>
// Successors according to CFG: BB#37 BB#2
//
// BB#2: derived from LLVM BB %bb.nph
// Predecessors according to CFG: BB#0 BB#1
// %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
BreakPHIEdge = true;
for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
MachineInstr *UseInst = MO.getParent();
unsigned OpNo = &MO - &UseInst->getOperand(0);
MachineBasicBlock *UseBlock = UseInst->getParent();
if (!(UseBlock == MBB && UseInst->isPHI() &&
UseInst->getOperand(OpNo+1).getMBB() == DefMBB)) {
BreakPHIEdge = false;
break;
}
}
if (BreakPHIEdge)
return true;
for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
// Determine the block of the use.
MachineInstr *UseInst = MO.getParent();
unsigned OpNo = &MO - &UseInst->getOperand(0);
MachineBasicBlock *UseBlock = UseInst->getParent();
if (UseInst->isPHI()) {
// PHI nodes use the operand in the predecessor block, not the block with
// the PHI.
UseBlock = UseInst->getOperand(OpNo+1).getMBB();
} else if (UseBlock == DefMBB) {
LocalUse = true;
return false;
}
// Check that it dominates.
if (!DT->dominates(MBB, UseBlock))
return false;
}
return true;
}
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
DEBUG(dbgs() << "******** Machine Sinking ********\n");
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
DT = &getAnalysis<MachineDominatorTree>();
PDT = &getAnalysis<MachinePostDominatorTree>();
LI = &getAnalysis<MachineLoopInfo>();
MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
AA = &getAnalysis<AliasAnalysis>();
bool EverMadeChange = false;
while (1) {
bool MadeChange = false;
// Process all basic blocks.
CEBCandidates.clear();
ToSplit.clear();
for (auto &MBB: MF)
MadeChange |= ProcessBlock(MBB);
// If we have anything we marked as toSplit, split it now.
for (auto &Pair : ToSplit) {
auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, this);
if (NewSucc != nullptr) {
DEBUG(dbgs() << " *** Splitting critical edge:"
" BB#" << Pair.first->getNumber()
<< " -- BB#" << NewSucc->getNumber()
<< " -- BB#" << Pair.second->getNumber() << '\n');
MadeChange = true;
++NumSplit;
} else
DEBUG(dbgs() << " *** Not legal to break critical edge\n");
}
// If this iteration over the code changed anything, keep iterating.
if (!MadeChange) break;
EverMadeChange = true;
}
// Now clear any kill flags for recorded registers.
for (auto I : RegsToClearKillFlags)
MRI->clearKillFlags(I);
RegsToClearKillFlags.clear();
return EverMadeChange;
}
bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  // Can't sink anything out of a block that has fewer than two successors.
if (MBB.succ_size() <= 1 || MBB.empty()) return false;
// Don't bother sinking code out of unreachable blocks. In addition to being
// unprofitable, it can also lead to infinite looping, because in an
// unreachable loop there may be nowhere to stop.
if (!DT->isReachableFromEntry(&MBB)) return false;
bool MadeChange = false;
// Cache all successors, sorted by frequency info and loop depth.
AllSuccsCache AllSuccessors;
// Walk the basic block bottom-up. Remember if we saw a store.
MachineBasicBlock::iterator I = MBB.end();
--I;
bool ProcessedBegin, SawStore = false;
do {
MachineInstr *MI = I; // The instruction to sink.
// Predecrement I (if it's not begin) so that it isn't invalidated by
// sinking.
ProcessedBegin = I == MBB.begin();
if (!ProcessedBegin)
--I;
if (MI->isDebugValue())
continue;
bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
if (Joined) {
MadeChange = true;
continue;
}
    if (SinkInstruction(MI, SawStore, AllSuccessors)) {
      ++NumSunk;
      MadeChange = true;
    }
// If we just processed the first instruction in the block, we're done.
} while (!ProcessedBegin);
return MadeChange;
}
bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
MachineBasicBlock *From,
MachineBasicBlock *To) {
// FIXME: Need much better heuristics.
// If the pass has already considered breaking this edge (during this pass
// through the function), then let's go ahead and break it. This means
// sinking multiple "cheap" instructions into the same block.
if (!CEBCandidates.insert(std::make_pair(From, To)).second)
return true;
if (!MI->isCopy() && !TII->isAsCheapAsAMove(MI))
return true;
// MI is cheap, we probably don't want to break the critical edge for it.
// However, if this would allow some definitions of its source operands
// to be sunk then it's probably worth it.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (Reg == 0)
continue;
// We don't move live definitions of physical registers,
// so sinking their uses won't enable any opportunities.
if (TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
// If this instruction is the only user of a virtual register,
// check if breaking the edge will enable sinking
// both this instruction and the defining instruction.
if (MRI->hasOneNonDBGUse(Reg)) {
// If the definition resides in same MBB,
// claim it's likely we can sink these together.
// If definition resides elsewhere, we aren't
// blocking it from being sunk so don't break the edge.
MachineInstr *DefMI = MRI->getVRegDef(Reg);
if (DefMI->getParent() == MI->getParent())
return true;
}
}
return false;
}
bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr *MI,
MachineBasicBlock *FromBB,
MachineBasicBlock *ToBB,
bool BreakPHIEdge) {
if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
return false;
  // Avoid breaking a back edge. From == To means a backedge for a single-BB
  // loop.
if (!SplitEdges || FromBB == ToBB)
return false;
// Check for backedges of more "complex" loops.
if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
LI->isLoopHeader(ToBB))
return false;
// It's not always legal to break critical edges and sink the computation
// to the edge.
//
// BB#1:
// v1024
// Beq BB#3
// <fallthrough>
// BB#2:
// ... no uses of v1024
// <fallthrough>
// BB#3:
// ...
// = v1024
//
// If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
//
// BB#1:
// ...
// Bne BB#2
// BB#4:
// v1024 =
// B BB#3
// BB#2:
// ... no uses of v1024
// <fallthrough>
// BB#3:
// ...
// = v1024
//
// This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
// flow. We need to ensure the new basic block where the computation is
// sunk to dominates all the uses.
// It's only legal to break critical edge and sink the computation to the
// new block if all the predecessors of "To", except for "From", are
// not dominated by "From". Given SSA property, this means these
// predecessors are dominated by "To".
//
// There is no need to do this check if all the uses are PHI nodes. PHI
// sources are only defined on the specific predecessor edges.
if (!BreakPHIEdge) {
for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
E = ToBB->pred_end(); PI != E; ++PI) {
if (*PI == FromBB)
continue;
if (!DT->dominates(ToBB, *PI))
return false;
}
}
ToSplit.insert(std::make_pair(FromBB, ToBB));
return true;
}
static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
}
/// collectDebugValues - Scan instructions following MI and collect any
/// matching DBG_VALUEs.
static void collectDebugValues(MachineInstr *MI,
SmallVectorImpl<MachineInstr *> &DbgValues) {
DbgValues.clear();
if (!MI->getOperand(0).isReg())
return;
MachineBasicBlock::iterator DI = MI; ++DI;
for (MachineBasicBlock::iterator DE = MI->getParent()->end();
DI != DE; ++DI) {
if (!DI->isDebugValue())
return;
if (DI->getOperand(0).isReg() &&
DI->getOperand(0).getReg() == MI->getOperand(0).getReg())
DbgValues.push_back(DI);
}
}
/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
MachineBasicBlock *MBB,
MachineBasicBlock *SuccToSinkTo,
AllSuccsCache &AllSuccessors) {
assert (MI && "Invalid MachineInstr!");
assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");
if (MBB == SuccToSinkTo)
return false;
// It is profitable if SuccToSinkTo does not post dominate current block.
if (!PDT->dominates(SuccToSinkTo, MBB))
return true;
// It is profitable to sink an instruction from a deeper loop to a shallower
// loop, even if the latter post-dominates the former (PR21115).
if (LI->getLoopDepth(MBB) > LI->getLoopDepth(SuccToSinkTo))
return true;
// Check if only use in post dominated block is PHI instruction.
bool NonPHIUse = false;
for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
MachineBasicBlock *UseBlock = UseInst.getParent();
if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
NonPHIUse = true;
}
if (!NonPHIUse)
return true;
  // Even if SuccToSinkTo post-dominates MBB, sinking may still be profitable
  // if MI can be sunk further profitably into another block in the next round.
bool BreakPHIEdge = false;
// FIXME - If finding successor is compile time expensive then cache results.
if (MachineBasicBlock *MBB2 =
FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);
// If SuccToSinkTo is final destination and it is a post dominator of current
// block then it is not profitable to sink MI into SuccToSinkTo block.
return false;
}
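// Decision sketch for isProfitableToSinkTo (illustrative): if MBB sits in a
// loop at depth 1 and SuccToSinkTo is the loop exit at depth 0, the loop-depth
// check above reports the sink as profitable even when the exit post-dominates
// MBB, moving the computation out of the loop body (cf. PR21115).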
/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
/// computing it if it was not already cached.
SmallVector<MachineBasicBlock *, 4> &
MachineSinking::GetAllSortedSuccessors(MachineInstr *MI, MachineBasicBlock *MBB,
AllSuccsCache &AllSuccessors) const {
  // Do we have the sorted successors in the cache?
auto Succs = AllSuccessors.find(MBB);
if (Succs != AllSuccessors.end())
return Succs->second;
SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->succ_begin(),
MBB->succ_end());
// Handle cases where sinking can happen but where the sink point isn't a
// successor. For example:
//
// x = computation
// if () {} else {}
// use x
//
const std::vector<MachineDomTreeNode *> &Children =
DT->getNode(MBB)->getChildren();
for (const auto &DTChild : Children)
// DomTree children of MBB that have MBB as immediate dominator are added.
if (DTChild->getIDom()->getBlock() == MI->getParent() &&
// Skip MBBs already added to the AllSuccs vector above.
!MBB->isSuccessor(DTChild->getBlock()))
AllSuccs.push_back(DTChild->getBlock());
// Sort Successors according to their loop depth or block frequency info.
std::stable_sort(
AllSuccs.begin(), AllSuccs.end(),
[this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
return HasBlockFreq ? LHSFreq < RHSFreq
: LI->getLoopDepth(L) < LI->getLoopDepth(R);
});
auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));
return it.first->second;
}
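// Ordering sketch for GetAllSortedSuccessors (frequencies hypothetical): with
// MBFI available and successor frequencies {BB#2: 120, BB#3: 15}, BB#3 sorts
// first, so colder blocks are tried as sink destinations before hotter ones.
// If either frequency is 0, the comparison falls back to loop depth,
// preferring shallower blocks.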
/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
MachineBasicBlock *MBB,
bool &BreakPHIEdge,
AllSuccsCache &AllSuccessors) {
assert (MI && "Invalid MachineInstr!");
assert (MBB && "Invalid MachineBasicBlock!");
// Loop over all the operands of the specified instruction. If there is
// anything we can't handle, bail out.
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
MachineBasicBlock *SuccToSinkTo = nullptr;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue; // Ignore non-register operands.
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
return nullptr;
} else if (!MO.isDead()) {
// A def that isn't dead. We can't move it.
return nullptr;
}
} else {
// Virtual register uses are always safe to sink.
if (MO.isUse()) continue;
// If it's not safe to move defs of the register class, then abort.
if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
return nullptr;
// Virtual register defs can only be sunk if all their uses are in blocks
// dominated by one of the successors.
if (SuccToSinkTo) {
// If a previous operand picked a block to sink to, then this operand
// must be sinkable to the same block.
bool LocalUse = false;
if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
BreakPHIEdge, LocalUse))
return nullptr;
continue;
}
// Otherwise, we should look at all the successors and decide which one
// we should sink to. If we have reliable block frequency information
// (frequency != 0) available, give successors with smaller frequencies
// higher priority, otherwise prioritize smaller loop depths.
for (MachineBasicBlock *SuccBlock :
GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
bool LocalUse = false;
if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
BreakPHIEdge, LocalUse)) {
SuccToSinkTo = SuccBlock;
break;
}
if (LocalUse)
// Def is used locally, it's never safe to move this def.
return nullptr;
}
// If we couldn't find a block to sink to, ignore this instruction.
if (!SuccToSinkTo)
return nullptr;
if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
return nullptr;
}
}
// It is not possible to sink an instruction into its own block. This can
// happen with loops.
if (MBB == SuccToSinkTo)
return nullptr;
// It's not safe to sink instructions to EH landing pad. Control flow into
// landing pad is implicitly defined.
if (SuccToSinkTo && SuccToSinkTo->isLandingPad())
return nullptr;
return SuccToSinkTo;
}
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore,
AllSuccsCache &AllSuccessors) {
// Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
// be close to the source to make it easier to coalesce.
if (AvoidsSinking(MI, MRI))
return false;
// Check if it's safe to move the instruction.
if (!MI->isSafeToMove(AA, SawStore))
return false;
// Convergent operations may only be moved to control equivalent locations.
if (MI->isConvergent())
return false;
// FIXME: This should include support for sinking instructions within the
// block they are currently in to shorten the live ranges. We often get
// instructions sunk into the top of a large block, but it would be better to
// also sink them down before their first use in the block. This xform has to
// be careful not to *increase* register pressure though, e.g. sinking
// "x = y + z" down if it kills y and z would increase the live ranges of y
// and z and only shrink the live range of x.
bool BreakPHIEdge = false;
MachineBasicBlock *ParentBlock = MI->getParent();
MachineBasicBlock *SuccToSinkTo =
FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);
// If there are no outputs, it must have side-effects.
if (!SuccToSinkTo)
return false;
// If the instruction to move defines a dead physical register which is live
// when leaving the basic block, don't move it because it could turn into a
// "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI->getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
if (SuccToSinkTo->isLiveIn(Reg))
return false;
}
DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);
// If the block has multiple predecessors, this is a critical edge.
// Decide if we can sink along it or need to break the edge.
if (SuccToSinkTo->pred_size() > 1) {
// We cannot sink a load across a critical edge - there may be stores in
// other code paths.
bool TryBreak = false;
bool store = true;
if (!MI->isSafeToMove(AA, store)) {
DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
TryBreak = true;
}
// We don't want to sink across a critical edge if we don't dominate the
// successor. We could be introducing calculations to new code paths.
if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
TryBreak = true;
}
// Don't sink instructions into a loop.
if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
DEBUG(dbgs() << " *** NOTE: Loop header found\n");
TryBreak = true;
}
// Otherwise we are OK with sinking along a critical edge.
if (!TryBreak)
DEBUG(dbgs() << "Sinking along critical edge.\n");
else {
// Mark this edge as to be split.
// If the edge can actually be split, the next iteration of the main loop
// will sink MI in the newly created block.
bool Status =
PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
if (!Status)
DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
"break critical edge\n");
// The instruction will not be sunk this time.
return false;
}
}
if (BreakPHIEdge) {
// BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
// break the critical edge first.
bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
SuccToSinkTo, BreakPHIEdge);
if (!Status)
DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
"break critical edge\n");
// The instruction will not be sunk this time.
return false;
}
// Determine where to insert into. Skip phi nodes.
MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
++InsertPos;
// collect matching debug values.
SmallVector<MachineInstr *, 2> DbgValuesToSink;
collectDebugValues(MI, DbgValuesToSink);
// Move the instruction.
SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
++MachineBasicBlock::iterator(MI));
// Move debug values.
for (SmallVectorImpl<MachineInstr *>::iterator DBI = DbgValuesToSink.begin(),
DBE = DbgValuesToSink.end(); DBI != DBE; ++DBI) {
MachineInstr *DbgMI = *DBI;
SuccToSinkTo->splice(InsertPos, ParentBlock, DbgMI,
++MachineBasicBlock::iterator(DbgMI));
}
// Conservatively, clear any kill flags, since it's possible that they are no
// longer correct.
// Note that we have to clear the kill flags for any register this instruction
// uses as we may sink over another instruction which currently kills the
// used registers.
for (MachineOperand &MO : MI->operands()) {
if (MO.isReg() && MO.isUse())
RegsToClearKillFlags.set(MO.getReg()); // Remember to clear kill flags.
}
return true;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/PHIElimination.cpp | //===-- PhiElimination.cpp - Eliminate PHI nodes by inserting copies ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates machine instruction PHI nodes by inserting copy
// instructions. This destroys SSA information, but is the desired input for
// some register allocators.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "PHIEliminationUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "phielim"
static cl::opt<bool>
DisableEdgeSplitting("disable-phi-elim-edge-splitting", cl::init(false),
cl::Hidden, cl::desc("Disable critical edge splitting "
"during PHI elimination"));
static cl::opt<bool>
SplitAllCriticalEdges("phi-elim-split-all-critical-edges", cl::init(false),
cl::Hidden, cl::desc("Split all critical edges during "
"PHI elimination"));
static cl::opt<bool> NoPhiElimLiveOutEarlyExit(
"no-phi-elim-live-out-early-exit", cl::init(false), cl::Hidden,
cl::desc("Do not use an early exit if isLiveOutPastPHIs returns true."));
namespace {
class PHIElimination : public MachineFunctionPass {
MachineRegisterInfo *MRI; // Machine register information
LiveVariables *LV;
LiveIntervals *LIS;
public:
static char ID; // Pass identification, replacement for typeid
PHIElimination() : MachineFunctionPass(ID) {
initializePHIEliminationPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &Fn) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
private:
/// EliminatePHINodes - Eliminate phi nodes by inserting copy instructions
/// in predecessor basic blocks.
///
bool EliminatePHINodes(MachineFunction &MF, MachineBasicBlock &MBB);
void LowerPHINode(MachineBasicBlock &MBB,
MachineBasicBlock::iterator LastPHIIt);
  /// analyzePHINodes - Gather information about the PHI nodes in the
  /// function. In particular, we count the uses of each virtual register
  /// that appears in a PHI node, keyed by the BB the vreg is coming from.
  /// This is used later to determine when the vreg is killed in the BB.
///
void analyzePHINodes(const MachineFunction& Fn);
/// Split critical edges where necessary for good coalescer performance.
bool SplitPHIEdges(MachineFunction &MF, MachineBasicBlock &MBB,
MachineLoopInfo *MLI);
// These functions are temporary abstractions around LiveVariables and
// LiveIntervals, so they can go away when LiveVariables does.
bool isLiveIn(unsigned Reg, const MachineBasicBlock *MBB);
bool isLiveOutPastPHIs(unsigned Reg, const MachineBasicBlock *MBB);
typedef std::pair<unsigned, unsigned> BBVRegPair;
typedef DenseMap<BBVRegPair, unsigned> VRegPHIUse;
VRegPHIUse VRegPHIUseCount;
// Defs of PHI sources which are implicit_def.
SmallPtrSet<MachineInstr*, 4> ImpDefs;
// Map reusable lowered PHI node -> incoming join register.
typedef DenseMap<MachineInstr*, unsigned,
MachineInstrExpressionTrait> LoweredPHIMap;
LoweredPHIMap LoweredPHIs;
};
}
STATISTIC(NumLowered, "Number of phis lowered");
STATISTIC(NumCriticalEdgesSplit, "Number of critical edges split");
STATISTIC(NumReused, "Number of reused lowered phis");
char PHIElimination::ID = 0;
char& llvm::PHIEliminationID = PHIElimination::ID;
INITIALIZE_PASS_BEGIN(PHIElimination, "phi-node-elimination",
"Eliminate PHI nodes for register allocation",
false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_END(PHIElimination, "phi-node-elimination",
"Eliminate PHI nodes for register allocation", false, false)
void PHIElimination::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<LiveVariables>();
AU.addPreserved<SlotIndexes>();
AU.addPreserved<LiveIntervals>();
AU.addPreserved<MachineDominatorTree>();
AU.addPreserved<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool PHIElimination::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
LV = getAnalysisIfAvailable<LiveVariables>();
LIS = getAnalysisIfAvailable<LiveIntervals>();
bool Changed = false;
// This pass takes the function out of SSA form.
MRI->leaveSSA();
// Split critical edges to help the coalescer. This does not yet support
// updating LiveIntervals, so we disable it.
if (!DisableEdgeSplitting && (LV || LIS)) {
MachineLoopInfo *MLI = getAnalysisIfAvailable<MachineLoopInfo>();
for (auto &MBB : MF)
Changed |= SplitPHIEdges(MF, MBB, MLI);
}
// Populate VRegPHIUseCount
analyzePHINodes(MF);
// Eliminate PHI instructions by inserting copies into predecessor blocks.
for (auto &MBB : MF)
Changed |= EliminatePHINodes(MF, MBB);
// Remove dead IMPLICIT_DEF instructions.
for (MachineInstr *DefMI : ImpDefs) {
unsigned DefReg = DefMI->getOperand(0).getReg();
if (MRI->use_nodbg_empty(DefReg)) {
if (LIS)
LIS->RemoveMachineInstrFromMaps(DefMI);
DefMI->eraseFromParent();
}
}
// Clean up the lowered PHI instructions.
for (LoweredPHIMap::iterator I = LoweredPHIs.begin(), E = LoweredPHIs.end();
I != E; ++I) {
if (LIS)
LIS->RemoveMachineInstrFromMaps(I->first);
MF.DeleteMachineInstr(I->first);
}
LoweredPHIs.clear();
ImpDefs.clear();
VRegPHIUseCount.clear();
return Changed;
}
/// EliminatePHINodes - Eliminate phi nodes by inserting copy instructions in
/// predecessor basic blocks.
///
bool PHIElimination::EliminatePHINodes(MachineFunction &MF,
MachineBasicBlock &MBB) {
if (MBB.empty() || !MBB.front().isPHI())
return false; // Quick exit for basic blocks without PHIs.
// Get an iterator to the first instruction after the last PHI node (this may
// also be the end of the basic block).
MachineBasicBlock::iterator LastPHIIt =
std::prev(MBB.SkipPHIsAndLabels(MBB.begin()));
while (MBB.front().isPHI())
LowerPHINode(MBB, LastPHIIt);
return true;
}
/// isImplicitlyDefined - Return true if all defs of VirtReg are implicit-defs.
/// This includes registers with no defs.
static bool isImplicitlyDefined(unsigned VirtReg,
const MachineRegisterInfo *MRI) {
for (MachineInstr &DI : MRI->def_instructions(VirtReg))
if (!DI.isImplicitDef())
return false;
return true;
}
/// isSourceDefinedByImplicitDef - Return true if all sources of the phi node
/// are implicit_def's.
static bool isSourceDefinedByImplicitDef(const MachineInstr *MPhi,
const MachineRegisterInfo *MRI) {
for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2)
if (!isImplicitlyDefined(MPhi->getOperand(i).getReg(), MRI))
return false;
return true;
}
/// LowerPHINode - Lower the PHI node at the top of the specified block.
///
void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
MachineBasicBlock::iterator LastPHIIt) {
++NumLowered;
MachineBasicBlock::iterator AfterPHIsIt = std::next(LastPHIIt);
// Unlink the PHI node from the basic block, but don't delete the PHI yet.
MachineInstr *MPhi = MBB.remove(MBB.begin());
unsigned NumSrcs = (MPhi->getNumOperands() - 1) / 2;
unsigned DestReg = MPhi->getOperand(0).getReg();
assert(MPhi->getOperand(0).getSubReg() == 0 && "Can't handle sub-reg PHIs");
bool isDead = MPhi->getOperand(0).isDead();
// Create a new register for the incoming PHI arguments.
MachineFunction &MF = *MBB.getParent();
unsigned IncomingReg = 0;
bool reusedIncoming = false; // Is IncomingReg reused from an earlier PHI?
// Insert a register to register copy at the top of the current block (but
// after any remaining phi nodes) which copies the new incoming register
// into the phi node destination.
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
if (isSourceDefinedByImplicitDef(MPhi, MRI))
// If all sources of a PHI node are implicit_def, just emit an
// implicit_def instead of a copy.
BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
else {
// Can we reuse an earlier PHI node? This only happens for critical edges,
// typically those created by tail duplication.
unsigned &entry = LoweredPHIs[MPhi];
if (entry) {
// An identical PHI node was already lowered. Reuse the incoming register.
IncomingReg = entry;
reusedIncoming = true;
++NumReused;
DEBUG(dbgs() << "Reusing " << PrintReg(IncomingReg) << " for " << *MPhi);
} else {
const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
}
BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
TII->get(TargetOpcode::COPY), DestReg)
.addReg(IncomingReg);
}
// Update live variable information if there is any.
if (LV) {
MachineInstr *PHICopy = std::prev(AfterPHIsIt);
if (IncomingReg) {
LiveVariables::VarInfo &VI = LV->getVarInfo(IncomingReg);
// Increment use count of the newly created virtual register.
LV->setPHIJoin(IncomingReg);
// When we are reusing the incoming register, it may already have been
// killed in this block. The old kill will also have been inserted at
// AfterPHIsIt, so it appears before the current PHICopy.
if (reusedIncoming)
if (MachineInstr *OldKill = VI.findKill(&MBB)) {
DEBUG(dbgs() << "Remove old kill from " << *OldKill);
LV->removeVirtualRegisterKilled(IncomingReg, OldKill);
DEBUG(MBB.dump());
}
// Add information to LiveVariables to know that the incoming value is
// killed. Note that because the value is defined in several places (once
// each for each incoming block), the "def" block and instruction fields
// for the VarInfo is not filled in.
LV->addVirtualRegisterKilled(IncomingReg, PHICopy);
}
// Since we are going to be deleting the PHI node, if it is the last use of
// any registers, or if the value itself is dead, we need to move this
// information over to the new copy we just inserted.
LV->removeVirtualRegistersKilled(MPhi);
// If the result is dead, update LV.
if (isDead) {
LV->addVirtualRegisterDead(DestReg, PHICopy);
LV->removeVirtualRegisterDead(DestReg, MPhi);
}
}
// Update LiveIntervals for the new copy or implicit def.
if (LIS) {
MachineInstr *NewInstr = std::prev(AfterPHIsIt);
SlotIndex DestCopyIndex = LIS->InsertMachineInstrInMaps(NewInstr);
SlotIndex MBBStartIndex = LIS->getMBBStartIdx(&MBB);
if (IncomingReg) {
// Add the region from the beginning of MBB to the copy instruction to
// IncomingReg's live interval.
LiveInterval &IncomingLI = LIS->createEmptyInterval(IncomingReg);
VNInfo *IncomingVNI = IncomingLI.getVNInfoAt(MBBStartIndex);
if (!IncomingVNI)
IncomingVNI = IncomingLI.getNextValue(MBBStartIndex,
LIS->getVNInfoAllocator());
IncomingLI.addSegment(LiveInterval::Segment(MBBStartIndex,
DestCopyIndex.getRegSlot(),
IncomingVNI));
}
LiveInterval &DestLI = LIS->getInterval(DestReg);
assert(DestLI.begin() != DestLI.end() &&
"PHIs should have nonempty LiveIntervals.");
if (DestLI.endIndex().isDead()) {
// A dead PHI's live range begins and ends at the start of the MBB, but
// the lowered copy, which will still be dead, needs to begin and end at
// the copy instruction.
VNInfo *OrigDestVNI = DestLI.getVNInfoAt(MBBStartIndex);
assert(OrigDestVNI && "PHI destination should be live at block entry.");
DestLI.removeSegment(MBBStartIndex, MBBStartIndex.getDeadSlot());
DestLI.createDeadDef(DestCopyIndex.getRegSlot(),
LIS->getVNInfoAllocator());
DestLI.removeValNo(OrigDestVNI);
} else {
// Otherwise, remove the region from the beginning of MBB to the copy
// instruction from DestReg's live interval.
DestLI.removeSegment(MBBStartIndex, DestCopyIndex.getRegSlot());
VNInfo *DestVNI = DestLI.getVNInfoAt(DestCopyIndex.getRegSlot());
assert(DestVNI && "PHI destination should be live at its definition.");
DestVNI->def = DestCopyIndex.getRegSlot();
}
}
// Adjust the VRegPHIUseCount map to account for the removal of this PHI node.
for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2)
--VRegPHIUseCount[BBVRegPair(MPhi->getOperand(i+1).getMBB()->getNumber(),
MPhi->getOperand(i).getReg())];
// Now loop over all of the incoming arguments, changing them to copy into the
// IncomingReg register in the corresponding predecessor basic block.
SmallPtrSet<MachineBasicBlock*, 8> MBBsInsertedInto;
for (int i = NumSrcs - 1; i >= 0; --i) {
unsigned SrcReg = MPhi->getOperand(i*2+1).getReg();
unsigned SrcSubReg = MPhi->getOperand(i*2+1).getSubReg();
bool SrcUndef = MPhi->getOperand(i*2+1).isUndef() ||
isImplicitlyDefined(SrcReg, MRI);
assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
"Machine PHI Operands must all be virtual registers!");
    // Get the MachineBasicBlock that this PHI operand flows in from, i.e. the
    // predecessor block on the corresponding CFG edge.
MachineBasicBlock &opBlock = *MPhi->getOperand(i*2+2).getMBB();
// Check to make sure we haven't already emitted the copy for this block.
// This can happen because PHI nodes may have multiple entries for the same
// basic block.
if (!MBBsInsertedInto.insert(&opBlock).second)
continue; // If the copy has already been emitted, we're done.
// Find a safe location to insert the copy, this may be the first terminator
// in the block (or end()).
MachineBasicBlock::iterator InsertPos =
findPHICopyInsertPoint(&opBlock, &MBB, SrcReg);
// Insert the copy.
MachineInstr *NewSrcInstr = nullptr;
if (!reusedIncoming && IncomingReg) {
if (SrcUndef) {
// The source register is undefined, so there is no need for a real
// COPY, but we still need to ensure joint dominance by defs.
// Insert an IMPLICIT_DEF instruction.
NewSrcInstr = BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF),
IncomingReg);
// Clean up the old implicit-def, if there even was one.
if (MachineInstr *DefMI = MRI->getVRegDef(SrcReg))
if (DefMI->isImplicitDef())
ImpDefs.insert(DefMI);
} else {
NewSrcInstr = BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
TII->get(TargetOpcode::COPY), IncomingReg)
.addReg(SrcReg, 0, SrcSubReg);
}
}
// We only need to update the LiveVariables kill of SrcReg if this was the
// last PHI use of SrcReg to be lowered on this CFG edge and it is not live
// out of the predecessor. We can also ignore undef sources.
if (LV && !SrcUndef &&
!VRegPHIUseCount[BBVRegPair(opBlock.getNumber(), SrcReg)] &&
!LV->isLiveOut(SrcReg, opBlock)) {
// We want to be able to insert a kill of the register if this PHI (aka,
// the copy we just inserted) is the last use of the source value. Live
// variable analysis conservatively handles this by saying that the value
// is live until the end of the block the PHI entry lives in. If the value
// really is dead at the PHI copy, there will be no successor blocks which
// have the value live-in.
// Okay, if we now know that the value is not live out of the block, we
// can add a kill marker in this block saying that it kills the incoming
// value!
      // In our final twist, we have to decide which instruction kills the
      // register. In most cases this is the copy, however, terminator
      // instructions at the end of the block may also use the value. In this
      // case, we should mark the last such terminator as the killing
      // instruction, not the copy.
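      // For example (an illustrative sketch, not real compiler output):
      //   %IncomingReg = COPY %SrcReg   ; usually the kill
      //   BRCOND %SrcReg, <BB#1>        ; a terminator use becomes the kill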
MachineBasicBlock::iterator KillInst = opBlock.end();
MachineBasicBlock::iterator FirstTerm = opBlock.getFirstTerminator();
for (MachineBasicBlock::iterator Term = FirstTerm;
Term != opBlock.end(); ++Term) {
if (Term->readsRegister(SrcReg))
KillInst = Term;
}
if (KillInst == opBlock.end()) {
// No terminator uses the register.
if (reusedIncoming || !IncomingReg) {
// We may have to rewind a bit if we didn't insert a copy this time.
KillInst = FirstTerm;
while (KillInst != opBlock.begin()) {
--KillInst;
if (KillInst->isDebugValue())
continue;
if (KillInst->readsRegister(SrcReg))
break;
}
} else {
// We just inserted this copy.
KillInst = std::prev(InsertPos);
}
}
assert(KillInst->readsRegister(SrcReg) && "Cannot find kill instruction");
// Finally, mark it killed.
LV->addVirtualRegisterKilled(SrcReg, KillInst);
// This vreg no longer lives all of the way through opBlock.
unsigned opBlockNum = opBlock.getNumber();
LV->getVarInfo(SrcReg).AliveBlocks.reset(opBlockNum);
}
if (LIS) {
if (NewSrcInstr) {
LIS->InsertMachineInstrInMaps(NewSrcInstr);
LIS->addSegmentToEndOfBlock(IncomingReg, NewSrcInstr);
}
if (!SrcUndef &&
!VRegPHIUseCount[BBVRegPair(opBlock.getNumber(), SrcReg)]) {
LiveInterval &SrcLI = LIS->getInterval(SrcReg);
bool isLiveOut = false;
for (MachineBasicBlock::succ_iterator SI = opBlock.succ_begin(),
SE = opBlock.succ_end(); SI != SE; ++SI) {
SlotIndex startIdx = LIS->getMBBStartIdx(*SI);
VNInfo *VNI = SrcLI.getVNInfoAt(startIdx);
// Definitions by other PHIs are not truly live-in for our purposes.
if (VNI && VNI->def != startIdx) {
isLiveOut = true;
break;
}
}
if (!isLiveOut) {
MachineBasicBlock::iterator KillInst = opBlock.end();
MachineBasicBlock::iterator FirstTerm = opBlock.getFirstTerminator();
for (MachineBasicBlock::iterator Term = FirstTerm;
Term != opBlock.end(); ++Term) {
if (Term->readsRegister(SrcReg))
KillInst = Term;
}
if (KillInst == opBlock.end()) {
// No terminator uses the register.
if (reusedIncoming || !IncomingReg) {
// We may have to rewind a bit if we didn't just insert a copy.
KillInst = FirstTerm;
while (KillInst != opBlock.begin()) {
--KillInst;
if (KillInst->isDebugValue())
continue;
if (KillInst->readsRegister(SrcReg))
break;
}
} else {
// We just inserted this copy.
KillInst = std::prev(InsertPos);
}
}
assert(KillInst->readsRegister(SrcReg) &&
"Cannot find kill instruction");
SlotIndex LastUseIndex = LIS->getInstructionIndex(KillInst);
SrcLI.removeSegment(LastUseIndex.getRegSlot(),
LIS->getMBBEndIdx(&opBlock));
}
}
}
}
// Really delete the PHI instruction now, if it is not in the LoweredPHIs map.
if (reusedIncoming || !IncomingReg) {
if (LIS)
LIS->RemoveMachineInstrFromMaps(MPhi);
MF.DeleteMachineInstr(MPhi);
}
}
/// analyzePHINodes - Gather information about the PHI nodes in this function.
/// In particular, we count the uses of each virtual register in PHI nodes,
/// keyed by the predecessor basic block the value comes from. This is used
/// later to determine when the vreg is killed in that block.
///
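/// The operands of a machine PHI are laid out as the destination register
/// followed by (incoming register, predecessor block) pairs; an illustrative
/// example:
///
///   %vreg2 = PHI %vreg0, <BB#1>, %vreg1, <BB#2>
///
/// which is why the loop below steps by two starting at operand 1.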
void PHIElimination::analyzePHINodes(const MachineFunction& MF) {
for (const auto &MBB : MF)
for (const auto &BBI : MBB) {
if (!BBI.isPHI())
break;
for (unsigned i = 1, e = BBI.getNumOperands(); i != e; i += 2)
++VRegPHIUseCount[BBVRegPair(BBI.getOperand(i+1).getMBB()->getNumber(),
BBI.getOperand(i).getReg())];
}
}
bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineLoopInfo *MLI) {
if (MBB.empty() || !MBB.front().isPHI() || MBB.isLandingPad())
return false; // Quick exit for basic blocks without PHIs.
const MachineLoop *CurLoop = MLI ? MLI->getLoopFor(&MBB) : nullptr;
bool IsLoopHeader = CurLoop && &MBB == CurLoop->getHeader();
bool Changed = false;
for (MachineBasicBlock::iterator BBI = MBB.begin(), BBE = MBB.end();
BBI != BBE && BBI->isPHI(); ++BBI) {
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
unsigned Reg = BBI->getOperand(i).getReg();
MachineBasicBlock *PreMBB = BBI->getOperand(i+1).getMBB();
// Is there a critical edge from PreMBB to MBB?
if (PreMBB->succ_size() == 1)
continue;
      // Avoid splitting backedges of loops. It would introduce small
      // out-of-line blocks into the loop, which is bad for code placement.
if (PreMBB == &MBB && !SplitAllCriticalEdges)
continue;
const MachineLoop *PreLoop = MLI ? MLI->getLoopFor(PreMBB) : nullptr;
if (IsLoopHeader && PreLoop == CurLoop && !SplitAllCriticalEdges)
continue;
// LV doesn't consider a phi use live-out, so isLiveOut only returns true
// when the source register is live-out for some other reason than a phi
// use. That means the copy we will insert in PreMBB won't be a kill, and
// there is a risk it may not be coalesced away.
//
// If the copy would be a kill, there is no need to split the edge.
bool ShouldSplit = isLiveOutPastPHIs(Reg, PreMBB);
if (!ShouldSplit && !NoPhiElimLiveOutEarlyExit)
continue;
if (ShouldSplit) {
DEBUG(dbgs() << PrintReg(Reg) << " live-out before critical edge BB#"
<< PreMBB->getNumber() << " -> BB#" << MBB.getNumber()
<< ": " << *BBI);
}
// If Reg is not live-in to MBB, it means it must be live-in to some
// other PreMBB successor, and we can avoid the interference by splitting
// the edge.
//
// If Reg *is* live-in to MBB, the interference is inevitable and a copy
// is likely to be left after coalescing. If we are looking at a loop
// exiting edge, split it so we won't insert code in the loop, otherwise
// don't bother.
ShouldSplit = ShouldSplit && !isLiveIn(Reg, &MBB);
// Check for a loop exiting edge.
if (!ShouldSplit && CurLoop != PreLoop) {
DEBUG({
dbgs() << "Split wouldn't help, maybe avoid loop copies?\n";
if (PreLoop) dbgs() << "PreLoop: " << *PreLoop;
if (CurLoop) dbgs() << "CurLoop: " << *CurLoop;
});
        // This edge could be entering a loop, exiting a loop, or it could be
        // both: jumping directly from one loop to the header of a sibling
        // loop.
// Split unless this edge is entering CurLoop from an outer loop.
ShouldSplit = PreLoop && !PreLoop->contains(CurLoop);
}
if (!ShouldSplit && !SplitAllCriticalEdges)
continue;
if (!PreMBB->SplitCriticalEdge(&MBB, this)) {
DEBUG(dbgs() << "Failed to split critical edge.\n");
continue;
}
Changed = true;
++NumCriticalEdgesSplit;
}
}
return Changed;
}
bool PHIElimination::isLiveIn(unsigned Reg, const MachineBasicBlock *MBB) {
assert((LV || LIS) &&
"isLiveIn() requires either LiveVariables or LiveIntervals");
if (LIS)
return LIS->isLiveInToMBB(LIS->getInterval(Reg), MBB);
else
return LV->isLiveIn(Reg, *MBB);
}
bool PHIElimination::isLiveOutPastPHIs(unsigned Reg,
const MachineBasicBlock *MBB) {
assert((LV || LIS) &&
"isLiveOutPastPHIs() requires either LiveVariables or LiveIntervals");
  // LiveVariables considers uses in PHIs to be in the predecessor basic
  // block, so that a register used only in a PHI is not live out of the
  // block. In contrast, LiveIntervals considers uses in PHIs to be on the
  // edge rather than in the predecessor basic block, so that a register used
  // only in a PHI is live out of the block.
if (LIS) {
const LiveInterval &LI = LIS->getInterval(Reg);
for (const MachineBasicBlock *SI : MBB->successors())
if (LI.liveAt(LIS->getMBBStartIdx(SI)))
return true;
return false;
} else {
return LV->isLiveOut(Reg, *MBB);
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineSSAUpdater.cpp | //===- MachineSSAUpdater.cpp - Unstructured SSA Update Tool ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the MachineSSAUpdater class. It is based on the
// SSAUpdater class in lib/Transforms/Utils.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
using namespace llvm;
#define DEBUG_TYPE "machine-ssaupdater"
typedef DenseMap<MachineBasicBlock*, unsigned> AvailableValsTy;
static AvailableValsTy &getAvailableVals(void *AV) {
return *static_cast<AvailableValsTy*>(AV);
}
MachineSSAUpdater::MachineSSAUpdater(MachineFunction &MF,
SmallVectorImpl<MachineInstr*> *NewPHI)
: AV(nullptr), InsertedPHIs(NewPHI) {
TII = MF.getSubtarget().getInstrInfo();
MRI = &MF.getRegInfo();
}
MachineSSAUpdater::~MachineSSAUpdater() {
delete static_cast<AvailableValsTy*>(AV);
}
/// Initialize - Reset this object to get ready for a new set of SSA
/// updates. ProtoValue is the value used to name PHI nodes.
void MachineSSAUpdater::Initialize(unsigned V) {
if (!AV)
AV = new AvailableValsTy();
else
getAvailableVals(AV).clear();
VR = V;
VRC = MRI->getRegClass(VR);
}
/// HasValueForBlock - Return true if the MachineSSAUpdater already has a
/// value for the specified block.
bool MachineSSAUpdater::HasValueForBlock(MachineBasicBlock *BB) const {
return getAvailableVals(AV).count(BB);
}
/// AddAvailableValue - Indicate that a rewritten value is available in the
/// specified block with the specified value.
void MachineSSAUpdater::AddAvailableValue(MachineBasicBlock *BB, unsigned V) {
getAvailableVals(AV)[BB] = V;
}
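// A minimal usage sketch (illustrative only; OldReg, DefBB, NewReg and MO
// are hypothetical names, not part of this file):
//
//   MachineSSAUpdater Updater(MF);
//   Updater.Initialize(OldReg);               // name new PHIs after OldReg
//   Updater.AddAvailableValue(DefBB, NewReg); // NewReg is live out of DefBB
//   Updater.RewriteUse(MO);                   // rewrite a use of OldReg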
/// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
/// live at the end of the specified block.
unsigned MachineSSAUpdater::GetValueAtEndOfBlock(MachineBasicBlock *BB) {
return GetValueAtEndOfBlockInternal(BB);
}
static
unsigned LookForIdenticalPHI(MachineBasicBlock *BB,
SmallVectorImpl<std::pair<MachineBasicBlock*, unsigned> > &PredValues) {
if (BB->empty())
return 0;
MachineBasicBlock::iterator I = BB->begin();
if (!I->isPHI())
return 0;
AvailableValsTy AVals;
for (unsigned i = 0, e = PredValues.size(); i != e; ++i)
AVals[PredValues[i].first] = PredValues[i].second;
while (I != BB->end() && I->isPHI()) {
bool Same = true;
for (unsigned i = 1, e = I->getNumOperands(); i != e; i += 2) {
unsigned SrcReg = I->getOperand(i).getReg();
MachineBasicBlock *SrcBB = I->getOperand(i+1).getMBB();
if (AVals[SrcBB] != SrcReg) {
Same = false;
break;
}
}
if (Same)
return I->getOperand(0).getReg();
++I;
}
return 0;
}
/// InsertNewDef - Insert an empty PHI or IMPLICIT_DEF instruction which
/// defines a value of the given register class at the start of the specified
/// basic block. It returns the virtual register defined by the instruction.
static
MachineInstrBuilder InsertNewDef(unsigned Opcode,
MachineBasicBlock *BB, MachineBasicBlock::iterator I,
const TargetRegisterClass *RC,
MachineRegisterInfo *MRI,
const TargetInstrInfo *TII) {
unsigned NewVR = MRI->createVirtualRegister(RC);
return BuildMI(*BB, I, DebugLoc(), TII->get(Opcode), NewVR);
}
/// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
/// is live in the middle of the specified block.
///
/// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
/// important case: if there is a definition of the rewritten value after the
/// 'use' in BB. Consider code like this:
///
/// X1 = ...
/// SomeBB:
/// use(X)
/// X2 = ...
/// br Cond, SomeBB, OutBB
///
/// In this case, there are two values (X1 and X2) added to the AvailableVals
/// set by the client of the rewriter, and those values are both live out of
/// their respective blocks. However, the use of X happens in the *middle* of
/// a block. Because of this, we need to insert a new PHI node in SomeBB to
/// merge the appropriate values, and this value isn't live out of the block.
///
unsigned MachineSSAUpdater::GetValueInMiddleOfBlock(MachineBasicBlock *BB) {
// If there is no definition of the renamed variable in this block, just use
// GetValueAtEndOfBlock to do our work.
if (!HasValueForBlock(BB))
return GetValueAtEndOfBlockInternal(BB);
// If there are no predecessors, just return undef.
if (BB->pred_empty()) {
// Insert an implicit_def to represent an undef value.
MachineInstr *NewDef = InsertNewDef(TargetOpcode::IMPLICIT_DEF,
BB, BB->getFirstTerminator(),
VRC, MRI, TII);
return NewDef->getOperand(0).getReg();
}
// Otherwise, we have the hard case. Get the live-in values for each
// predecessor.
SmallVector<std::pair<MachineBasicBlock*, unsigned>, 8> PredValues;
unsigned SingularValue = 0;
bool isFirstPred = true;
for (MachineBasicBlock::pred_iterator PI = BB->pred_begin(),
E = BB->pred_end(); PI != E; ++PI) {
MachineBasicBlock *PredBB = *PI;
unsigned PredVal = GetValueAtEndOfBlockInternal(PredBB);
PredValues.push_back(std::make_pair(PredBB, PredVal));
// Compute SingularValue.
if (isFirstPred) {
SingularValue = PredVal;
isFirstPred = false;
} else if (PredVal != SingularValue)
SingularValue = 0;
}
  // If all the merged values are the same, just use it.
if (SingularValue != 0)
return SingularValue;
// If an identical PHI is already in BB, just reuse it.
unsigned DupPHI = LookForIdenticalPHI(BB, PredValues);
if (DupPHI)
return DupPHI;
// Otherwise, we do need a PHI: insert one now.
MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->begin();
MachineInstrBuilder InsertedPHI = InsertNewDef(TargetOpcode::PHI, BB,
Loc, VRC, MRI, TII);
// Fill in all the predecessors of the PHI.
for (unsigned i = 0, e = PredValues.size(); i != e; ++i)
InsertedPHI.addReg(PredValues[i].second).addMBB(PredValues[i].first);
// See if the PHI node can be merged to a single value. This can happen in
// loop cases when we get a PHI of itself and one other value.
if (unsigned ConstVal = InsertedPHI->isConstantValuePHI()) {
InsertedPHI->eraseFromParent();
return ConstVal;
}
// If the client wants to know about all new instructions, tell it.
if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI);
DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n");
return InsertedPHI->getOperand(0).getReg();
}
static
MachineBasicBlock *findCorrespondingPred(const MachineInstr *MI,
MachineOperand *U) {
for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
if (&MI->getOperand(i) == U)
return MI->getOperand(i+1).getMBB();
}
llvm_unreachable("MachineOperand::getParent() failure?");
}
/// RewriteUse - Rewrite a use of the symbolic value. This handles PHI nodes,
/// which use their value in the corresponding predecessor.
void MachineSSAUpdater::RewriteUse(MachineOperand &U) {
MachineInstr *UseMI = U.getParent();
unsigned NewVR = 0;
if (UseMI->isPHI()) {
MachineBasicBlock *SourceBB = findCorrespondingPred(UseMI, &U);
NewVR = GetValueAtEndOfBlockInternal(SourceBB);
} else {
NewVR = GetValueInMiddleOfBlock(UseMI->getParent());
}
U.setReg(NewVR);
}
/// SSAUpdaterTraits<MachineSSAUpdater> - Traits for the SSAUpdaterImpl
/// template, specialized for MachineSSAUpdater.
namespace llvm {
template<>
class SSAUpdaterTraits<MachineSSAUpdater> {
public:
typedef MachineBasicBlock BlkT;
typedef unsigned ValT;
typedef MachineInstr PhiT;
typedef MachineBasicBlock::succ_iterator BlkSucc_iterator;
static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); }
static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); }
/// Iterator for PHI operands.
class PHI_iterator {
private:
MachineInstr *PHI;
unsigned idx;
public:
explicit PHI_iterator(MachineInstr *P) // begin iterator
: PHI(P), idx(1) {}
PHI_iterator(MachineInstr *P, bool) // end iterator
: PHI(P), idx(PHI->getNumOperands()) {}
PHI_iterator &operator++() { idx += 2; return *this; }
bool operator==(const PHI_iterator& x) const { return idx == x.idx; }
bool operator!=(const PHI_iterator& x) const { return !operator==(x); }
unsigned getIncomingValue() { return PHI->getOperand(idx).getReg(); }
MachineBasicBlock *getIncomingBlock() {
return PHI->getOperand(idx+1).getMBB();
}
};
static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
static inline PHI_iterator PHI_end(PhiT *PHI) {
return PHI_iterator(PHI, true);
}
/// FindPredecessorBlocks - Put the predecessors of BB into the Preds
/// vector.
static void FindPredecessorBlocks(MachineBasicBlock *BB,
SmallVectorImpl<MachineBasicBlock*> *Preds){
for (MachineBasicBlock::pred_iterator PI = BB->pred_begin(),
E = BB->pred_end(); PI != E; ++PI)
Preds->push_back(*PI);
}
/// GetUndefVal - Create an IMPLICIT_DEF instruction with a new register.
/// Add it into the specified block and return the register.
static unsigned GetUndefVal(MachineBasicBlock *BB,
MachineSSAUpdater *Updater) {
// Insert an implicit_def to represent an undef value.
MachineInstr *NewDef = InsertNewDef(TargetOpcode::IMPLICIT_DEF,
BB, BB->getFirstTerminator(),
Updater->VRC, Updater->MRI,
Updater->TII);
return NewDef->getOperand(0).getReg();
}
/// CreateEmptyPHI - Create a PHI instruction that defines a new register.
/// Add it into the specified block and return the register.
static unsigned CreateEmptyPHI(MachineBasicBlock *BB, unsigned NumPreds,
MachineSSAUpdater *Updater) {
MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->begin();
MachineInstr *PHI = InsertNewDef(TargetOpcode::PHI, BB, Loc,
Updater->VRC, Updater->MRI,
Updater->TII);
return PHI->getOperand(0).getReg();
}
/// AddPHIOperand - Add the specified value as an operand of the PHI for
/// the specified predecessor block.
static void AddPHIOperand(MachineInstr *PHI, unsigned Val,
MachineBasicBlock *Pred) {
MachineInstrBuilder(*Pred->getParent(), PHI).addReg(Val).addMBB(Pred);
}
/// InstrIsPHI - Check if an instruction is a PHI.
///
static MachineInstr *InstrIsPHI(MachineInstr *I) {
if (I && I->isPHI())
return I;
return nullptr;
}
/// ValueIsPHI - Check if the instruction that defines the specified register
/// is a PHI instruction.
static MachineInstr *ValueIsPHI(unsigned Val, MachineSSAUpdater *Updater) {
return InstrIsPHI(Updater->MRI->getVRegDef(Val));
}
/// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source
/// operands, i.e., it was just added.
static MachineInstr *ValueIsNewPHI(unsigned Val, MachineSSAUpdater *Updater) {
MachineInstr *PHI = ValueIsPHI(Val, Updater);
if (PHI && PHI->getNumOperands() <= 1)
return PHI;
return nullptr;
}
/// GetPHIValue - For the specified PHI instruction, return the register
/// that it defines.
static unsigned GetPHIValue(MachineInstr *PHI) {
return PHI->getOperand(0).getReg();
}
};
} // End llvm namespace
/// GetValueAtEndOfBlockInternal - Check to see if AvailableVals has an entry
/// for the specified BB and if so, return it. If not, construct SSA form by
/// first calculating the required placement of PHIs and then inserting new
/// PHIs where needed.
unsigned MachineSSAUpdater::GetValueAtEndOfBlockInternal(MachineBasicBlock *BB){
AvailableValsTy &AvailableVals = getAvailableVals(AV);
if (unsigned V = AvailableVals[BB])
return V;
SSAUpdaterImpl<MachineSSAUpdater> Impl(this, &AvailableVals, InsertedPHIs);
return Impl.GetValue(BB);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineScheduler.cpp | //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>
using namespace llvm;
#define DEBUG_TYPE "misched"
namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
cl::desc("Print critical path length to stdout"));
}
#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
cl::desc("Pop up a window to show MISched dags after they are processed"));
static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG
static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
cl::desc("Enable register pressure scheduling."), cl::init(true));
static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
cl::desc("Enable cyclic critical path analysis."), cl::init(true));
static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
cl::desc("Enable load clustering."), cl::init(true));
// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
cl::desc("Enable scheduling for macro fusion."), cl::init(true));
static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
cl::desc("Verify machine instrs before and after machine scheduling"));
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;
// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//
MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr),
    PassConfig(nullptr), AA(nullptr), LIS(nullptr) {
RegClassInfo = new RegisterClassInfo();
}
MachineSchedContext::~MachineSchedContext() {
delete RegClassInfo;
}
namespace {
/// Base class for machine schedulers that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
public MachineFunctionPass {
public:
MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}
void print(raw_ostream &O, const Module* = nullptr) const override;
protected:
void scheduleRegions(ScheduleDAGInstrs &Scheduler);
};
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
MachineScheduler();
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction&) override;
static char ID; // Class identification, replacement for typeinfo
protected:
ScheduleDAGInstrs *createMachineScheduler();
};
/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
PostMachineScheduler();
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction&) override;
static char ID; // Class identification, replacement for typeinfo
protected:
ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace
char MachineScheduler::ID = 0;
char &llvm::MachineSchedulerID = MachineScheduler::ID;
INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
"Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
"Machine Instruction Scheduler", false, false)
MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequiredID(MachineDominatorsID);
AU.addRequired<MachineLoopInfo>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetPassConfig>();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
MachineFunctionPass::getAnalysisUsage(AU);
}
char PostMachineScheduler::ID = 0;
char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;
INITIALIZE_PASS(PostMachineScheduler, "postmisched",
"PostRA Machine Instruction Scheduler", false, false)
PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequiredID(MachineDominatorsID);
AU.addRequired<MachineLoopInfo>();
AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
}
MachinePassRegistry MachineSchedRegistry::Registry;
/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
return nullptr;
}
/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
cl::init(&useDefaultMachineSched), cl::Hidden,
cl::desc("Machine instruction scheduler to use"));
static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
useDefaultMachineSched);
static cl::opt<bool> EnableMachineSched(
"enable-misched",
cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
cl::Hidden);
/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);
/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
MachineBasicBlock::const_iterator Beg) {
assert(I != Beg && "reached the top of the region, cannot decrement");
while (--I != Beg) {
if (!I->isDebugValue())
break;
}
return I;
}
/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
MachineBasicBlock::const_iterator Beg) {
return const_cast<MachineInstr*>(
&*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}
/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
MachineBasicBlock::const_iterator End) {
for(; I != End; ++I) {
if (!I->isDebugValue())
break;
}
return I;
}
/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
MachineBasicBlock::const_iterator End) {
// Cast the return value to nonconst MachineInstr, then cast to an
// instr_iterator, which does not check for null, finally return a
// bundle_iterator.
return MachineBasicBlock::instr_iterator(
const_cast<MachineInstr*>(
&*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
// Select the scheduler, or set the default.
MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
if (Ctor != useDefaultMachineSched)
return Ctor(this);
// Get the default scheduler set by the target for this function.
ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
if (Scheduler)
return Scheduler;
// Default to GenericScheduler.
return createGenericSchedLive(this);
}
/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
// Get the postRA scheduler set by the target for this function.
ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
if (Scheduler)
return Scheduler;
// Default to GenericScheduler.
return createGenericSchedPostRA(this);
}
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
if (EnableMachineSched.getNumOccurrences()) {
if (!EnableMachineSched)
return false;
} else if (!mf.getSubtarget().enableMachineScheduler())
return false;
DEBUG(dbgs() << "Before MISsched:\n"; mf.print(dbgs()));
// Initialize the context of the pass.
MF = &mf;
MLI = &getAnalysis<MachineLoopInfo>();
MDT = &getAnalysis<MachineDominatorTree>();
PassConfig = &getAnalysis<TargetPassConfig>();
AA = &getAnalysis<AliasAnalysis>();
LIS = &getAnalysis<LiveIntervals>();
if (VerifyScheduling) {
DEBUG(LIS->dump());
MF->verify(this, "Before machine scheduling.");
}
RegClassInfo->runOnMachineFunction(*MF);
// Instantiate the selected scheduler for this target, function, and
// optimization level.
std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
scheduleRegions(*Scheduler);
DEBUG(LIS->dump());
if (VerifyScheduling)
MF->verify(this, "After machine scheduling.");
return true;
}
bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
if (skipOptnoneFunction(*mf.getFunction()))
return false;
if (!mf.getSubtarget().enablePostRAScheduler()) {
DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
return false;
}
DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));
// Initialize the context of the pass.
MF = &mf;
PassConfig = &getAnalysis<TargetPassConfig>();
if (VerifyScheduling)
MF->verify(this, "Before post machine scheduling.");
// Instantiate the selected scheduler for this target, function, and
// optimization level.
std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
scheduleRegions(*Scheduler);
if (VerifyScheduling)
MF->verify(this, "After post machine scheduling.");
return true;
}
/// Return true if the given instruction should not be included in a
/// scheduling region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB,
MachineFunction *MF,
const TargetInstrInfo *TII,
bool IsPostRA) {
return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
bool IsPostRA = Scheduler.isPostRA();
// Visit all machine basic blocks.
//
// TODO: Visit blocks in global postorder or postorder within the bottom-up
// loop tree. Then we can optionally compute global RegPressure.
for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
MBB != MBBEnd; ++MBB) {
Scheduler.startBlock(MBB);
#ifndef NDEBUG
if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
continue;
if (SchedOnlyBlock.getNumOccurrences()
&& (int)SchedOnlyBlock != MBB->getNumber())
continue;
#endif
// Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
// boundary at the bottom of the region. The DAG does not include RegionEnd,
// but the region does (i.e. the next RegionEnd is above the previous
// RegionBegin). If the current block has no terminator then RegionEnd ==
// MBB->end() for the bottom region.
//
// The Scheduler may insert instructions during either schedule() or
// exitRegion(), even for empty regions. So the local iterators 'I' and
// 'RegionEnd' are invalid across these calls.
//
// MBB::size() uses instr_iterator to count. Here we need a bundle to count
// as a single instruction.
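    //
    // For example (illustrative), a block containing "I1; I2; CALL; I3; I4"
    // is split into two regions discovered bottom-up: first [I3, end()) is
    // scheduled, then [I1, CALL), with the CALL as the latter's boundary.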
unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
for(MachineBasicBlock::iterator RegionEnd = MBB->end();
RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {
// Avoid decrementing RegionEnd for blocks with no terminator.
if (RegionEnd != MBB->end() ||
isSchedBoundary(std::prev(RegionEnd), MBB, MF, TII, IsPostRA)) {
--RegionEnd;
// Count the boundary instruction.
--RemainingInstrs;
}
// The next region starts above the previous region. Look backward in the
// instruction stream until we find the nearest boundary.
unsigned NumRegionInstrs = 0;
MachineBasicBlock::iterator I = RegionEnd;
for(;I != MBB->begin(); --I, --RemainingInstrs) {
if (isSchedBoundary(std::prev(I), MBB, MF, TII, IsPostRA))
break;
if (!I->isDebugValue())
++NumRegionInstrs;
}
// Notify the scheduler of the region, even if we may skip scheduling
// it. Perhaps it still needs to be bundled.
Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs);
// Skip empty scheduling regions (0 or 1 schedulable instructions).
if (I == RegionEnd || I == std::prev(RegionEnd)) {
// Close the current region. Bundle the terminator if needed.
// This invalidates 'RegionEnd' and 'I'.
Scheduler.exitRegion();
continue;
}
DEBUG(dbgs() << "********** " << ((Scheduler.isPostRA()) ? "PostRA " : "")
<< "MI Scheduling **********\n");
DEBUG(dbgs() << MF->getName()
<< ":BB#" << MBB->getNumber() << " " << MBB->getName()
<< "\n From: " << *I << " To: ";
if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
else dbgs() << "End";
dbgs() << " RegionInstrs: " << NumRegionInstrs
<< " Remaining: " << RemainingInstrs << "\n");
if (DumpCriticalPathLength) {
errs() << MF->getName();
errs() << ":BB# " << MBB->getNumber();
errs() << " " << MBB->getName() << " \n";
}
// Schedule a region: possibly reorder instructions.
// This invalidates 'RegionEnd' and 'I'.
Scheduler.schedule();
// Close the current region.
Scheduler.exitRegion();
      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
RegionEnd = Scheduler.begin();
}
assert(RemainingInstrs == 0 && "Instruction count mismatch!");
Scheduler.finishBlock();
if (Scheduler.isPostRA()) {
// FIXME: Ideally, no further passes should rely on kill flags. However,
// thumb2 size reduction is currently an exception.
Scheduler.fixupKills(MBB);
}
}
Scheduler.finalizeSchedule();
}
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
// unimplemented
}
LLVM_DUMP_METHOD
void ReadyQueue::dump() {
dbgs() << Name << ": ";
for (unsigned i = 0, e = Queue.size(); i < e; ++i)
dbgs() << Queue[i]->NodeNum << " ";
dbgs() << "\n";
}
//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is independent
// of PreRA/PostRA scheduling and involves no extra book-keeping for virtual
// registers.
//===----------------------------------------------------------------------===//
// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}
bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}
bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
if (SuccSU != &ExitSU) {
// Do not use WillCreateCycle, it assumes SD scheduling.
// If Pred is reachable from Succ, then the edge creates a cycle.
if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
return false;
Topo.AddPred(SuccSU, PredDep.getSUnit());
}
SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
// Return true regardless of whether a new edge needed to be inserted.
return true;
}
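// A typical use from a ScheduleDAGMutation subclass (an illustrative sketch;
// SUa and SUb are hypothetical units with SUa intended to precede SUb):
//
//   if (DAG->canAddEdge(SUb, SUa))
//     DAG->addEdge(SUb, SDep(SUa, SDep::Cluster));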
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
SUnit *SuccSU = SuccEdge->getSUnit();
if (SuccEdge->isWeak()) {
--SuccSU->WeakPredsLeft;
if (SuccEdge->isCluster())
NextClusterSucc = SuccSU;
return;
}
#ifndef NDEBUG
if (SuccSU->NumPredsLeft == 0) {
dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
dbgs() << " has been released too many times!\n";
llvm_unreachable(nullptr);
}
#endif
// SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
// CurrCycle may have advanced since then.
if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();
--SuccSU->NumPredsLeft;
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
SchedImpl->releaseTopNode(SuccSU);
}
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
releaseSucc(SU, &*I);
}
}
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
if (PredEdge->isWeak()) {
--PredSU->WeakSuccsLeft;
if (PredEdge->isCluster())
NextClusterPred = PredSU;
return;
}
#ifndef NDEBUG
if (PredSU->NumSuccsLeft == 0) {
dbgs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
dbgs() << " has been released too many times!\n";
llvm_unreachable(nullptr);
}
#endif
// SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
// CurrCycle may have advanced since then.
if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();
--PredSU->NumSuccsLeft;
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
SchedImpl->releaseBottomNode(PredSU);
}
/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
releasePred(SU, &*I);
}
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs)
{
ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
SchedImpl->initPolicy(begin, end, regioninstrs);
}
/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
// Advance RegionBegin if the first instruction moves down.
if (&*RegionBegin == MI)
++RegionBegin;
// Update the instruction stream.
BB->splice(InsertPos, BB, MI);
// Update LiveIntervals
if (LIS)
LIS->handleMove(MI, /*UpdateFlags=*/true);
// Recede RegionBegin if an instruction moves above the first.
if (RegionBegin == InsertPos)
RegionBegin = MI;
}
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
CurrentTop = CurrentBottom;
return false;
}
++NumInstrsScheduled;
#endif
return true;
}
/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
// Build the DAG.
buildSchedGraph(AA);
Topo.InitDAGTopologicalSorting();
postprocessDAG();
SmallVector<SUnit*, 8> TopRoots, BotRoots;
findRootsAndBiasEdges(TopRoots, BotRoots);
// Initialize the strategy before modifying the DAG.
// This may initialize a DFSResult to be used for queue priority.
SchedImpl->initialize(this);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
if (ViewMISchedDAGs) viewGraph();
// Initialize ready queues now that the DAG and priority data are finalized.
initQueues(TopRoots, BotRoots);
bool IsTopNode = false;
while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
assert(!SU->isScheduled && "Node already scheduled");
if (!checkSchedLimit())
break;
MachineInstr *MI = SU->getInstr();
if (IsTopNode) {
assert(SU->isTopReady() && "node still has unscheduled dependencies");
if (&*CurrentTop == MI)
CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
else
moveInstruction(MI, CurrentTop);
}
else {
assert(SU->isBottomReady() && "node still has unscheduled dependencies");
MachineBasicBlock::iterator priorII =
priorNonDebug(CurrentBottom, CurrentTop);
if (&*priorII == MI)
CurrentBottom = priorII;
else {
if (&*CurrentTop == MI)
CurrentTop = nextIfDebug(++CurrentTop, priorII);
moveInstruction(MI, CurrentBottom);
CurrentBottom = MI;
}
}
// Notify the scheduling strategy before updating the DAG.
// This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
// runs, it can then use the accurate ReadyCycle time to determine whether
// newly released nodes can move to the readyQ.
SchedImpl->schedNode(SU, IsTopNode);
updateQueues(SU, IsTopNode);
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
placeDebugValues();
DEBUG({
unsigned BBNum = begin()->getParent()->getNumber();
dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
dumpSchedule();
dbgs() << '\n';
});
}
/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
Mutations[i]->apply(this);
}
}
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
SmallVectorImpl<SUnit*> &BotRoots) {
for (std::vector<SUnit>::iterator
I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
SUnit *SU = &(*I);
assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");
// Order predecessors so DFSResult follows the critical path.
SU->biasCriticalPath();
// A SUnit is ready to top schedule if it has no predecessors.
if (!I->NumPredsLeft)
TopRoots.push_back(SU);
// A SUnit is ready to bottom schedule if it has no successors.
if (!I->NumSuccsLeft)
BotRoots.push_back(SU);
}
ExitSU.biasCriticalPath();
}
/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
ArrayRef<SUnit*> BotRoots) {
NextClusterSucc = nullptr;
NextClusterPred = nullptr;
// Release all DAG roots for scheduling, not including EntrySU/ExitSU.
//
// Nodes with unreleased weak edges can still be roots.
// Release top roots in forward order.
for (SmallVectorImpl<SUnit*>::const_iterator
I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
SchedImpl->releaseTopNode(*I);
}
// Release bottom roots in reverse order so the higher priority nodes appear
// first. This is more natural and slightly more efficient.
for (SmallVectorImpl<SUnit*>::const_reverse_iterator
I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
SchedImpl->releaseBottomNode(*I);
}
releaseSuccessors(&EntrySU);
releasePredecessors(&ExitSU);
SchedImpl->registerRoots();
// Advance past initial DebugValues.
CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
CurrentBottom = RegionEnd;
}
/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
// Release dependent instructions for scheduling.
if (IsTopNode)
releaseSuccessors(SU);
else
releasePredecessors(SU);
SU->isScheduled = true;
}
/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
// If first instruction was a DBG_VALUE then put it back.
if (FirstDbgValue) {
BB->splice(RegionBegin, BB, FirstDbgValue);
RegionBegin = FirstDbgValue;
}
for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
MachineInstr *DbgValue = P.first;
MachineBasicBlock::iterator OrigPrevMI = P.second;
if (&*RegionBegin == DbgValue)
++RegionBegin;
BB->splice(++OrigPrevMI, BB, DbgValue);
if (OrigPrevMI == std::prev(RegionEnd))
RegionEnd = DbgValue;
}
DbgValues.clear();
FirstDbgValue = nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
if (SUnit *SU = getSUnit(&(*MI)))
SU->dump(this);
else
dbgs() << "Missing SUnit\n";
}
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//
ScheduleDAGMILive::~ScheduleDAGMILive() {
delete DFSResult;
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs)
{
// ScheduleDAGMI initializes SchedImpl's per-region policy.
ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);
// For convenience remember the end of the liveness region.
LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);
SUPressureDiffs.clear();
ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}
// Set up the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMILive::initRegPressure() {
TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
// Close the RPTracker to finalize live ins.
RPTracker.closeRegion();
DEBUG(RPTracker.dump());
// Initialize the live ins and live outs.
TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
// Close one end of the tracker so we can call
// getMaxUpward/DownwardPressureDelta before advancing across any
// instructions. This converts currently live regs into live ins/outs.
TopRPTracker.closeTop();
BotRPTracker.closeBottom();
BotRPTracker.initLiveThru(RPTracker);
if (!BotRPTracker.getLiveThru().empty()) {
TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
DEBUG(dbgs() << "Live Thru: ";
dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }
// For each live out vreg reduce the pressure change associated with other
// uses of the same vreg below the live-out reaching def.
updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);
// Account for liveness generated by the region boundary.
if (LiveRegionEnd != RegionEnd) {
SmallVector<unsigned, 8> LiveUses;
BotRPTracker.recede(&LiveUses);
updatePressureDiffs(LiveUses);
}
assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
// Cache the list of excess pressure sets in this region. This will also track
// the max pressure in the scheduled code for these sets.
RegionCriticalPSets.clear();
const std::vector<unsigned> &RegionPressure =
RPTracker.getPressure().MaxSetPressure;
for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
if (RegionPressure[i] > Limit) {
DEBUG(dbgs() << TRI->getRegPressureSetName(i)
<< " Limit " << Limit
<< " Actual " << RegionPressure[i] << "\n");
RegionCriticalPSets.push_back(PressureChange(i));
}
}
DEBUG(dbgs() << "Excess PSets: ";
for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
dbgs() << TRI->getRegPressureSetName(
RegionCriticalPSets[i].getPSet()) << " ";
dbgs() << "\n");
}
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
const std::vector<unsigned> &NewMaxPressure) {
const PressureDiff &PDiff = getPressureDiff(SU);
unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
I != E; ++I) {
if (!I->isValid())
break;
unsigned ID = I->getPSet();
while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
++CritIdx;
if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
&& NewMaxPressure[ID] <= INT16_MAX)
RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
}
unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
if (NewMaxPressure[ID] >= Limit - 2) {
DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
<< NewMaxPressure[ID]
<< ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
<< "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
}
}
}
/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
/// FIXME: Currently assuming single-use physregs.
unsigned Reg = LiveUses[LUIdx];
DEBUG(dbgs() << " LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
if (!TRI->isVirtualRegister(Reg))
continue;
// This may be called before CurrentBottom has been initialized. However,
// BotRPTracker must have a valid position. We want the value live into the
// instruction or live out of the block, so ask for the previous
// instruction's live-out.
const LiveInterval &LI = LIS->getInterval(Reg);
VNInfo *VNI;
MachineBasicBlock::const_iterator I =
nextIfDebug(BotRPTracker.getPos(), BB->end());
if (I == BB->end())
VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
else {
LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
VNI = LRQ.valueIn();
}
// RegisterPressureTracker guarantees that readsReg is true for LiveUses.
assert(VNI && "No live value at use.");
for (VReg2UseMap::iterator
UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
SUnit *SU = UI->SU;
DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
<< *SU->getInstr());
      // If this use comes before the reaching def, it cannot be a last use,
      // so decrease its pressure change.
if (!SU->isScheduled && SU != &ExitSU) {
LiveQueryResult LRQ
= LI.Query(LIS->getInstructionIndex(SU->getInstr()));
if (LRQ.valueIn() == VNI)
getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
}
}
}
}
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
buildDAGWithRegPressure();
Topo.InitDAGTopologicalSorting();
postprocessDAG();
SmallVector<SUnit*, 8> TopRoots, BotRoots;
findRootsAndBiasEdges(TopRoots, BotRoots);
// Initialize the strategy before modifying the DAG.
// This may initialize a DFSResult to be used for queue priority.
SchedImpl->initialize(this);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
if (ViewMISchedDAGs) viewGraph();
// Initialize ready queues now that the DAG and priority data are finalized.
initQueues(TopRoots, BotRoots);
if (ShouldTrackPressure) {
assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
TopRPTracker.setPos(CurrentTop);
}
bool IsTopNode = false;
while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
assert(!SU->isScheduled && "Node already scheduled");
if (!checkSchedLimit())
break;
scheduleMI(SU, IsTopNode);
if (DFSResult) {
unsigned SubtreeID = DFSResult->getSubtreeID(SU);
if (!ScheduledTrees.test(SubtreeID)) {
ScheduledTrees.set(SubtreeID);
DFSResult->scheduleTree(SubtreeID);
SchedImpl->scheduleTree(SubtreeID);
}
}
// Notify the scheduling strategy after updating the DAG.
SchedImpl->schedNode(SU, IsTopNode);
updateQueues(SU, IsTopNode);
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
placeDebugValues();
DEBUG({
unsigned BBNum = begin()->getParent()->getNumber();
dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
dumpSchedule();
dbgs() << '\n';
});
}
/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
if (!ShouldTrackPressure) {
RPTracker.reset();
RegionCriticalPSets.clear();
buildSchedGraph(AA);
return;
}
// Initialize the register pressure tracker used by buildSchedGraph.
RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
/*TrackUntiedDefs=*/true);
  // Account for liveness generated by the region boundary.
if (LiveRegionEnd != RegionEnd)
RPTracker.recede();
// Build the DAG, and compute current register pressure.
buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);
// Initialize top/bottom trackers after computing region pressure.
initRegPressure();
}
void ScheduleDAGMILive::computeDFSResult() {
if (!DFSResult)
DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
DFSResult->clear();
ScheduledTrees.clear();
DFSResult->resize(SUnits.size());
DFSResult->compute(SUnits);
ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
// This only applies to single-block loops.
if (!BB->isSuccessor(BB))
return 0;
unsigned MaxCyclicLatency = 0;
// Visit each live out vreg def to find def/use pairs that cross iterations.
ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
RI != RE; ++RI) {
unsigned Reg = *RI;
if (!TRI->isVirtualRegister(Reg))
continue;
const LiveInterval &LI = LIS->getInterval(Reg);
const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
if (!DefVNI)
continue;
MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
const SUnit *DefSU = getSUnit(DefMI);
if (!DefSU)
continue;
unsigned LiveOutHeight = DefSU->getHeight();
unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
// Visit all local users of the vreg def.
for (VReg2UseMap::iterator
UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
if (UI->SU == &ExitSU)
continue;
// Only consider uses of the phi.
LiveQueryResult LRQ =
LI.Query(LIS->getInstructionIndex(UI->SU->getInstr()));
if (!LRQ.valueIn()->isPHIDef())
continue;
// Assume that a path spanning two iterations is a cycle, which could
// overestimate in strange cases. This allows cyclic latency to be
// estimated as the minimum slack of the vreg's depth or height.
unsigned CyclicLatency = 0;
if (LiveOutDepth > UI->SU->getDepth())
CyclicLatency = LiveOutDepth - UI->SU->getDepth();
unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
if (LiveInHeight > LiveOutHeight) {
if (LiveInHeight - LiveOutHeight < CyclicLatency)
CyclicLatency = LiveInHeight - LiveOutHeight;
}
else
CyclicLatency = 0;
DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
<< UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
if (CyclicLatency > MaxCyclicLatency)
MaxCyclicLatency = CyclicLatency;
}
}
DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
return MaxCyclicLatency;
}
/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
// Move the instruction to its new location in the instruction stream.
MachineInstr *MI = SU->getInstr();
if (IsTopNode) {
assert(SU->isTopReady() && "node still has unscheduled dependencies");
if (&*CurrentTop == MI)
CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
else {
moveInstruction(MI, CurrentTop);
TopRPTracker.setPos(MI);
}
if (ShouldTrackPressure) {
// Update top scheduled pressure.
TopRPTracker.advance();
assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
}
}
else {
assert(SU->isBottomReady() && "node still has unscheduled dependencies");
MachineBasicBlock::iterator priorII =
priorNonDebug(CurrentBottom, CurrentTop);
if (&*priorII == MI)
CurrentBottom = priorII;
else {
if (&*CurrentTop == MI) {
CurrentTop = nextIfDebug(++CurrentTop, priorII);
TopRPTracker.setPos(CurrentTop);
}
moveInstruction(MI, CurrentBottom);
CurrentBottom = MI;
}
if (ShouldTrackPressure) {
// Update bottom scheduled pressure.
SmallVector<unsigned, 8> LiveUses;
BotRPTracker.recede(&LiveUses);
assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
updatePressureDiffs(LiveUses);
}
}
}
//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//
namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
struct LoadInfo {
SUnit *SU;
unsigned BaseReg;
unsigned Offset;
LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
: SU(su), BaseReg(reg), Offset(ofs) {}
bool operator<(const LoadInfo &RHS) const {
return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
}
};
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
public:
LoadClusterMutation(const TargetInstrInfo *tii,
const TargetRegisterInfo *tri)
: TII(tii), TRI(tri) {}
void apply(ScheduleDAGMI *DAG) override;
protected:
void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous
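/// Create cluster edges between loads with a common base register. Load
/// records are keyed by (BaseReg, Offset) so that neighboring accesses become
/// adjacent after sorting; the target then decides via shouldClusterLoads
/// whether each adjacent pair is worth clustering.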
void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
ScheduleDAGMI *DAG) {
SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
SUnit *SU = Loads[Idx];
unsigned BaseReg;
unsigned Offset;
if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
}
if (LoadRecords.size() < 2)
return;
std::sort(LoadRecords.begin(), LoadRecords.end());
unsigned ClusterLength = 1;
for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
ClusterLength = 1;
continue;
}
SUnit *SUa = LoadRecords[Idx].SU;
SUnit *SUb = LoadRecords[Idx+1].SU;
if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
&& DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
<< SUb->NodeNum << ")\n");
// Copy successor edges from SUa to SUb. Interleaving computation
// dependent on SUa can prevent load combining due to register reuse.
// Predecessor edges do not need to be copied from SUb to SUa since nearby
// loads should have effectively the same inputs.
for (SUnit::const_succ_iterator
SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
if (SI->getSUnit() == SUb)
continue;
DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
}
++ClusterLength;
}
else
ClusterLength = 1;
}
}
/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
// Map DAG NodeNum to store chain ID.
DenseMap<unsigned, unsigned> StoreChainIDs;
// Map each store chain to a set of dependent loads.
SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
SUnit *SU = &DAG->SUnits[Idx];
if (!SU->getInstr()->mayLoad())
continue;
unsigned ChainPredID = DAG->SUnits.size();
for (SUnit::const_pred_iterator
PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
if (PI->isCtrl()) {
ChainPredID = PI->getSUnit()->NodeNum;
break;
}
}
// Check if this chain-like pred has been seen
// before. ChainPredID==MaxNodeID for loads at the top of the schedule.
unsigned NumChains = StoreChainDependents.size();
std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
if (Result.second)
StoreChainDependents.resize(NumChains + 1);
StoreChainDependents[Result.first->second].push_back(SU);
}
// Iterate over the store chains.
for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}
//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//
namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
const TargetInstrInfo *TII;
public:
MacroFusion(const TargetInstrInfo *tii): TII(tii) {}
void apply(ScheduleDAGMI *DAG) override;
};
} // anonymous
/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
// For now, assume targets can only fuse with the branch.
MachineInstr *Branch = DAG->ExitSU.getInstr();
if (!Branch)
return;
for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
SUnit *SU = &DAG->SUnits[--Idx];
if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
continue;
// Create a single weak edge from SU to ExitSU. The only effect is to cause
// bottom-up scheduling to heavily prioritize the clustered SU. There is no
// need to copy predecessor edges from ExitSU to SU, since top-down
// scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
// of SU, we could create an artificial edge from the deepest root, but it
// hasn't been needed yet.
bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
(void)Success;
assert(Success && "No DAG nodes should be reachable from ExitSU");
DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
break;
}
}
//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//
namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
// Transient state.
SlotIndex RegionBeginIdx;
// RegionEndIdx is the slot index of the last non-debug instruction in the
// scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
SlotIndex RegionEndIdx;
public:
CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
void apply(ScheduleDAGMI *DAG) override;
protected:
void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous
/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0: = dst
/// I1: src = ...
/// I2: = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1: = dst
/// I2: src = ...
/// I3: = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
LiveIntervals *LIS = DAG->getLIS();
MachineInstr *Copy = CopySU->getInstr();
// Check for pure vreg copies.
unsigned SrcReg = Copy->getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
return;
unsigned DstReg = Copy->getOperand(0).getReg();
if (!TargetRegisterInfo::isVirtualRegister(DstReg))
return;
// Check if either the dest or source is local. If it's live across a back
// edge, it's not local. Note that if both vregs are live across the back
// edge, we cannot successfully constrain the copy without cyclic scheduling.
// If both the copy's source and dest are local live intervals, then we
// should treat the dest as the global for the purpose of adding
// constraints. This adds edges from source's other uses to the copy.
unsigned LocalReg = SrcReg;
unsigned GlobalReg = DstReg;
LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
LocalReg = DstReg;
GlobalReg = SrcReg;
LocalLI = &LIS->getInterval(LocalReg);
if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
return;
}
LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
// Find the global segment after the start of the local LI.
LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
// If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
// local live range. We could create edges from other global uses to the local
// start, but the coalescer should have already eliminated these cases, so
// don't bother dealing with it.
if (GlobalSegment == GlobalLI->end())
return;
// If GlobalSegment is killed at the LocalLI->start, the call to find()
// returned the next global segment. But if GlobalSegment overlaps with
// LocalLI->start, then advance to the next segment. If a hole in GlobalLI
// exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
if (GlobalSegment->contains(LocalLI->beginIndex()))
++GlobalSegment;
if (GlobalSegment == GlobalLI->end())
return;
// Check if GlobalLI contains a hole in the vicinity of LocalLI.
if (GlobalSegment != GlobalLI->begin()) {
// Two address defs have no hole.
if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
GlobalSegment->start)) {
return;
}
// If the prior global segment may be defined by the same two-address
// instruction that also defines LocalLI, then we can't make a hole here.
if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
LocalLI->beginIndex())) {
return;
}
// If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
// it would be a disconnected component in the live range.
assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
"Disconnected LRG within the scheduling region.");
}
MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
if (!GlobalDef)
return;
SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
if (!GlobalSU)
return;
// GlobalDef is the bottom of the GlobalLI hole. Open the hole by
// constraining the uses of the last local def to precede GlobalDef.
SmallVector<SUnit*,8> LocalUses;
const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
for (SUnit::const_succ_iterator
I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
I != E; ++I) {
if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
continue;
if (I->getSUnit() == GlobalSU)
continue;
if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
return;
LocalUses.push_back(I->getSUnit());
}
// Open the top of the GlobalLI hole by constraining any earlier global uses
// to precede the start of LocalLI.
SmallVector<SUnit*,8> GlobalUses;
MachineInstr *FirstLocalDef =
LIS->getInstructionFromIndex(LocalLI->beginIndex());
SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
for (SUnit::const_pred_iterator
I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
continue;
if (I->getSUnit() == FirstLocalSU)
continue;
if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
return;
GlobalUses.push_back(I->getSUnit());
}
DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
// Add the weak edges.
for (SmallVectorImpl<SUnit*>::const_iterator
I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
<< GlobalSU->NodeNum << ")\n");
DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
}
for (SmallVectorImpl<SUnit*>::const_iterator
I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
<< FirstLocalSU->NodeNum << ")\n");
DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
}
}
/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
if (FirstPos == DAG->end())
return;
RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
RegionEndIdx = DAG->getLIS()->getInstructionIndex(
&*priorNonDebug(DAG->end(), DAG->begin()));
for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
SUnit *SU = &DAG->SUnits[Idx];
if (!SU->getInstr()->isCopy())
continue;
constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
}
}
//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//
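// Sentinel value for ReservedCycles meaning a resource has never been
// reserved in the current scheduling zone.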
static const unsigned InvalidCycle = ~0U;
SchedBoundary::~SchedBoundary() { delete HazardRec; }
void SchedBoundary::reset() {
// A new HazardRec is created for each DAG and owned by SchedBoundary.
// Destroying and reconstructing it is very expensive though. So keep
// invalid, placeholder HazardRecs.
if (HazardRec && HazardRec->isEnabled()) {
delete HazardRec;
HazardRec = nullptr;
}
Available.clear();
Pending.clear();
CheckPending = false;
NextSUs.clear();
CurrCycle = 0;
CurrMOps = 0;
MinReadyCycle = UINT_MAX;
ExpectedLatency = 0;
DependentLatency = 0;
RetiredMOps = 0;
MaxExecutedResCount = 0;
ZoneCritResIdx = 0;
IsResourceLimited = false;
ReservedCycles.clear();
#ifndef NDEBUG
// Track the maximum number of stall cycles that could arise either from the
// latency of a DAG edge or the number of cycles that a processor resource is
// reserved (SchedBoundary::ReservedCycles).
MaxObservedStall = 0;
#endif
// Reserve a zero-count for invalid CritResIdx.
ExecutedResCounts.resize(1);
assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}
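/// Initialize the remaining counts from the DAG. Micro-op and resource usage
/// is accumulated in scaled units (count times the resource's factor) so that
/// resource kinds with different numbers of units are directly comparable.
/// Illustrative example (numbers assumed, not from any real model): with an
/// issue width of 2 and a single-unit divider, one divider cycle costs twice
/// as many scaled units as one issue slot.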
void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
reset();
if (!SchedModel->hasInstrSchedModel())
return;
RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
for (std::vector<SUnit>::iterator
I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
* SchedModel->getMicroOpFactor();
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
unsigned PIdx = PI->ProcResourceIdx;
unsigned Factor = SchedModel->getResourceFactor(PIdx);
RemainingCounts[PIdx] += (Factor * PI->Cycles);
}
}
}
void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
reset();
DAG = dag;
SchedModel = smodel;
Rem = rem;
if (SchedModel->hasInstrSchedModel()) {
ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
}
}
/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
if (!SU->isUnbuffered)
return 0;
unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
if (ReadyCycle > CurrCycle)
return ReadyCycle - CurrCycle;
return 0;
}
/// Compute the next cycle at which the given processor resource can be
/// scheduled.
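///
/// For example (illustrative numbers only): if the resource was last recorded
/// busy through cycle 7, a top-down query returns 7, while a bottom-up query
/// for an operation needing 4 cycles on it returns 11.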
unsigned SchedBoundary::
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
unsigned NextUnreserved = ReservedCycles[PIdx];
// If this resource has never been used, always return cycle zero.
if (NextUnreserved == InvalidCycle)
return 0;
// For bottom-up scheduling add the cycles needed for the current operation.
if (!isTop())
NextUnreserved += Cycles;
return NextUnreserved;
}
/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
if (HazardRec->isEnabled()
&& HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
return true;
}
unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
<< SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
return true;
}
if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
if (NRCycle > CurrCycle) {
#ifndef NDEBUG
MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
#endif
DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
<< SchedModel->getResourceName(PI->ProcResourceIdx)
<< "=" << NRCycle << "c\n");
return true;
}
}
}
return false;
}
// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
SUnit *LateSU = nullptr;
unsigned RemLatency = 0;
for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
I != E; ++I) {
unsigned L = getUnscheduledLatency(*I);
if (L > RemLatency) {
RemLatency = L;
LateSU = *I;
}
}
if (LateSU) {
DEBUG(dbgs() << Available.getName() << " RemLatency SU("
<< LateSU->NodeNum << ") " << RemLatency << "c\n");
}
return RemLatency;
}
// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
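// Counts are in scaled units (resource factor times cycles) so that micro-op
// issue and each resource kind can be compared on a common scale.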
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
OtherCritIdx = 0;
if (!SchedModel->hasInstrSchedModel())
return 0;
unsigned OtherCritCount = Rem->RemIssueCount
+ (RetiredMOps * SchedModel->getMicroOpFactor());
DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
<< OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
PIdx != PEnd; ++PIdx) {
unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
if (OtherCount > OtherCritCount) {
OtherCritCount = OtherCount;
OtherCritIdx = PIdx;
}
}
if (OtherCritIdx) {
DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: "
<< OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
<< " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
}
return OtherCritCount;
}
void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
assert(SU->getInstr() && "Scheduled SUnit must have instr");
#ifndef NDEBUG
// ReadyCycle has been bumped up to CurrCycle when this node was
// scheduled, but CurrCycle may have been eagerly advanced immediately after
// scheduling, so may now be greater than ReadyCycle.
if (ReadyCycle > CurrCycle)
MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
// Check for interlocks first. For the purpose of other heuristics, an
// instruction that cannot issue appears as if it's not in the ReadyQueue.
bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
Pending.push(SU);
else
Available.push(SU);
// Record this node as an immediate dependent of the scheduled node.
NextSUs.insert(SU);
}
void SchedBoundary::releaseTopNode(SUnit *SU) {
if (SU->isScheduled)
return;
releaseNode(SU, SU->TopReadyCycle);
}
void SchedBoundary::releaseBottomNode(SUnit *SU) {
if (SU->isScheduled)
return;
releaseNode(SU, SU->BotReadyCycle);
}
/// Advance the boundary of scheduled code to NextCycle, moving one or more
/// cycles.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
if (SchedModel->getMicroOpBufferSize() == 0) {
assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
if (MinReadyCycle > NextCycle)
NextCycle = MinReadyCycle;
}
// Update the current micro-ops, which will issue in the next cycle.
unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
// Decrement DependentLatency based on the next cycle.
if ((NextCycle - CurrCycle) > DependentLatency)
DependentLatency = 0;
else
DependentLatency -= (NextCycle - CurrCycle);
if (!HazardRec->isEnabled()) {
// Bypass HazardRec virtual calls.
CurrCycle = NextCycle;
}
else {
// Bypass getHazardType calls in case of long latency.
for (; CurrCycle != NextCycle; ++CurrCycle) {
if (isTop())
HazardRec->AdvanceCycle();
else
HazardRec->RecedeCycle();
}
}
CheckPending = true;
unsigned LFactor = SchedModel->getLatencyFactor();
IsResourceLimited =
(int)(getCriticalCount() - (getScheduledLatency() * LFactor))
> (int)LFactor;
DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}
void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
ExecutedResCounts[PIdx] += Count;
if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
MaxExecutedResCount = ExecutedResCounts[PIdx];
}
/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
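///
/// For example (illustrative): a resource with factor 2 consumed for 3 cycles
/// adds 6 scaled units to the zone's executed count and subtracts the same
/// amount from the remaining count.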
unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
unsigned Factor = SchedModel->getResourceFactor(PIdx);
unsigned Count = Factor * Cycles;
DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx)
<< " +" << Cycles << "x" << Factor << "u\n");
// Update executed resource counts.
incExecutedResources(PIdx, Count);
assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
Rem->RemainingCounts[PIdx] -= Count;
// Check if this resource exceeds the current critical resource. If so, it
// becomes the critical resource.
if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
ZoneCritResIdx = PIdx;
DEBUG(dbgs() << " *** Critical resource "
<< SchedModel->getResourceName(PIdx) << ": "
<< getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
}
// For reserved resources, record the highest cycle using the resource.
unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
if (NextAvailable > CurrCycle) {
DEBUG(dbgs() << " Resource conflict: "
<< SchedModel->getProcResource(PIdx)->Name << " reserved until @"
<< NextAvailable << "\n");
}
return NextAvailable;
}
/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
// Update the reservation table.
if (HazardRec->isEnabled()) {
if (!isTop() && SU->isCall) {
// Calls are scheduled with their preceding instructions. For bottom-up
// scheduling, clear the pipeline state before emitting.
HazardRec->Reset();
}
HazardRec->EmitInstruction(SU);
}
// checkHazard should prevent scheduling multiple instructions per cycle that
// exceed the issue width.
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
assert(
(CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
"Cannot schedule this instruction's MicroOps in the current cycle.");
unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
unsigned NextCycle = CurrCycle;
switch (SchedModel->getMicroOpBufferSize()) {
case 0:
assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
break;
case 1:
if (ReadyCycle > NextCycle) {
NextCycle = ReadyCycle;
DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
}
break;
default:
// We don't currently model the OOO reorder buffer, so consider all
// scheduled MOps to be "retired". We do loosely model in-order resource
// latency. If this instruction uses an in-order resource, account for any
// likely stall cycles.
if (SU->isUnbuffered && ReadyCycle > NextCycle)
NextCycle = ReadyCycle;
break;
}
RetiredMOps += IncMOps;
// Update resource counts and critical resource.
if (SchedModel->hasInstrSchedModel()) {
unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
Rem->RemIssueCount -= DecRemIssue;
if (ZoneCritResIdx) {
// Scale scheduled micro-ops for comparing with the critical resource.
unsigned ScaledMOps =
RetiredMOps * SchedModel->getMicroOpFactor();
// If scaled micro-ops are now more than the previous critical resource by
// a full cycle, then micro-ops issue becomes critical.
if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
>= (int)SchedModel->getLatencyFactor()) {
ZoneCritResIdx = 0;
DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
<< ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
}
}
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
unsigned RCycle =
countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
if (RCycle > NextCycle)
NextCycle = RCycle;
}
if (SU->hasReservedResource) {
// For reserved resources, record the highest cycle using the resource.
// For top-down scheduling, this is the cycle in which we schedule this
// instruction plus the number of cycles the operation reserves the
// resource. For bottom-up, it is simply the instruction's cycle.
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
unsigned PIdx = PI->ProcResourceIdx;
if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
if (isTop()) {
ReservedCycles[PIdx] =
std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
}
else
ReservedCycles[PIdx] = NextCycle;
}
}
}
}
// Update ExpectedLatency and DependentLatency.
unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
if (SU->getDepth() > TopLatency) {
TopLatency = SU->getDepth();
DEBUG(dbgs() << " " << Available.getName()
<< " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
}
if (SU->getHeight() > BotLatency) {
BotLatency = SU->getHeight();
DEBUG(dbgs() << " " << Available.getName()
<< " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
}
// If we stall for any reason, bump the cycle.
if (NextCycle > CurrCycle) {
bumpCycle(NextCycle);
}
else {
// After updating ZoneCritResIdx and ExpectedLatency, check if we're
// resource limited. If a stall occurred, bumpCycle does this.
unsigned LFactor = SchedModel->getLatencyFactor();
IsResourceLimited =
(int)(getCriticalCount() - (getScheduledLatency() * LFactor))
> (int)LFactor;
}
// Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
// resets CurrMOps. Loop to handle instructions with more MOps than issue in
// one cycle. Since we commonly reach the max MOps here, opportunistically
// bump the cycle to avoid uselessly checking everything in the readyQ.
CurrMOps += IncMOps;
while (CurrMOps >= SchedModel->getIssueWidth()) {
DEBUG(dbgs() << " *** Max MOps " << CurrMOps
<< " at cycle " << CurrCycle << '\n');
bumpCycle(++NextCycle);
}
DEBUG(dumpScheduledState());
}
/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
// If the available queue is empty, it is safe to reset MinReadyCycle.
if (Available.empty())
MinReadyCycle = UINT_MAX;
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
SUnit *SU = *(Pending.begin()+i);
unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
if (!IsBuffered && ReadyCycle > CurrCycle)
continue;
if (checkHazard(SU))
continue;
Available.push(SU);
Pending.remove(Pending.begin()+i);
--i; --e;
}
DEBUG(if (!Pending.empty()) Pending.dump());
CheckPending = false;
}
/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
if (Available.isInQueue(SU))
Available.remove(Available.find(SU));
else {
assert(Pending.isInQueue(SU) && "bad ready count");
Pending.remove(Pending.find(SU));
}
}
/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return nullptr.
SUnit *SchedBoundary::pickOnlyChoice() {
if (CheckPending)
releasePending();
if (CurrMOps > 0) {
// Defer any ready instrs that now have a hazard.
for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
if (checkHazard(*I)) {
Pending.push(*I);
I = Available.remove(I);
continue;
}
++I;
}
}
for (unsigned i = 0; Available.empty(); ++i) {
// FIXME: Re-enable assert once PR20057 is resolved.
// assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
// "permanent hazard");
(void)i;
bumpCycle(CurrCycle + 1);
releasePending();
}
if (Available.size() == 1)
return *Available.begin();
return nullptr;
}
#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
void SchedBoundary::dumpScheduledState() {
unsigned ResFactor;
unsigned ResCount;
if (ZoneCritResIdx) {
ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
ResCount = getResourceCount(ZoneCritResIdx);
}
else {
ResFactor = SchedModel->getMicroOpFactor();
ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
}
unsigned LFactor = SchedModel->getLatencyFactor();
dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
<< " Retired: " << RetiredMOps;
dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
<< ResCount / ResFactor << " "
<< SchedModel->getResourceName(ZoneCritResIdx)
<< "\n ExpectedLatency: " << ExpectedLatency << "c\n"
<< (IsResourceLimited ? " - Resource" : " - Latency")
<< " limited.\n";
}
#endif
//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//
void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
const TargetSchedModel *SchedModel) {
if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
return;
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
if (PI->ProcResourceIdx == Policy.ReduceResIdx)
ResDelta.CritResources += PI->Cycles;
if (PI->ProcResourceIdx == Policy.DemandResIdx)
ResDelta.DemandedResources += PI->Cycles;
}
}
/// Set the CandPolicy for a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
bool IsPostRA,
SchedBoundary &CurrZone,
SchedBoundary *OtherZone) {
// Apply preemptive heuristics based on the total latency and resources
// inside and outside this zone. Potential stalls should be considered before
// following this policy.
// Compute remaining latency. We need this both to determine whether the
// overall schedule has become latency-limited and whether the instructions
// outside this zone are resource or latency limited.
//
// The "dependent" latency is updated incrementally during scheduling as the
// max height/depth of scheduled nodes minus the cycles since it was
// scheduled:
// DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
//
// The "independent" latency is the max ready queue depth:
// ILat = max N.depth for N in Available|Pending
//
// RemainingLatency is the greater of independent and dependent latency.
unsigned RemLatency = CurrZone.getDependentLatency();
RemLatency = std::max(RemLatency,
CurrZone.findMaxLatency(CurrZone.Available.elements()));
RemLatency = std::max(RemLatency,
CurrZone.findMaxLatency(CurrZone.Pending.elements()));
// Compute the critical resource outside the zone.
unsigned OtherCritIdx = 0;
unsigned OtherCount =
OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
bool OtherResLimited = false;
if (SchedModel->hasInstrSchedModel()) {
unsigned LFactor = SchedModel->getLatencyFactor();
OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
}
// Schedule aggressively for latency in PostRA mode. We don't check for
// acyclic latency during PostRA, and highly out-of-order processors will
// skip PostRA scheduling.
if (!OtherResLimited) {
if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
Policy.ReduceLatency |= true;
DEBUG(dbgs() << " " << CurrZone.Available.getName()
<< " RemainingLatency " << RemLatency << " + "
<< CurrZone.getCurrCycle() << "c > CritPath "
<< Rem.CriticalPath << "\n");
}
}
// If the same resource is limiting inside and outside the zone, do nothing.
if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
return;
DEBUG(
if (CurrZone.isResourceLimited()) {
dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
<< SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
<< "\n";
}
if (OtherResLimited)
dbgs() << " RemainingLimit: "
<< SchedModel->getResourceName(OtherCritIdx) << "\n";
if (!CurrZone.isResourceLimited() && !OtherResLimited)
dbgs() << " Latency limited both directions.\n");
if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
if (OtherResLimited)
Policy.DemandResIdx = OtherCritIdx;
}
#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
GenericSchedulerBase::CandReason Reason) {
switch (Reason) {
case NoCand: return "NOCAND ";
case PhysRegCopy: return "PREG-COPY";
case RegExcess: return "REG-EXCESS";
case RegCritical: return "REG-CRIT ";
case Stall: return "STALL ";
case Cluster: return "CLUSTER ";
case Weak: return "WEAK ";
case RegMax: return "REG-MAX ";
case ResourceReduce: return "RES-REDUCE";
case ResourceDemand: return "RES-DEMAND";
case TopDepthReduce: return "TOP-DEPTH ";
case TopPathReduce: return "TOP-PATH ";
case BotHeightReduce:return "BOT-HEIGHT";
case BotPathReduce: return "BOT-PATH ";
case NextDefUse: return "DEF-USE ";
case NodeOrder: return "ORDER ";
}
llvm_unreachable("Unknown reason!");
}
void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
PressureChange P;
unsigned ResIdx = 0;
unsigned Latency = 0;
switch (Cand.Reason) {
default:
break;
case RegExcess:
P = Cand.RPDelta.Excess;
break;
case RegCritical:
P = Cand.RPDelta.CriticalMax;
break;
case RegMax:
P = Cand.RPDelta.CurrentMax;
break;
case ResourceReduce:
ResIdx = Cand.Policy.ReduceResIdx;
break;
case ResourceDemand:
ResIdx = Cand.Policy.DemandResIdx;
break;
case TopDepthReduce:
Latency = Cand.SU->getDepth();
break;
case TopPathReduce:
Latency = Cand.SU->getHeight();
break;
case BotHeightReduce:
Latency = Cand.SU->getHeight();
break;
case BotPathReduce:
Latency = Cand.SU->getDepth();
break;
}
dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
if (P.isValid())
dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
<< ":" << P.getUnitInc() << " ";
else
dbgs() << " ";
if (ResIdx)
dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
else
dbgs() << " ";
if (Latency)
dbgs() << " " << Latency << " cycles ";
else
dbgs() << " ";
dbgs() << '\n';
}
#endif
/// Return true if this heuristic determines order.
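/// If TryVal wins, TryCand is marked with Reason; if Cand wins, Cand's Reason
/// is strengthened when Reason ranks higher (has a lower enum value); a tie
/// records a repeat and defers to later heuristics.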
static bool tryLess(int TryVal, int CandVal,
GenericSchedulerBase::SchedCandidate &TryCand,
GenericSchedulerBase::SchedCandidate &Cand,
GenericSchedulerBase::CandReason Reason) {
if (TryVal < CandVal) {
TryCand.Reason = Reason;
return true;
}
if (TryVal > CandVal) {
if (Cand.Reason > Reason)
Cand.Reason = Reason;
return true;
}
Cand.setRepeat(Reason);
return false;
}
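/// Mirror image of tryLess: the larger value wins. Together the two let each
/// heuristic express a preference in either direction with identical
/// tie-breaking behavior.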
static bool tryGreater(int TryVal, int CandVal,
GenericSchedulerBase::SchedCandidate &TryCand,
GenericSchedulerBase::SchedCandidate &Cand,
GenericSchedulerBase::CandReason Reason) {
if (TryVal > CandVal) {
TryCand.Reason = Reason;
return true;
}
if (TryVal < CandVal) {
if (Cand.Reason > Reason)
Cand.Reason = Reason;
return true;
}
Cand.setRepeat(Reason);
return false;
}
static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
GenericSchedulerBase::SchedCandidate &Cand,
SchedBoundary &Zone) {
if (Zone.isTop()) {
if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
return true;
}
if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
TryCand, Cand, GenericSchedulerBase::TopPathReduce))
return true;
}
else {
if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
return true;
}
if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
TryCand, Cand, GenericSchedulerBase::BotPathReduce))
return true;
}
return false;
}
static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
bool IsTop) {
DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
<< GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
}
void GenericScheduler::initialize(ScheduleDAGMI *dag) {
assert(dag->hasVRegLiveness() &&
"(PreRA)GenericScheduler needs vreg liveness");
DAG = static_cast<ScheduleDAGMILive*>(dag);
SchedModel = DAG->getSchedModel();
TRI = DAG->TRI;
Rem.init(DAG, SchedModel);
Top.init(DAG, SchedModel, &Rem);
Bot.init(DAG, SchedModel, &Rem);
// Initialize resource counts.
// Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
// are disabled, then these HazardRecs will be disabled.
const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
if (!Top.HazardRec) {
Top.HazardRec =
DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
Itin, DAG);
}
if (!Bot.HazardRec) {
Bot.HazardRec =
DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
Itin, DAG);
}
}
/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned NumRegionInstrs) {
const MachineFunction &MF = *Begin->getParent()->getParent();
const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
// Avoid setting up the register pressure tracker for small regions to save
// compile time. As a rough heuristic, only track pressure when the number of
// schedulable instructions exceeds half the integer register file.
RegionPolicy.ShouldTrackPressure = true;
for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
if (TLI->isTypeLegal(LegalIntVT)) {
unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
TLI->getRegClassFor(LegalIntVT));
RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
}
}
// For generic targets, we default to bottom-up, because it's simpler and more
// compile-time optimizations have been implemented in that direction.
RegionPolicy.OnlyBottomUp = true;
// Allow the subtarget to override default policy.
MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
NumRegionInstrs);
// After subtarget overrides, apply command line options.
if (!EnableRegPressure)
RegionPolicy.ShouldTrackPressure = false;
// Check -misched-topdown/bottomup can force or unforce scheduling direction.
// e.g. -misched-bottomup=false allows scheduling in both directions.
assert((!ForceTopDown || !ForceBottomUp) &&
"-misched-topdown incompatible with -misched-bottomup");
if (ForceBottomUp.getNumOccurrences() > 0) {
RegionPolicy.OnlyBottomUp = ForceBottomUp;
if (RegionPolicy.OnlyBottomUp)
RegionPolicy.OnlyTopDown = false;
}
if (ForceTopDown.getNumOccurrences() > 0) {
RegionPolicy.OnlyTopDown = ForceTopDown;
if (RegionPolicy.OnlyTopDown)
RegionPolicy.OnlyBottomUp = false;
}
}
/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
/// InFlightIterations = AcyclicPath / CyclesPerIteration
/// InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
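///
/// Illustrative example (all numbers assumed, with latency and micro-op
/// factors of 1): CyclicCritPath = 2, CriticalPath = 12, RemIssueCount = 8,
/// MicroOpBufferSize = 4. Then IterCount = max(2, 8) = 8, AcyclicCount = 12,
/// InFlightCount = ceil(12 * 8 / 8) = 12 > BufferLimit = 4, so the loop is
/// flagged as acyclic-latency limited.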
void GenericScheduler::checkAcyclicLatency() {
if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
return;
// Scaled number of cycles per loop iteration.
unsigned IterCount =
std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
Rem.RemIssueCount);
// Scaled acyclic critical path.
unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
// InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
unsigned InFlightCount =
(AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
unsigned BufferLimit =
SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
DEBUG(dbgs() << "IssueCycles="
<< Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
<< "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
<< "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
<< " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
<< "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
if (Rem.IsAcyclicLatencyLimited)
dbgs() << " ACYCLIC LATENCY LIMIT\n");
}
void GenericScheduler::registerRoots() {
Rem.CriticalPath = DAG->ExitSU.getDepth();
// Some roots may not feed into ExitSU. Check all of them in case.
for (std::vector<SUnit*>::const_iterator
I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
if ((*I)->getDepth() > Rem.CriticalPath)
Rem.CriticalPath = (*I)->getDepth();
}
DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
if (DumpCriticalPathLength) {
errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
}
if (EnableCyclicPath) {
Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
checkAcyclicLatency();
}
}
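/// Compare the register-pressure deltas of two candidates. Candidates
/// affecting the same pressure set are ranked by the smaller increase; a
/// decrease beats an increase; otherwise the pressure-set ranks decide, with
/// the comparison reversed when the candidates are decreasing pressure.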
static bool tryPressure(const PressureChange &TryP,
const PressureChange &CandP,
GenericSchedulerBase::SchedCandidate &TryCand,
GenericSchedulerBase::SchedCandidate &Cand,
GenericSchedulerBase::CandReason Reason) {
int TryRank = TryP.getPSetOrMax();
int CandRank = CandP.getPSetOrMax();
// If both candidates affect the same set, go with the smallest increase.
if (TryRank == CandRank) {
return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
Reason);
}
// If one candidate decreases and the other increases, go with it.
// Invalid candidates have UnitInc==0.
if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
Reason)) {
return true;
}
// If the candidates are decreasing pressure, reverse priority.
if (TryP.getUnitInc() < 0)
std::swap(TryRank, CandRank);
return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}
static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}
/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this when
/// regalloc has support for parallel copies.
static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
const MachineInstr *MI = SU->getInstr();
if (!MI->isCopy())
return 0;
unsigned ScheduledOper = isTop ? 1 : 0;
unsigned UnscheduledOper = isTop ? 0 : 1;
// If we have already scheduled the physreg produce/consumer, immediately
// schedule the copy.
if (TargetRegisterInfo::isPhysicalRegister(
MI->getOperand(ScheduledOper).getReg()))
return 1;
// If the physreg is at the boundary, defer it. Otherwise schedule it
// immediately to free the dependent. We can hoist the copy later.
bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
if (TargetRegisterInfo::isPhysicalRegister(
MI->getOperand(UnscheduledOper).getReg()))
return AtBoundary ? -1 : 1;
return 0;
}
/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
/// queue. But it's really done to make the heuristics easier to debug and
/// statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending.
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
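///
/// The heuristics are applied in decreasing priority: physreg copy bias,
/// excess pressure, critical pressure, acyclic latency (for limited loops),
/// unbuffered-resource stalls, clustering, weak edges, max pressure, resource
/// reduction and demand, latency, next def/use, and finally original node
/// order.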
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
SchedCandidate &TryCand,
SchedBoundary &Zone,
const RegPressureTracker &RPTracker,
RegPressureTracker &TempTracker) {
if (DAG->isTrackingPressure()) {
// Always initialize TryCand's RPDelta.
if (Zone.isTop()) {
TempTracker.getMaxDownwardPressureDelta(
TryCand.SU->getInstr(),
TryCand.RPDelta,
DAG->getRegionCriticalPSets(),
DAG->getRegPressure().MaxSetPressure);
}
else {
if (VerifyScheduling) {
TempTracker.getMaxUpwardPressureDelta(
TryCand.SU->getInstr(),
&DAG->getPressureDiff(TryCand.SU),
TryCand.RPDelta,
DAG->getRegionCriticalPSets(),
DAG->getRegPressure().MaxSetPressure);
}
else {
RPTracker.getUpwardPressureDelta(
TryCand.SU->getInstr(),
DAG->getPressureDiff(TryCand.SU),
TryCand.RPDelta,
DAG->getRegionCriticalPSets(),
DAG->getRegPressure().MaxSetPressure);
}
}
}
DEBUG(if (TryCand.RPDelta.Excess.isValid())
dbgs() << " SU(" << TryCand.SU->NodeNum << ") "
<< TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
<< ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");
// Initialize the candidate if needed.
if (!Cand.isValid()) {
TryCand.Reason = NodeOrder;
return;
}
if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
biasPhysRegCopy(Cand.SU, Zone.isTop()),
TryCand, Cand, PhysRegCopy))
return;
// Avoid exceeding the target's limit.
if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
Cand.RPDelta.Excess,
TryCand, Cand, RegExcess))
return;
// Avoid increasing the max critical pressure in the scheduled region.
if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
Cand.RPDelta.CriticalMax,
TryCand, Cand, RegCritical))
return;
// For loops that are acyclic path limited, aggressively schedule for latency.
// This can result in very long dependence chains scheduled in sequence, so
// once every cycle (when CurrMOps == 0), switch to normal heuristics.
if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
&& tryLatency(TryCand, Cand, Zone))
return;
// Prioritize instructions that read unbuffered resources by stall cycles.
if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
return;
// Keep clustered nodes together to encourage downstream peephole
// optimizations which may reduce resource requirements.
//
// This is a best effort to set things up for a post-RA pass. Optimizations
// like generating loads of multiple registers should ideally be done within
// the scheduler pass by combining the loads during DAG postprocessing.
const SUnit *NextClusterSU =
Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
TryCand, Cand, Cluster))
return;
// Weak edges are for clustering and other constraints.
if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
getWeakLeft(Cand.SU, Zone.isTop()),
TryCand, Cand, Weak)) {
return;
}
// Avoid increasing the max pressure of the entire region.
if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
Cand.RPDelta.CurrentMax,
TryCand, Cand, RegMax))
return;
// Avoid critical resource consumption and balance the schedule.
TryCand.initResourceDelta(DAG, SchedModel);
if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
TryCand, Cand, ResourceReduce))
return;
if (tryGreater(TryCand.ResDelta.DemandedResources,
Cand.ResDelta.DemandedResources,
TryCand, Cand, ResourceDemand))
return;
// Avoid serializing long latency dependence chains.
// For acyclic path limited loops, latency was already checked above.
if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited
&& tryLatency(TryCand, Cand, Zone)) {
return;
}
// Prefer immediate defs/users of the last scheduled instruction. This is a
// local pressure avoidance strategy that also makes the machine code
// readable.
if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
TryCand, Cand, NextDefUse))
return;
// Fall through to original instruction order.
if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
|| (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
TryCand.Reason = NodeOrder;
}
}
/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
const RegPressureTracker &RPTracker,
SchedCandidate &Cand) {
ReadyQueue &Q = Zone.Available;
DEBUG(Q.dump());
// getMaxPressureDelta temporarily modifies the tracker.
RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
SchedCandidate TryCand(Cand.Policy);
TryCand.SU = *I;
tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
if (TryCand.Reason != NoCand) {
// Initialize resource delta if needed in case future heuristics query it.
if (TryCand.ResDelta == SchedResourceDelta())
TryCand.initResourceDelta(DAG, SchedModel);
Cand.setBest(TryCand);
DEBUG(traceCandidate(Cand));
}
}
}
/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
// Schedule as far as possible in the direction of no choice. This is most
// efficient, but also provides the best heuristics for CriticalPSets.
if (SUnit *SU = Bot.pickOnlyChoice()) {
IsTopNode = false;
DEBUG(dbgs() << "Pick Bot NOCAND\n");
return SU;
}
if (SUnit *SU = Top.pickOnlyChoice()) {
IsTopNode = true;
DEBUG(dbgs() << "Pick Top NOCAND\n");
return SU;
}
CandPolicy NoPolicy;
SchedCandidate BotCand(NoPolicy);
SchedCandidate TopCand(NoPolicy);
// Set the bottom-up policy based on the state of the current bottom zone and
// the instructions outside the zone, including the top zone.
setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
// Set the top-down policy based on the state of the current top zone and
// the instructions outside the zone, including the bottom zone.
setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);
// Prefer bottom scheduling when heuristics are silent.
pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
assert(BotCand.Reason != NoCand && "failed to find the first candidate");
// If either Q has a single candidate that provides the least increase in
// Excess pressure, we can immediately schedule from that Q.
//
// RegionCriticalPSets summarizes the pressure within the scheduled region and
// affects picking from either Q. If scheduling in one direction must
// increase pressure for one of the excess PSets, then schedule in that
// direction first to provide more freedom in the other direction.
if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
|| (BotCand.Reason == RegCritical
&& !BotCand.isRepeat(RegCritical)))
{
IsTopNode = false;
tracePick(BotCand, IsTopNode);
return BotCand.SU;
}
// Check if the top Q has a better candidate.
pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
assert(TopCand.Reason != NoCand && "failed to find the first candidate");
// Choose the queue with the most important (lowest enum) reason.
if (TopCand.Reason < BotCand.Reason) {
IsTopNode = true;
tracePick(TopCand, IsTopNode);
return TopCand.SU;
}
// Otherwise prefer the bottom candidate, in node order if all else failed.
IsTopNode = false;
tracePick(BotCand, IsTopNode);
return BotCand.SU;
}
/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
if (DAG->top() == DAG->bottom()) {
assert(Top.Available.empty() && Top.Pending.empty() &&
Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
return nullptr;
}
SUnit *SU;
do {
if (RegionPolicy.OnlyTopDown) {
SU = Top.pickOnlyChoice();
if (!SU) {
CandPolicy NoPolicy;
SchedCandidate TopCand(NoPolicy);
pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
assert(TopCand.Reason != NoCand && "failed to find a candidate");
tracePick(TopCand, true);
SU = TopCand.SU;
}
IsTopNode = true;
}
else if (RegionPolicy.OnlyBottomUp) {
SU = Bot.pickOnlyChoice();
if (!SU) {
CandPolicy NoPolicy;
SchedCandidate BotCand(NoPolicy);
pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
assert(BotCand.Reason != NoCand && "failed to find a candidate");
tracePick(BotCand, false);
SU = BotCand.SU;
}
IsTopNode = false;
}
else {
SU = pickNodeBidirectional(IsTopNode);
}
} while (SU->isScheduled);
if (SU->isTopReady())
Top.removeReady(SU);
if (SU->isBottomReady())
Bot.removeReady(SU);
DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
return SU;
}
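// Lowering-inserted physreg copies (e.g. a COPY feeding a call argument) are
// moved adjacent to their single physreg use or def below, keeping the
// physical register's live range from spanning unrelated instructions.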
void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
MachineBasicBlock::iterator InsertPos = SU->getInstr();
if (!isTop)
++InsertPos;
SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
// Find already scheduled copies with a single physreg dependence and move
// them just above the scheduled instruction.
for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
I != E; ++I) {
if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
continue;
SUnit *DepSU = I->getSUnit();
if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
continue;
MachineInstr *Copy = DepSU->getInstr();
if (!Copy->isCopy())
continue;
DEBUG(dbgs() << " Rescheduling physreg copy ";
I->getSUnit()->dump(DAG));
DAG->moveInstruction(Copy, InsertPos);
}
}
/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
Top.bumpNode(SU);
if (SU->hasPhysRegUses)
reschedulePhysRegCopies(SU, true);
}
else {
SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
Bot.bumpNode(SU);
if (SU->hasPhysRegDefs)
reschedulePhysRegCopies(SU, false);
}
}
/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
// Register DAG post-processors.
//
// FIXME: extend the mutation API to allow earlier mutations to instantiate
// data and pass it to later mutations. Have a single mutation that gathers
// the interesting nodes in one pass.
DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
if (EnableLoadCluster && DAG->TII->enableClusterLoads())
DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
if (EnableMacroFusion)
DAG->addMutation(make_unique<MacroFusion>(DAG->TII));
return DAG;
}
static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
createGenericSchedLive);
//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//
void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
DAG = Dag;
SchedModel = DAG->getSchedModel();
TRI = DAG->TRI;
Rem.init(DAG, SchedModel);
Top.init(DAG, SchedModel, &Rem);
BotRoots.clear();
// Initialize the HazardRecognizers. If itineraries don't exist, are empty,
// or are disabled, then these HazardRecs will be disabled.
const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
if (!Top.HazardRec) {
Top.HazardRec =
DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
Itin, DAG);
}
}
void PostGenericScheduler::registerRoots() {
Rem.CriticalPath = DAG->ExitSU.getDepth();
// Some roots may not feed into ExitSU. Check all of them just in case.
for (SmallVectorImpl<SUnit*>::const_iterator
I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
if ((*I)->getDepth() > Rem.CriticalPath)
Rem.CriticalPath = (*I)->getDepth();
}
DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
if (DumpCriticalPathLength) {
errs() << "Critical Path(PGS-RR): " << Rem.CriticalPath << "\n";
}
}
/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
SchedCandidate &TryCand) {
// Initialize the candidate if needed.
if (!Cand.isValid()) {
TryCand.Reason = NodeOrder;
return;
}
// Prioritize instructions that read unbuffered resources by stall cycles.
if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
return;
// Avoid critical resource consumption and balance the schedule.
if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
TryCand, Cand, ResourceReduce))
return;
if (tryGreater(TryCand.ResDelta.DemandedResources,
Cand.ResDelta.DemandedResources,
TryCand, Cand, ResourceDemand))
return;
// Avoid serializing long latency dependence chains.
if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
return;
}
// Fall through to original instruction order.
if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
TryCand.Reason = NodeOrder;
}
void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
ReadyQueue &Q = Top.Available;
DEBUG(Q.dump());
for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
SchedCandidate TryCand(Cand.Policy);
TryCand.SU = *I;
TryCand.initResourceDelta(DAG, SchedModel);
tryCandidate(Cand, TryCand);
if (TryCand.Reason != NoCand) {
Cand.setBest(TryCand);
DEBUG(traceCandidate(Cand));
}
}
}
/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
if (DAG->top() == DAG->bottom()) {
assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
return nullptr;
}
SUnit *SU;
do {
SU = Top.pickOnlyChoice();
if (!SU) {
CandPolicy NoPolicy;
SchedCandidate TopCand(NoPolicy);
// Set the top-down policy based on the state of the current top zone and
// the instructions outside the zone, including the bottom zone.
setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
pickNodeFromQueue(TopCand);
assert(TopCand.Reason != NoCand && "failed to find a candidate");
tracePick(TopCand, true);
SU = TopCand.SU;
}
} while (SU->isScheduled);
IsTopNode = true;
Top.removeReady(SU);
DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
return SU;
}
/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
Top.bumpNode(SU);
}
/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
}
//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//
namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
const SchedDFSResult *DFSResult;
const BitVector *ScheduledTrees;
bool MaximizeILP;
ILPOrder(bool MaxILP)
: DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
/// \brief Apply a less-than relation on node priority.
///
/// (Return true if A comes after B in the Q.)
bool operator()(const SUnit *A, const SUnit *B) const {
unsigned SchedTreeA = DFSResult->getSubtreeID(A);
unsigned SchedTreeB = DFSResult->getSubtreeID(B);
if (SchedTreeA != SchedTreeB) {
// Unscheduled trees have lower priority.
if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
return ScheduledTrees->test(SchedTreeB);
// Trees with shallower connections have lower priority.
if (DFSResult->getSubtreeLevel(SchedTreeA)
!= DFSResult->getSubtreeLevel(SchedTreeB)) {
return DFSResult->getSubtreeLevel(SchedTreeA)
< DFSResult->getSubtreeLevel(SchedTreeB);
}
}
if (MaximizeILP)
return DFSResult->getILP(A) < DFSResult->getILP(B);
else
return DFSResult->getILP(A) > DFSResult->getILP(B);
}
};
/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
ScheduleDAGMILive *DAG;
ILPOrder Cmp;
std::vector<SUnit*> ReadyQ;
public:
ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
void initialize(ScheduleDAGMI *dag) override {
assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
DAG = static_cast<ScheduleDAGMILive*>(dag);
DAG->computeDFSResult();
Cmp.DFSResult = DAG->getDFSResult();
Cmp.ScheduledTrees = &DAG->getScheduledTrees();
ReadyQ.clear();
}
void registerRoots() override {
// Restore the heap in ReadyQ with the updated DFS results.
std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
}
/// Implement MachineSchedStrategy interface.
/// -----------------------------------------
/// Callback to select the highest priority node from the ready Q.
SUnit *pickNode(bool &IsTopNode) override {
if (ReadyQ.empty()) return nullptr;
std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
SUnit *SU = ReadyQ.back();
ReadyQ.pop_back();
IsTopNode = false;
DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
<< " ILP: " << DAG->getDFSResult()->getILP(SU)
<< " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
<< DAG->getDFSResult()->getSubtreeLevel(
DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
<< "Scheduling " << *SU->getInstr());
return SU;
}
/// \brief Scheduler callback to notify that a new subtree is scheduled.
void scheduleTree(unsigned SubtreeID) override {
std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
}
/// Callback after a node is scheduled. Mark a newly scheduled tree, notify
/// DFSResults, and resort the priority Q.
void schedNode(SUnit *SU, bool IsTopNode) override {
assert(!IsTopNode && "SchedDFSResult needs bottom-up");
}
void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
void releaseBottomNode(SUnit *SU) override {
ReadyQ.push_back(SU);
std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
}
};
} // namespace
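// Illustrative sketch (not from this file): ReadyQ above is kept as a binary
// max-heap keyed by ILPOrder, so the best node is always at the front. The
// standard-library heap protocol it relies on is:
#if 0
#include <algorithm>
#include <vector>
int heapDemo() {
  std::vector<int> Q = {3, 1, 4};
  std::make_heap(Q.begin(), Q.end());  // heapify after bulk updates
  Q.push_back(9);
  std::push_heap(Q.begin(), Q.end());  // sift the new element up
  std::pop_heap(Q.begin(), Q.end());   // move the max to Q.back()
  int Top = Q.back();                  // Top == 9
  Q.pop_back();
  return Top;
}
#endif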
static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
"ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
"ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//
#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
bool operator()(SUnit *A, SUnit *B) const {
if (IsReverse)
return A->NodeNum > B->NodeNum;
else
return A->NodeNum < B->NodeNum;
}
};
/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
bool IsAlternating;
bool IsTopDown;
// Using a less-than relation (SUnitOrder<false>) for the TopQ priority
// gives nodes with a higher number higher priority, causing the latest
// instructions to be scheduled first.
PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
TopQ;
// When scheduling bottom-up, use greater-than as the queue priority.
PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
BottomQ;
public:
InstructionShuffler(bool alternate, bool topdown)
: IsAlternating(alternate), IsTopDown(topdown) {}
void initialize(ScheduleDAGMI*) override {
TopQ.clear();
BottomQ.clear();
}
/// Implement MachineSchedStrategy interface.
/// -----------------------------------------
SUnit *pickNode(bool &IsTopNode) override {
SUnit *SU;
if (IsTopDown) {
do {
if (TopQ.empty()) return nullptr;
SU = TopQ.top();
TopQ.pop();
} while (SU->isScheduled);
IsTopNode = true;
}
else {
do {
if (BottomQ.empty()) return nullptr;
SU = BottomQ.top();
BottomQ.pop();
} while (SU->isScheduled);
IsTopNode = false;
}
if (IsAlternating)
IsTopDown = !IsTopDown;
return SU;
}
void schedNode(SUnit *SU, bool IsTopNode) override {}
void releaseTopNode(SUnit *SU) override {
TopQ.push(SU);
}
void releaseBottomNode(SUnit *SU) override {
BottomQ.push(SU);
}
};
} // namespace
static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
bool Alternate = !ForceTopDown && !ForceBottomUp;
bool TopDown = !ForceBottomUp;
assert((TopDown || !ForceTopDown) &&
"-misched-topdown incompatible with -misched-bottomup");
return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
"shuffle", "Shuffle machine instructions alternating directions",
createInstructionShuffler);
#endif // !NDEBUG
//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//
#ifndef NDEBUG
namespace llvm {
template<> struct GraphTraits<
ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const ScheduleDAG *G) {
return G->MF.getName();
}
static bool renderGraphFromBottomUp() {
return true;
}
static bool isNodeHidden(const SUnit *Node) {
return (Node->Preds.size() > 10 || Node->Succs.size() > 10);
}
static bool hasNodeAddressLabel(const SUnit *Node,
const ScheduleDAG *Graph) {
return false;
}
/// If you want to override the dot attributes printed for a particular
/// edge, override this method.
static std::string getEdgeAttributes(const SUnit *Node,
SUnitIterator EI,
const ScheduleDAG *Graph) {
if (EI.isArtificialDep())
return "color=cyan,style=dashed";
if (EI.isCtrlDep())
return "color=blue,style=dashed";
return "";
}
static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
std::string Str;
raw_string_ostream SS(Str);
const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
SS << "SU:" << SU->NodeNum;
if (DFS)
SS << " I:" << DFS->getNumInstrs(SU);
return SS.str();
}
static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
return G->getGraphNodeLabel(SU);
}
static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
std::string Str("shape=Mrecord");
const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
if (DFS) {
Str += ",style=filled,fillcolor=\"#";
Str += DOT::getColorString(DFS->getSubtreeID(N));
Str += '"';
}
return Str;
}
};
} // namespace llvm
#endif // NDEBUG
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
ViewGraph(this, Name, false, Title);
#else
errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/GCMetadataPrinter.cpp | //===-- GCMetadataPrinter.cpp - Garbage collection infrastructure ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the abstract base class GCMetadataPrinter.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCMetadataPrinter.h"
using namespace llvm;
GCMetadataPrinter::GCMetadataPrinter() {}
GCMetadataPrinter::~GCMetadataPrinter() {}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegisterScavenging.cpp | //===-- RegisterScavenging.cpp - Machine register scavenging --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the machine register scavenger. It can provide
// information, such as unused registers, at any point in a machine basic block.
// It also provides a mechanism to make registers available by evicting them to
// spill slots.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "reg-scavenging"
/// setRegUsed - Set the register units of this register as used.
void RegScavenger::setRegUsed(unsigned Reg) {
for (MCRegUnitIterator RUI(Reg, TRI); RUI.isValid(); ++RUI)
RegUnitsAvailable.reset(*RUI);
}
void RegScavenger::initRegState() {
for (SmallVectorImpl<ScavengedInfo>::iterator I = Scavenged.begin(),
IE = Scavenged.end(); I != IE; ++I) {
I->Reg = 0;
I->Restore = nullptr;
}
// All register units start out unused.
RegUnitsAvailable.set();
if (!MBB)
return;
// Live-in registers are in use.
for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
E = MBB->livein_end(); I != E; ++I)
setRegUsed(*I);
// Pristine CSRs are also unavailable.
const MachineFunction &MF = *MBB->getParent();
BitVector PR = MF.getFrameInfo()->getPristineRegs(MF);
for (int I = PR.find_first(); I>0; I = PR.find_next(I))
setRegUsed(I);
}
void RegScavenger::enterBasicBlock(MachineBasicBlock *mbb) {
MachineFunction &MF = *mbb->getParent();
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
assert((NumRegUnits == 0 || NumRegUnits == TRI->getNumRegUnits()) &&
"Target changed?");
// It is not possible to use the register scavenger after late optimization
// passes that don't preserve accurate liveness information.
assert(MRI->tracksLiveness() &&
"Cannot use register scavenger with inaccurate liveness");
// Self-initialize.
if (!MBB) {
NumRegUnits = TRI->getNumRegUnits();
RegUnitsAvailable.resize(NumRegUnits);
KillRegUnits.resize(NumRegUnits);
DefRegUnits.resize(NumRegUnits);
TmpRegUnits.resize(NumRegUnits);
}
MBB = mbb;
initRegState();
Tracking = false;
}
void RegScavenger::addRegUnits(BitVector &BV, unsigned Reg) {
for (MCRegUnitIterator RUI(Reg, TRI); RUI.isValid(); ++RUI)
BV.set(*RUI);
}
void RegScavenger::determineKillsAndDefs() {
assert(Tracking && "Must be tracking to determine kills and defs");
MachineInstr *MI = MBBI;
assert(!MI->isDebugValue() && "Debug values have no kills or defs");
// Find out which registers are early clobbered, killed, defined, and marked
// def-dead in this instruction.
KillRegUnits.reset();
DefRegUnits.reset();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask()) {
TmpRegUnits.clear();
for (unsigned RU = 0, RUEnd = TRI->getNumRegUnits(); RU != RUEnd; ++RU) {
for (MCRegUnitRootIterator RURI(RU, TRI); RURI.isValid(); ++RURI) {
if (MO.clobbersPhysReg(*RURI)) {
TmpRegUnits.set(RU);
break;
}
}
}
// Apply the mask.
KillRegUnits |= TmpRegUnits;
}
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) || isReserved(Reg))
continue;
if (MO.isUse()) {
// Ignore undef uses.
if (MO.isUndef())
continue;
if (MO.isKill())
addRegUnits(KillRegUnits, Reg);
} else {
assert(MO.isDef());
if (MO.isDead())
addRegUnits(KillRegUnits, Reg);
else
addRegUnits(DefRegUnits, Reg);
}
}
}
void RegScavenger::unprocess() {
assert(Tracking && "Cannot unprocess because we're not tracking");
MachineInstr *MI = MBBI;
if (!MI->isDebugValue()) {
determineKillsAndDefs();
// Commit the changes.
setUsed(KillRegUnits);
setUnused(DefRegUnits);
}
if (MBBI == MBB->begin()) {
MBBI = MachineBasicBlock::iterator(nullptr);
Tracking = false;
} else
--MBBI;
}
void RegScavenger::forward() {
// Move ptr forward.
if (!Tracking) {
MBBI = MBB->begin();
Tracking = true;
} else {
assert(MBBI != MBB->end() && "Already past the end of the basic block!");
MBBI = std::next(MBBI);
}
assert(MBBI != MBB->end() && "Already at the end of the basic block!");
MachineInstr *MI = MBBI;
for (SmallVectorImpl<ScavengedInfo>::iterator I = Scavenged.begin(),
IE = Scavenged.end(); I != IE; ++I) {
if (I->Restore != MI)
continue;
I->Reg = 0;
I->Restore = nullptr;
}
if (MI->isDebugValue())
return;
determineKillsAndDefs();
// Verify uses and defs.
#ifndef NDEBUG
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) || isReserved(Reg))
continue;
if (MO.isUse()) {
if (MO.isUndef())
continue;
if (!isRegUsed(Reg)) {
// Check if it's partial live: e.g.
// D0 = insert_subreg D0<undef>, S0
// ... D0
// The problem is the insert_subreg could be eliminated. The use of
// D0 is using a partially undef value. This is not *incorrect* since
// S1 can be freely clobbered.
// Ideally we would like a way to model this, but leaving the
// insert_subreg around causes both correctness and performance issues.
bool SubUsed = false;
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
if (isRegUsed(*SubRegs)) {
SubUsed = true;
break;
}
bool SuperUsed = false;
for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR) {
if (isRegUsed(*SR)) {
SuperUsed = true;
break;
}
}
if (!SubUsed && !SuperUsed) {
MBB->getParent()->verify(nullptr, "In Register Scavenger");
llvm_unreachable("Using an undefined register!");
}
(void)SubUsed;
(void)SuperUsed;
}
} else {
assert(MO.isDef());
#if 0
// FIXME: Enable this once we've figured out how to correctly transfer
// implicit kills during codegen passes like the coalescer.
assert((KillRegs.test(Reg) || isUnused(Reg) ||
isLiveInButUnusedBefore(Reg, MI, MBB, TRI, MRI)) &&
"Re-defining a live register!");
#endif
}
}
#endif // NDEBUG
// Commit the changes.
setUnused(KillRegUnits);
setUsed(DefRegUnits);
}
bool RegScavenger::isRegUsed(unsigned Reg, bool includeReserved) const {
if (includeReserved && isReserved(Reg))
return true;
for (MCRegUnitIterator RUI(Reg, TRI); RUI.isValid(); ++RUI)
if (!RegUnitsAvailable.test(*RUI))
return true;
return false;
}
unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I)
if (!isRegUsed(*I)) {
DEBUG(dbgs() << "Scavenger found unused reg: " << TRI->getName(*I) <<
"\n");
return *I;
}
return 0;
}
/// getRegsAvailable - Return all available registers in the register class
/// in Mask.
BitVector RegScavenger::getRegsAvailable(const TargetRegisterClass *RC) {
BitVector Mask(TRI->getNumRegs());
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I)
if (!isRegUsed(*I))
Mask.set(*I);
return Mask;
}
/// findSurvivorReg - Return the candidate register that is unused for the
/// longest after StartMII. UseMI is set to the instruction where the search
/// stopped.
///
/// No more than InstrLimit instructions are inspected.
///
unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
BitVector &Candidates,
unsigned InstrLimit,
MachineBasicBlock::iterator &UseMI) {
int Survivor = Candidates.find_first();
assert(Survivor > 0 && "No candidates for scavenging");
MachineBasicBlock::iterator ME = MBB->getFirstTerminator();
assert(StartMI != ME && "MI already at terminator");
MachineBasicBlock::iterator RestorePointMI = StartMI;
MachineBasicBlock::iterator MI = StartMI;
bool inVirtLiveRange = false;
for (++MI; InstrLimit > 0 && MI != ME; ++MI, --InstrLimit) {
if (MI->isDebugValue()) {
++InstrLimit; // Don't count debug instructions
continue;
}
bool isVirtKillInsn = false;
bool isVirtDefInsn = false;
// Remove any candidates touched by instruction.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask())
Candidates.clearBitsNotInMask(MO.getRegMask());
if (!MO.isReg() || MO.isUndef() || !MO.getReg())
continue;
if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (MO.isDef())
isVirtDefInsn = true;
else if (MO.isKill())
isVirtKillInsn = true;
continue;
}
for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
Candidates.reset(*AI);
}
// If we're not in a virtual reg's live range, this is a valid
// restore point.
if (!inVirtLiveRange) RestorePointMI = MI;
// Update whether we're in the live range of a virtual register
if (isVirtKillInsn) inVirtLiveRange = false;
if (isVirtDefInsn) inVirtLiveRange = true;
// Was our survivor untouched by this instruction?
if (Candidates.test(Survivor))
continue;
// All candidates gone?
if (Candidates.none())
break;
Survivor = Candidates.find_first();
}
// If we ran off the end, that's where we want to restore.
if (MI == ME) RestorePointMI = ME;
assert(RestorePointMI != StartMI &&
"No available scavenger restore location!");
// We ran out of candidates, so stop the search.
UseMI = RestorePointMI;
return Survivor;
}
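// Illustrative walk-through (not from this file): with Candidates = {R1, R2}
// and a use of R1 two instructions past StartMI, the loop above clears R1
// from Candidates at that use and switches Survivor to R2. RestorePointMI
// keeps advancing as long as no virtual register is live across it, and the
// caller restores the scavenged register just before that point.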
static unsigned getFrameIndexOperandNum(MachineInstr *MI) {
unsigned i = 0;
while (!MI->getOperand(i).isFI()) {
++i;
assert(i < MI->getNumOperands() &&
"Instr doesn't have FrameIndex operand!");
}
return i;
}
unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
MachineBasicBlock::iterator I,
int SPAdj) {
// Consider all allocatable registers in the register class initially
BitVector Candidates =
TRI->getAllocatableSet(*I->getParent()->getParent(), RC);
// Exclude all the registers being used by the instruction.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
MachineOperand &MO = I->getOperand(i);
if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
!TargetRegisterInfo::isVirtualRegister(MO.getReg()))
Candidates.reset(MO.getReg());
}
// Try to find a register that's unused if there is one, as then we won't
// have to spill.
BitVector Available = getRegsAvailable(RC);
Available &= Candidates;
if (Available.any())
Candidates = Available;
// Find the register whose use is furthest away.
MachineBasicBlock::iterator UseMI;
unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
// If we found an unused register there is no reason to spill it.
if (!isRegUsed(SReg)) {
DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
return SReg;
}
// Find an available scavenging slot.
unsigned SI;
for (SI = 0; SI < Scavenged.size(); ++SI)
if (Scavenged[SI].Reg == 0)
break;
if (SI == Scavenged.size()) {
// We need to scavenge a register but have no spill slot, the target
// must know how to do it (if not, we'll assert below).
Scavenged.push_back(ScavengedInfo());
}
// Avoid infinite regress
Scavenged[SI].Reg = SReg;
// If the target knows how to save/restore the register, let it do so;
// otherwise, use the emergency stack spill slot.
if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) {
// Spill the scavenged register before I.
assert(Scavenged[SI].FrameIndex >= 0 &&
"Cannot scavenge register without an emergency spill slot!");
TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex,
RC, TRI);
MachineBasicBlock::iterator II = std::prev(I);
unsigned FIOperandNum = getFrameIndexOperandNum(II);
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
// Restore the scavenged register before its use (or first terminator).
TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,
RC, TRI);
II = std::prev(UseMI);
FIOperandNum = getFrameIndexOperandNum(II);
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
}
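// Schematically, when a spill was needed the emitted sequence is:
//   STORE SReg, <emergency slot>   ; inserted before I
//   ...   SReg serves as the scratch register ...
//   LOAD  SReg, <emergency slot>   ; inserted before UseMI (restore point)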
Scavenged[SI].Restore = std::prev(UseMI);
// Doing this here leads to infinite regress.
// Scavenged[SI].Reg = SReg;
DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) <<
"\n");
return SReg;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/TargetLoweringBase.cpp | //===-- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
using namespace llvm;
static cl::opt<bool> JumpIsExpensiveOverride(
"jump-is-expensive", cl::init(false),
cl::desc("Do not create extra branches to split comparison logic."),
cl::Hidden);
/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names, const Triple &TT) {
Names[RTLIB::SHL_I16] = "__ashlhi3";
Names[RTLIB::SHL_I32] = "__ashlsi3";
Names[RTLIB::SHL_I64] = "__ashldi3";
Names[RTLIB::SHL_I128] = "__ashlti3";
Names[RTLIB::SRL_I16] = "__lshrhi3";
Names[RTLIB::SRL_I32] = "__lshrsi3";
Names[RTLIB::SRL_I64] = "__lshrdi3";
Names[RTLIB::SRL_I128] = "__lshrti3";
Names[RTLIB::SRA_I16] = "__ashrhi3";
Names[RTLIB::SRA_I32] = "__ashrsi3";
Names[RTLIB::SRA_I64] = "__ashrdi3";
Names[RTLIB::SRA_I128] = "__ashrti3";
Names[RTLIB::MUL_I8] = "__mulqi3";
Names[RTLIB::MUL_I16] = "__mulhi3";
Names[RTLIB::MUL_I32] = "__mulsi3";
Names[RTLIB::MUL_I64] = "__muldi3";
Names[RTLIB::MUL_I128] = "__multi3";
Names[RTLIB::MULO_I32] = "__mulosi4";
Names[RTLIB::MULO_I64] = "__mulodi4";
Names[RTLIB::MULO_I128] = "__muloti4";
Names[RTLIB::SDIV_I8] = "__divqi3";
Names[RTLIB::SDIV_I16] = "__divhi3";
Names[RTLIB::SDIV_I32] = "__divsi3";
Names[RTLIB::SDIV_I64] = "__divdi3";
Names[RTLIB::SDIV_I128] = "__divti3";
Names[RTLIB::UDIV_I8] = "__udivqi3";
Names[RTLIB::UDIV_I16] = "__udivhi3";
Names[RTLIB::UDIV_I32] = "__udivsi3";
Names[RTLIB::UDIV_I64] = "__udivdi3";
Names[RTLIB::UDIV_I128] = "__udivti3";
Names[RTLIB::SREM_I8] = "__modqi3";
Names[RTLIB::SREM_I16] = "__modhi3";
Names[RTLIB::SREM_I32] = "__modsi3";
Names[RTLIB::SREM_I64] = "__moddi3";
Names[RTLIB::SREM_I128] = "__modti3";
Names[RTLIB::UREM_I8] = "__umodqi3";
Names[RTLIB::UREM_I16] = "__umodhi3";
Names[RTLIB::UREM_I32] = "__umodsi3";
Names[RTLIB::UREM_I64] = "__umoddi3";
Names[RTLIB::UREM_I128] = "__umodti3";
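// Note on the libgcc mangling used throughout this table: the integer
// suffixes qi/hi/si/di/ti denote 1/2/4/8/16-byte operands, and the FP
// suffixes sf/df/xf/tf denote float/double/80-bit x87/128-bit types.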
// These are generally not available.
Names[RTLIB::SDIVREM_I8] = nullptr;
Names[RTLIB::SDIVREM_I16] = nullptr;
Names[RTLIB::SDIVREM_I32] = nullptr;
Names[RTLIB::SDIVREM_I64] = nullptr;
Names[RTLIB::SDIVREM_I128] = nullptr;
Names[RTLIB::UDIVREM_I8] = nullptr;
Names[RTLIB::UDIVREM_I16] = nullptr;
Names[RTLIB::UDIVREM_I32] = nullptr;
Names[RTLIB::UDIVREM_I64] = nullptr;
Names[RTLIB::UDIVREM_I128] = nullptr;
Names[RTLIB::NEG_I32] = "__negsi2";
Names[RTLIB::NEG_I64] = "__negdi2";
Names[RTLIB::ADD_F32] = "__addsf3";
Names[RTLIB::ADD_F64] = "__adddf3";
Names[RTLIB::ADD_F80] = "__addxf3";
Names[RTLIB::ADD_F128] = "__addtf3";
Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
Names[RTLIB::SUB_F32] = "__subsf3";
Names[RTLIB::SUB_F64] = "__subdf3";
Names[RTLIB::SUB_F80] = "__subxf3";
Names[RTLIB::SUB_F128] = "__subtf3";
Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
Names[RTLIB::MUL_F32] = "__mulsf3";
Names[RTLIB::MUL_F64] = "__muldf3";
Names[RTLIB::MUL_F80] = "__mulxf3";
Names[RTLIB::MUL_F128] = "__multf3";
Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
Names[RTLIB::DIV_F32] = "__divsf3";
Names[RTLIB::DIV_F64] = "__divdf3";
Names[RTLIB::DIV_F80] = "__divxf3";
Names[RTLIB::DIV_F128] = "__divtf3";
Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
Names[RTLIB::REM_F32] = "fmodf";
Names[RTLIB::REM_F64] = "fmod";
Names[RTLIB::REM_F80] = "fmodl";
Names[RTLIB::REM_F128] = "fmodl";
Names[RTLIB::REM_PPCF128] = "fmodl";
Names[RTLIB::FMA_F32] = "fmaf";
Names[RTLIB::FMA_F64] = "fma";
Names[RTLIB::FMA_F80] = "fmal";
Names[RTLIB::FMA_F128] = "fmal";
Names[RTLIB::FMA_PPCF128] = "fmal";
Names[RTLIB::POWI_F32] = "__powisf2";
Names[RTLIB::POWI_F64] = "__powidf2";
Names[RTLIB::POWI_F80] = "__powixf2";
Names[RTLIB::POWI_F128] = "__powitf2";
Names[RTLIB::POWI_PPCF128] = "__powitf2";
Names[RTLIB::SQRT_F32] = "sqrtf";
Names[RTLIB::SQRT_F64] = "sqrt";
Names[RTLIB::SQRT_F80] = "sqrtl";
Names[RTLIB::SQRT_F128] = "sqrtl";
Names[RTLIB::SQRT_PPCF128] = "sqrtl";
Names[RTLIB::LOG_F32] = "logf";
Names[RTLIB::LOG_F64] = "log";
Names[RTLIB::LOG_F80] = "logl";
Names[RTLIB::LOG_F128] = "logl";
Names[RTLIB::LOG_PPCF128] = "logl";
Names[RTLIB::LOG2_F32] = "log2f";
Names[RTLIB::LOG2_F64] = "log2";
Names[RTLIB::LOG2_F80] = "log2l";
Names[RTLIB::LOG2_F128] = "log2l";
Names[RTLIB::LOG2_PPCF128] = "log2l";
Names[RTLIB::LOG10_F32] = "log10f";
Names[RTLIB::LOG10_F64] = "log10";
Names[RTLIB::LOG10_F80] = "log10l";
Names[RTLIB::LOG10_F128] = "log10l";
Names[RTLIB::LOG10_PPCF128] = "log10l";
Names[RTLIB::EXP_F32] = "expf";
Names[RTLIB::EXP_F64] = "exp";
Names[RTLIB::EXP_F80] = "expl";
Names[RTLIB::EXP_F128] = "expl";
Names[RTLIB::EXP_PPCF128] = "expl";
Names[RTLIB::EXP2_F32] = "exp2f";
Names[RTLIB::EXP2_F64] = "exp2";
Names[RTLIB::EXP2_F80] = "exp2l";
Names[RTLIB::EXP2_F128] = "exp2l";
Names[RTLIB::EXP2_PPCF128] = "exp2l";
Names[RTLIB::SIN_F32] = "sinf";
Names[RTLIB::SIN_F64] = "sin";
Names[RTLIB::SIN_F80] = "sinl";
Names[RTLIB::SIN_F128] = "sinl";
Names[RTLIB::SIN_PPCF128] = "sinl";
Names[RTLIB::COS_F32] = "cosf";
Names[RTLIB::COS_F64] = "cos";
Names[RTLIB::COS_F80] = "cosl";
Names[RTLIB::COS_F128] = "cosl";
Names[RTLIB::COS_PPCF128] = "cosl";
Names[RTLIB::POW_F32] = "powf";
Names[RTLIB::POW_F64] = "pow";
Names[RTLIB::POW_F80] = "powl";
Names[RTLIB::POW_F128] = "powl";
Names[RTLIB::POW_PPCF128] = "powl";
Names[RTLIB::CEIL_F32] = "ceilf";
Names[RTLIB::CEIL_F64] = "ceil";
Names[RTLIB::CEIL_F80] = "ceill";
Names[RTLIB::CEIL_F128] = "ceill";
Names[RTLIB::CEIL_PPCF128] = "ceill";
Names[RTLIB::TRUNC_F32] = "truncf";
Names[RTLIB::TRUNC_F64] = "trunc";
Names[RTLIB::TRUNC_F80] = "truncl";
Names[RTLIB::TRUNC_F128] = "truncl";
Names[RTLIB::TRUNC_PPCF128] = "truncl";
Names[RTLIB::RINT_F32] = "rintf";
Names[RTLIB::RINT_F64] = "rint";
Names[RTLIB::RINT_F80] = "rintl";
Names[RTLIB::RINT_F128] = "rintl";
Names[RTLIB::RINT_PPCF128] = "rintl";
Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
Names[RTLIB::NEARBYINT_F64] = "nearbyint";
Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
Names[RTLIB::ROUND_F32] = "roundf";
Names[RTLIB::ROUND_F64] = "round";
Names[RTLIB::ROUND_F80] = "roundl";
Names[RTLIB::ROUND_F128] = "roundl";
Names[RTLIB::ROUND_PPCF128] = "roundl";
Names[RTLIB::FLOOR_F32] = "floorf";
Names[RTLIB::FLOOR_F64] = "floor";
Names[RTLIB::FLOOR_F80] = "floorl";
Names[RTLIB::FLOOR_F128] = "floorl";
Names[RTLIB::FLOOR_PPCF128] = "floorl";
Names[RTLIB::FMIN_F32] = "fminf";
Names[RTLIB::FMIN_F64] = "fmin";
Names[RTLIB::FMIN_F80] = "fminl";
Names[RTLIB::FMIN_F128] = "fminl";
Names[RTLIB::FMIN_PPCF128] = "fminl";
Names[RTLIB::FMAX_F32] = "fmaxf";
Names[RTLIB::FMAX_F64] = "fmax";
Names[RTLIB::FMAX_F80] = "fmaxl";
Names[RTLIB::FMAX_F128] = "fmaxl";
Names[RTLIB::FMAX_PPCF128] = "fmaxl";
Names[RTLIB::COPYSIGN_F32] = "copysignf";
Names[RTLIB::COPYSIGN_F64] = "copysign";
Names[RTLIB::COPYSIGN_F80] = "copysignl";
Names[RTLIB::COPYSIGN_F128] = "copysignl";
Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
Names[RTLIB::FPROUND_F64_F16] = "__truncdfhf2";
Names[RTLIB::FPROUND_F80_F16] = "__truncxfhf2";
Names[RTLIB::FPROUND_F128_F16] = "__trunctfhf2";
Names[RTLIB::FPROUND_PPCF128_F16] = "__trunctfhf2";
Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi";
Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi";
Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi";
Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi";
Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi";
Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi";
Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi";
Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi";
Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
Names[RTLIB::OEQ_F32] = "__eqsf2";
Names[RTLIB::OEQ_F64] = "__eqdf2";
Names[RTLIB::OEQ_F128] = "__eqtf2";
Names[RTLIB::UNE_F32] = "__nesf2";
Names[RTLIB::UNE_F64] = "__nedf2";
Names[RTLIB::UNE_F128] = "__netf2";
Names[RTLIB::OGE_F32] = "__gesf2";
Names[RTLIB::OGE_F64] = "__gedf2";
Names[RTLIB::OGE_F128] = "__getf2";
Names[RTLIB::OLT_F32] = "__ltsf2";
Names[RTLIB::OLT_F64] = "__ltdf2";
Names[RTLIB::OLT_F128] = "__lttf2";
Names[RTLIB::OLE_F32] = "__lesf2";
Names[RTLIB::OLE_F64] = "__ledf2";
Names[RTLIB::OLE_F128] = "__letf2";
Names[RTLIB::OGT_F32] = "__gtsf2";
Names[RTLIB::OGT_F64] = "__gtdf2";
Names[RTLIB::OGT_F128] = "__gttf2";
Names[RTLIB::UO_F32] = "__unordsf2";
Names[RTLIB::UO_F64] = "__unorddf2";
Names[RTLIB::UO_F128] = "__unordtf2";
Names[RTLIB::O_F32] = "__unordsf2";
Names[RTLIB::O_F64] = "__unorddf2";
Names[RTLIB::O_F128] = "__unordtf2";
Names[RTLIB::MEMCPY] = "memcpy";
Names[RTLIB::MEMMOVE] = "memmove";
Names[RTLIB::MEMSET] = "memset";
Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16] = "__sync_val_compare_and_swap_16";
Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
Names[RTLIB::SYNC_LOCK_TEST_AND_SET_16] = "__sync_lock_test_and_set_16";
Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
Names[RTLIB::SYNC_FETCH_AND_ADD_16] = "__sync_fetch_and_add_16";
Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
Names[RTLIB::SYNC_FETCH_AND_SUB_16] = "__sync_fetch_and_sub_16";
Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
Names[RTLIB::SYNC_FETCH_AND_AND_16] = "__sync_fetch_and_and_16";
Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
Names[RTLIB::SYNC_FETCH_AND_OR_16] = "__sync_fetch_and_or_16";
Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
Names[RTLIB::SYNC_FETCH_AND_XOR_16] = "__sync_fetch_and_xor_16";
Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
Names[RTLIB::SYNC_FETCH_AND_NAND_16] = "__sync_fetch_and_nand_16";
Names[RTLIB::SYNC_FETCH_AND_MAX_1] = "__sync_fetch_and_max_1";
Names[RTLIB::SYNC_FETCH_AND_MAX_2] = "__sync_fetch_and_max_2";
Names[RTLIB::SYNC_FETCH_AND_MAX_4] = "__sync_fetch_and_max_4";
Names[RTLIB::SYNC_FETCH_AND_MAX_8] = "__sync_fetch_and_max_8";
Names[RTLIB::SYNC_FETCH_AND_MAX_16] = "__sync_fetch_and_max_16";
Names[RTLIB::SYNC_FETCH_AND_UMAX_1] = "__sync_fetch_and_umax_1";
Names[RTLIB::SYNC_FETCH_AND_UMAX_2] = "__sync_fetch_and_umax_2";
Names[RTLIB::SYNC_FETCH_AND_UMAX_4] = "__sync_fetch_and_umax_4";
Names[RTLIB::SYNC_FETCH_AND_UMAX_8] = "__sync_fetch_and_umax_8";
Names[RTLIB::SYNC_FETCH_AND_UMAX_16] = "__sync_fetch_and_umax_16";
Names[RTLIB::SYNC_FETCH_AND_MIN_1] = "__sync_fetch_and_min_1";
Names[RTLIB::SYNC_FETCH_AND_MIN_2] = "__sync_fetch_and_min_2";
Names[RTLIB::SYNC_FETCH_AND_MIN_4] = "__sync_fetch_and_min_4";
Names[RTLIB::SYNC_FETCH_AND_MIN_8] = "__sync_fetch_and_min_8";
Names[RTLIB::SYNC_FETCH_AND_MIN_16] = "__sync_fetch_and_min_16";
Names[RTLIB::SYNC_FETCH_AND_UMIN_1] = "__sync_fetch_and_umin_1";
Names[RTLIB::SYNC_FETCH_AND_UMIN_2] = "__sync_fetch_and_umin_2";
Names[RTLIB::SYNC_FETCH_AND_UMIN_4] = "__sync_fetch_and_umin_4";
Names[RTLIB::SYNC_FETCH_AND_UMIN_8] = "__sync_fetch_and_umin_8";
Names[RTLIB::SYNC_FETCH_AND_UMIN_16] = "__sync_fetch_and_umin_16";
if (TT.getEnvironment() == Triple::GNU) {
Names[RTLIB::SINCOS_F32] = "sincosf";
Names[RTLIB::SINCOS_F64] = "sincos";
Names[RTLIB::SINCOS_F80] = "sincosl";
Names[RTLIB::SINCOS_F128] = "sincosl";
Names[RTLIB::SINCOS_PPCF128] = "sincosl";
} else {
// These are generally not available.
Names[RTLIB::SINCOS_F32] = nullptr;
Names[RTLIB::SINCOS_F64] = nullptr;
Names[RTLIB::SINCOS_F80] = nullptr;
Names[RTLIB::SINCOS_F128] = nullptr;
Names[RTLIB::SINCOS_PPCF128] = nullptr;
}
if (!TT.isOSOpenBSD()) {
Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
} else {
// These are generally not available.
Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
}
// For f16/f32 conversions, Darwin uses the standard naming scheme, instead
// of the gnueabi-style __gnu_*_ieee.
// FIXME: What about other targets?
if (TT.isOSDarwin()) {
Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
}
}
/// InitLibcallCallingConvs - Set default libcall CallingConvs.
///
static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
CCs[i] = CallingConv::C;
}
}
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f16) {
if (RetVT == MVT::f32)
return FPEXT_F16_F32;
} else if (OpVT == MVT::f32) {
if (RetVT == MVT::f64)
return FPEXT_F32_F64;
if (RetVT == MVT::f128)
return FPEXT_F32_F128;
} else if (OpVT == MVT::f64) {
if (RetVT == MVT::f128)
return FPEXT_F64_F128;
}
return UNKNOWN_LIBCALL;
}
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::f16) {
if (OpVT == MVT::f32)
return FPROUND_F32_F16;
if (OpVT == MVT::f64)
return FPROUND_F64_F16;
if (OpVT == MVT::f80)
return FPROUND_F80_F16;
if (OpVT == MVT::f128)
return FPROUND_F128_F16;
if (OpVT == MVT::ppcf128)
return FPROUND_PPCF128_F16;
} else if (RetVT == MVT::f32) {
if (OpVT == MVT::f64)
return FPROUND_F64_F32;
if (OpVT == MVT::f80)
return FPROUND_F80_F32;
if (OpVT == MVT::f128)
return FPROUND_F128_F32;
if (OpVT == MVT::ppcf128)
return FPROUND_PPCF128_F32;
} else if (RetVT == MVT::f64) {
if (OpVT == MVT::f80)
return FPROUND_F80_F64;
if (OpVT == MVT::f128)
return FPROUND_F128_F64;
if (OpVT == MVT::ppcf128)
return FPROUND_PPCF128_F64;
}
return UNKNOWN_LIBCALL;
}
/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f32) {
if (RetVT == MVT::i8)
return FPTOSINT_F32_I8;
if (RetVT == MVT::i16)
return FPTOSINT_F32_I16;
if (RetVT == MVT::i32)
return FPTOSINT_F32_I32;
if (RetVT == MVT::i64)
return FPTOSINT_F32_I64;
if (RetVT == MVT::i128)
return FPTOSINT_F32_I128;
} else if (OpVT == MVT::f64) {
if (RetVT == MVT::i8)
return FPTOSINT_F64_I8;
if (RetVT == MVT::i16)
return FPTOSINT_F64_I16;
if (RetVT == MVT::i32)
return FPTOSINT_F64_I32;
if (RetVT == MVT::i64)
return FPTOSINT_F64_I64;
if (RetVT == MVT::i128)
return FPTOSINT_F64_I128;
} else if (OpVT == MVT::f80) {
if (RetVT == MVT::i32)
return FPTOSINT_F80_I32;
if (RetVT == MVT::i64)
return FPTOSINT_F80_I64;
if (RetVT == MVT::i128)
return FPTOSINT_F80_I128;
} else if (OpVT == MVT::f128) {
if (RetVT == MVT::i32)
return FPTOSINT_F128_I32;
if (RetVT == MVT::i64)
return FPTOSINT_F128_I64;
if (RetVT == MVT::i128)
return FPTOSINT_F128_I128;
} else if (OpVT == MVT::ppcf128) {
if (RetVT == MVT::i32)
return FPTOSINT_PPCF128_I32;
if (RetVT == MVT::i64)
return FPTOSINT_PPCF128_I64;
if (RetVT == MVT::i128)
return FPTOSINT_PPCF128_I128;
}
return UNKNOWN_LIBCALL;
}
/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f32) {
if (RetVT == MVT::i8)
return FPTOUINT_F32_I8;
if (RetVT == MVT::i16)
return FPTOUINT_F32_I16;
if (RetVT == MVT::i32)
return FPTOUINT_F32_I32;
if (RetVT == MVT::i64)
return FPTOUINT_F32_I64;
if (RetVT == MVT::i128)
return FPTOUINT_F32_I128;
} else if (OpVT == MVT::f64) {
if (RetVT == MVT::i8)
return FPTOUINT_F64_I8;
if (RetVT == MVT::i16)
return FPTOUINT_F64_I16;
if (RetVT == MVT::i32)
return FPTOUINT_F64_I32;
if (RetVT == MVT::i64)
return FPTOUINT_F64_I64;
if (RetVT == MVT::i128)
return FPTOUINT_F64_I128;
} else if (OpVT == MVT::f80) {
if (RetVT == MVT::i32)
return FPTOUINT_F80_I32;
if (RetVT == MVT::i64)
return FPTOUINT_F80_I64;
if (RetVT == MVT::i128)
return FPTOUINT_F80_I128;
} else if (OpVT == MVT::f128) {
if (RetVT == MVT::i32)
return FPTOUINT_F128_I32;
if (RetVT == MVT::i64)
return FPTOUINT_F128_I64;
if (RetVT == MVT::i128)
return FPTOUINT_F128_I128;
} else if (OpVT == MVT::ppcf128) {
if (RetVT == MVT::i32)
return FPTOUINT_PPCF128_I32;
if (RetVT == MVT::i64)
return FPTOUINT_PPCF128_I64;
if (RetVT == MVT::i128)
return FPTOUINT_PPCF128_I128;
}
return UNKNOWN_LIBCALL;
}
/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::i32) {
if (RetVT == MVT::f32)
return SINTTOFP_I32_F32;
if (RetVT == MVT::f64)
return SINTTOFP_I32_F64;
if (RetVT == MVT::f80)
return SINTTOFP_I32_F80;
if (RetVT == MVT::f128)
return SINTTOFP_I32_F128;
if (RetVT == MVT::ppcf128)
return SINTTOFP_I32_PPCF128;
} else if (OpVT == MVT::i64) {
if (RetVT == MVT::f32)
return SINTTOFP_I64_F32;
if (RetVT == MVT::f64)
return SINTTOFP_I64_F64;
if (RetVT == MVT::f80)
return SINTTOFP_I64_F80;
if (RetVT == MVT::f128)
return SINTTOFP_I64_F128;
if (RetVT == MVT::ppcf128)
return SINTTOFP_I64_PPCF128;
} else if (OpVT == MVT::i128) {
if (RetVT == MVT::f32)
return SINTTOFP_I128_F32;
if (RetVT == MVT::f64)
return SINTTOFP_I128_F64;
if (RetVT == MVT::f80)
return SINTTOFP_I128_F80;
if (RetVT == MVT::f128)
return SINTTOFP_I128_F128;
if (RetVT == MVT::ppcf128)
return SINTTOFP_I128_PPCF128;
}
return UNKNOWN_LIBCALL;
}
/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::i32) {
if (RetVT == MVT::f32)
return UINTTOFP_I32_F32;
if (RetVT == MVT::f64)
return UINTTOFP_I32_F64;
if (RetVT == MVT::f80)
return UINTTOFP_I32_F80;
if (RetVT == MVT::f128)
return UINTTOFP_I32_F128;
if (RetVT == MVT::ppcf128)
return UINTTOFP_I32_PPCF128;
} else if (OpVT == MVT::i64) {
if (RetVT == MVT::f32)
return UINTTOFP_I64_F32;
if (RetVT == MVT::f64)
return UINTTOFP_I64_F64;
if (RetVT == MVT::f80)
return UINTTOFP_I64_F80;
if (RetVT == MVT::f128)
return UINTTOFP_I64_F128;
if (RetVT == MVT::ppcf128)
return UINTTOFP_I64_PPCF128;
} else if (OpVT == MVT::i128) {
if (RetVT == MVT::f32)
return UINTTOFP_I128_F32;
if (RetVT == MVT::f64)
return UINTTOFP_I128_F64;
if (RetVT == MVT::f80)
return UINTTOFP_I128_F80;
if (RetVT == MVT::f128)
return UINTTOFP_I128_F128;
if (RetVT == MVT::ppcf128)
return UINTTOFP_I128_PPCF128;
}
return UNKNOWN_LIBCALL;
}
RTLIB::Libcall RTLIB::getATOMIC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum) \
case Name: \
switch (VT.SimpleTy) { \
default: \
return UNKNOWN_LIBCALL; \
case MVT::i8: \
return Enum##_1; \
case MVT::i16: \
return Enum##_2; \
case MVT::i32: \
return Enum##_4; \
case MVT::i64: \
return Enum##_8; \
case MVT::i128: \
return Enum##_16; \
}
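// Each OP_TO_LIBCALL expansion below dispatches an atomic opcode on its
// access width; e.g. (ISD::ATOMIC_SWAP, MVT::i32) resolves to
// SYNC_LOCK_TEST_AND_SET_4, whose name ("__sync_lock_test_and_set_4") is
// installed by InitLibcallNames above.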
switch (Opc) {
OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
}
#undef OP_TO_LIBCALL
return UNKNOWN_LIBCALL;
}
/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
CCs[RTLIB::UNE_F32] = ISD::SETNE;
CCs[RTLIB::UNE_F64] = ISD::SETNE;
CCs[RTLIB::UNE_F128] = ISD::SETNE;
CCs[RTLIB::OGE_F32] = ISD::SETGE;
CCs[RTLIB::OGE_F64] = ISD::SETGE;
CCs[RTLIB::OGE_F128] = ISD::SETGE;
CCs[RTLIB::OLT_F32] = ISD::SETLT;
CCs[RTLIB::OLT_F64] = ISD::SETLT;
CCs[RTLIB::OLT_F128] = ISD::SETLT;
CCs[RTLIB::OLE_F32] = ISD::SETLE;
CCs[RTLIB::OLE_F64] = ISD::SETLE;
CCs[RTLIB::OLE_F128] = ISD::SETLE;
CCs[RTLIB::OGT_F32] = ISD::SETGT;
CCs[RTLIB::OGT_F64] = ISD::SETGT;
CCs[RTLIB::OGT_F128] = ISD::SETGT;
CCs[RTLIB::UO_F32] = ISD::SETNE;
CCs[RTLIB::UO_F64] = ISD::SETNE;
CCs[RTLIB::UO_F128] = ISD::SETNE;
CCs[RTLIB::O_F32] = ISD::SETEQ;
CCs[RTLIB::O_F64] = ISD::SETEQ;
CCs[RTLIB::O_F128] = ISD::SETEQ;
}
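// Each CC describes how to turn a comparison libcall's integer result into a
// boolean: e.g. the OEQ_F32 routine (__eqsf2 in the default libcall names)
// returns zero iff its operands are equal, so its result is tested with
// SETEQ against zero, while the UO_* routines return nonzero for unordered
// inputs and are tested with SETNE.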
/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
initActions();
// Perform these initializations only once.
MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
= MaxStoresPerMemmoveOptSize = 4;
UseUnderscoreSetJmp = false;
UseUnderscoreLongJmp = false;
SelectIsExpensive = false;
HasMultipleConditionRegisters = false;
HasExtractBitsInsn = false;
IntDivIsCheap = false;
FsqrtIsCheap = false;
Pow2SDivIsCheap = false;
JumpIsExpensive = JumpIsExpensiveOverride;
PredictableSelectIsExpensive = false;
MaskAndBranchFoldingIsLegal = false;
EnableExtLdPromotion = false;
HasFloatingPointExceptions = true;
StackPointerRegisterToSaveRestore = 0;
ExceptionPointerRegister = 0;
ExceptionSelectorRegister = 0;
BooleanContents = UndefinedBooleanContent;
BooleanFloatContents = UndefinedBooleanContent;
BooleanVectorContents = UndefinedBooleanContent;
SchedPreferenceInfo = Sched::ILP;
JumpBufSize = 0;
JumpBufAlignment = 0;
MinFunctionAlignment = 0;
PrefFunctionAlignment = 0;
PrefLoopAlignment = 0;
MinStackArgumentAlignment = 1;
InsertFencesForAtomic = false;
MinimumJumpTableEntries = 4;
InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
InitCmpLibcallCCs(CmpLibcallCCs);
InitLibcallCallingConvs(LibcallCallingConvs);
}
void TargetLoweringBase::initActions() {
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
memset(LoadExtActions, 0, sizeof(LoadExtActions));
memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
memset(CondCodeActions, 0, sizeof(CondCodeActions));
memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
// Set default actions for various operations.
for (MVT VT : MVT::all_valuetypes()) {
// Default all indexed load / store to expand.
for (unsigned IM = (unsigned)ISD::PRE_INC;
IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
setIndexedLoadAction(IM, VT, Expand);
setIndexedStoreAction(IM, VT, Expand);
}
// Most backends expect to see the node which just returns the value loaded.
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
// These operations default to expand.
setOperationAction(ISD::FGETSIGN, VT, Expand);
setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
setOperationAction(ISD::FMINNUM, VT, Expand);
setOperationAction(ISD::FMAXNUM, VT, Expand);
setOperationAction(ISD::FMAD, VT, Expand);
setOperationAction(ISD::SMIN, VT, Expand);
setOperationAction(ISD::SMAX, VT, Expand);
setOperationAction(ISD::UMIN, VT, Expand);
setOperationAction(ISD::UMAX, VT, Expand);
// Overflow operations default to expand
setOperationAction(ISD::SADDO, VT, Expand);
setOperationAction(ISD::SSUBO, VT, Expand);
setOperationAction(ISD::UADDO, VT, Expand);
setOperationAction(ISD::USUBO, VT, Expand);
setOperationAction(ISD::SMULO, VT, Expand);
setOperationAction(ISD::UMULO, VT, Expand);
// These library functions default to expand.
setOperationAction(ISD::FROUND, VT, Expand);
// These operations default to expand for vector types.
if (VT.isVector()) {
setOperationAction(ISD::FCOPYSIGN, VT, Expand);
setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
}
}
// Most targets ignore the @llvm.prefetch intrinsic.
setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
// ConstantFP nodes default to expand. Targets can either change this to
// Legal, in which case all fp constants are legal, or use isFPImmLegal()
// to optimize expansions for certain constants.
setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
// These library functions default to expand.
for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
setOperationAction(ISD::FLOG , VT, Expand);
setOperationAction(ISD::FLOG2, VT, Expand);
setOperationAction(ISD::FLOG10, VT, Expand);
setOperationAction(ISD::FEXP , VT, Expand);
setOperationAction(ISD::FEXP2, VT, Expand);
setOperationAction(ISD::FFLOOR, VT, Expand);
setOperationAction(ISD::FMINNUM, VT, Expand);
setOperationAction(ISD::FMAXNUM, VT, Expand);
setOperationAction(ISD::FNEARBYINT, VT, Expand);
setOperationAction(ISD::FCEIL, VT, Expand);
setOperationAction(ISD::FRINT, VT, Expand);
setOperationAction(ISD::FTRUNC, VT, Expand);
setOperationAction(ISD::FROUND, VT, Expand);
}
// Default ISD::TRAP to expand (which turns it into abort).
setOperationAction(ISD::TRAP, MVT::Other, Expand);
// On most systems, DEBUGTRAP and TRAP behave identically. The "Expand"
// here informs the DAG legalizer to replace DEBUGTRAP with TRAP.
//
setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
EVT) const {
return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}
EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
const DataLayout &DL) const {
assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
if (LHSTy.isVector())
return LHSTy;
return getScalarShiftAmountTy(DL, LHSTy);
}
/// canOpTrap - Returns true if the operation can trap for the value type.
/// VT must be a legal type.
bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
assert(isTypeLegal(VT));
switch (Op) {
default:
return false;
case ISD::FDIV:
case ISD::FREM:
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM:
return true;
}
}
void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
// If the command-line option was specified, ignore this request.
if (!JumpIsExpensiveOverride.getNumOccurrences())
JumpIsExpensive = isExpensive;
}
TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
// If this is a simple type, use the ComputeRegisterProp mechanism.
if (VT.isSimple()) {
MVT SVT = VT.getSimpleVT();
assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
MVT NVT = TransformToType[SVT.SimpleTy];
LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
assert((LA == TypeLegal || LA == TypeSoftenFloat ||
ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
"Promote may not follow Expand or Promote");
if (LA == TypeSplitVector)
return LegalizeKind(LA,
EVT::getVectorVT(Context, SVT.getVectorElementType(),
SVT.getVectorNumElements() / 2));
if (LA == TypeScalarizeVector)
return LegalizeKind(LA, SVT.getVectorElementType());
return LegalizeKind(LA, NVT);
}
// Handle Extended Scalar Types.
if (!VT.isVector()) {
assert(VT.isInteger() && "Float types must be simple");
unsigned BitSize = VT.getSizeInBits();
// First promote to a power-of-two size, then expand if necessary.
if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
EVT NVT = VT.getRoundIntegerType(Context);
assert(NVT != VT && "Unable to round integer VT");
LegalizeKind NextStep = getTypeConversion(Context, NVT);
// Avoid multi-step promotion.
if (NextStep.first == TypePromoteInteger)
return NextStep;
// Return rounded integer type.
return LegalizeKind(TypePromoteInteger, NVT);
}
return LegalizeKind(TypeExpandInteger,
EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
}
// Handle vector types.
unsigned NumElts = VT.getVectorNumElements();
EVT EltVT = VT.getVectorElementType();
// Vectors with only one element are always scalarized.
if (NumElts == 1)
return LegalizeKind(TypeScalarizeVector, EltVT);
// Try to widen vector elements until the element type is a power of two and
// promote it to a legal type later on, for example:
// <3 x i8> -> <4 x i8> -> <4 x i32>
if (EltVT.isInteger()) {
// Vectors with a number of elements that is not a power of two are always
// widened, for example <3 x i8> -> <4 x i8>.
if (!VT.isPow2VectorType()) {
NumElts = (unsigned)NextPowerOf2(NumElts);
EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
return LegalizeKind(TypeWidenVector, NVT);
}
// Examine the element type.
LegalizeKind LK = getTypeConversion(Context, EltVT);
// If type is to be expanded, split the vector.
// <4 x i140> -> <2 x i140>
if (LK.first == TypeExpandInteger)
return LegalizeKind(TypeSplitVector,
EVT::getVectorVT(Context, EltVT, NumElts / 2));
// Promote the integer element types until a legal vector type is found
// or until the element integer type is too big. If a legal type was not
// found, fallback to the usual mechanism of widening/splitting the
// vector.
EVT OldEltVT = EltVT;
while (1) {
// Increase the bitwidth of the element to the next pow-of-two
// (which is greater than 8 bits).
EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
.getRoundIntegerType(Context);
// Stop trying when getting a non-simple element type.
// Note that vector elements may be wider than the legal vector element
// types. Example: X86 XMM registers can hold 64-bit elements on 32-bit
// systems.
if (!EltVT.isSimple())
break;
// Build a new vector type and check if it is legal.
MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
// Found a legal promoted vector type.
if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
return LegalizeKind(TypePromoteInteger,
EVT::getVectorVT(Context, EltVT, NumElts));
}
// Reset the type to the unexpanded type if we did not find a legal vector
// type with a promoted vector element type.
EltVT = OldEltVT;
}
// Try to widen the vector until a legal type is found.
// If there is no wider legal type, split the vector.
while (1) {
// Round up to the next power of 2.
NumElts = (unsigned)NextPowerOf2(NumElts);
// If there is no simple vector type with this many elements then there
// cannot be a larger legal vector type. Note that this assumes that
// there are no skipped intermediate vector types in the simple types.
if (!EltVT.isSimple())
break;
MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
if (LargerVector == MVT())
break;
// If this type is legal then widen the vector.
if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
return LegalizeKind(TypeWidenVector, LargerVector);
}
// Widen odd vectors to next power of two.
if (!VT.isPow2VectorType()) {
EVT NVT = VT.getPow2VectorType(Context);
return LegalizeKind(TypeWidenVector, NVT);
}
// Vectors with illegal element types are expanded.
EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
return LegalizeKind(TypeSplitVector, NVT);
}
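// Worked example of the above (assuming <4 x i32> is the only relevant legal
// type): a query on <3 x i8> first widens to a power of two,
//   getTypeConversion(C, <3 x i8>)  == {TypeWidenVector, <4 x i8>}
// and a follow-up query promotes the elements,
//   getTypeConversion(C, <4 x i8>)  == {TypePromoteInteger, <4 x i32>}
// while an extended scalar such as i33 rounds up before any expansion:
//   getTypeConversion(C, i33)       == {TypePromoteInteger, i64}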
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
unsigned &NumIntermediates,
MVT &RegisterVT,
TargetLoweringBase *TLI) {
// Figure out the right, legal destination reg to copy into.
unsigned NumElts = VT.getVectorNumElements();
MVT EltTy = VT.getVectorElementType();
unsigned NumVectorRegs = 1;
// FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
// could break down into LHS/RHS like LegalizeDAG does.
if (!isPowerOf2_32(NumElts)) {
NumVectorRegs = NumElts;
NumElts = 1;
}
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
NumElts >>= 1;
NumVectorRegs <<= 1;
}
NumIntermediates = NumVectorRegs;
MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
if (!TLI->isTypeLegal(NewVT))
NewVT = EltTy;
IntermediateVT = NewVT;
unsigned NewVTSize = NewVT.getSizeInBits();
// Convert sizes such as i33 to i64.
if (!isPowerOf2_32(NewVTSize))
NewVTSize = NextPowerOf2(NewVTSize);
MVT DestVT = TLI->getRegisterType(NewVT);
RegisterVT = DestVT;
if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
// Otherwise, promotion or legal types use the same number of registers as
// the vector decimated to the appropriate level.
return NumVectorRegs;
}
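// Worked example: if v4f32 is legal but v8f32 is not, the loop above halves
// once (NumElts 8 -> 4, NumVectorRegs 1 -> 2), so IntermediateVT becomes
// v4f32 and two intermediate registers are reported; on a target with no
// legal vector types at all, the division bottoms out at the scalar element.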
/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
I != E; ++I) {
if (isTypeLegal(*I))
return true;
}
return false;
}
/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock*
TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
MachineBasicBlock *MBB) const {
MachineFunction &MF = *MI->getParent()->getParent();
// MI changes inside this loop as we grow operands.
for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
MachineOperand &MO = MI->getOperand(OperIdx);
if (!MO.isFI())
continue;
// foldMemoryOperand builds a new MI after replacing a single FI operand
// with the canonical set of five x86 addressing-mode operands.
int FI = MO.getIndex();
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
// Copy operands before the frame-index.
for (unsigned i = 0; i < OperIdx; ++i)
MIB.addOperand(MI->getOperand(i));
// Add frame index operands: direct-mem-ref tag, #FI, offset.
MIB.addImm(StackMaps::DirectMemRefOp);
MIB.addOperand(MI->getOperand(OperIdx));
MIB.addImm(0);
// Copy the operands after the frame index.
for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
MIB.addOperand(MI->getOperand(i));
// Inherit previous memory operands.
MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
// Add a new memory operand for this FI.
const MachineFrameInfo &MFI = *MF.getFrameInfo();
assert(MFI.getObjectOffset(FI) != -1);
unsigned Flags = MachineMemOperand::MOLoad;
if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
Flags |= MachineMemOperand::MOStore;
Flags |= MachineMemOperand::MOVolatile;
}
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(FI), Flags,
TM.getDataLayout()->getPointerSize(), MFI.getObjectAlignment(FI));
MIB->addMemOperand(MF, MMO);
// Replace the instruction and update the operand index.
MBB->insert(MachineBasicBlock::iterator(MI), MIB);
OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
MI->eraseFromParent();
MI = MIB;
}
return MBB;
}
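// Illustration: given a STACKMAP whose operand list contains a frame index,
//   STACKMAP id, nbytes, ..., <fi#2>, ...
// the loop above rebuilds the instruction as
//   STACKMAP id, nbytes, ..., DirectMemRefOp, <fi#2>, 0, ...
// and attaches a fixed-stack memory operand for fi#2 (additionally marked
// store+volatile when the instruction is a STATEPOINT).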
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT, which
// would otherwise need to be moved to TargetRegisterInfo, necessitating
// moving isTypeLegal over as well - a massive change. Instead, TargetLowering
// simply takes the TargetRegisterInfo it needs as a parameter.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
MVT VT) const {
const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
if (!RC)
return std::make_pair(RC, 0);
// Compute the set of all super-register classes.
BitVector SuperRegRC(TRI->getNumRegClasses());
for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
SuperRegRC.setBitsInMask(RCI.getMask());
// Find the first legal register class with the largest spill size.
const TargetRegisterClass *BestRC = RC;
for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
// We want the largest possible spill size.
if (SuperRC->getSize() <= BestRC->getSize())
continue;
if (!isLegalRC(SuperRC))
continue;
BestRC = SuperRC;
}
return std::make_pair(BestRC, 1);
}
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
const TargetRegisterInfo *TRI) {
static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
"Too many value types for ValueTypeActions to hold!");
// Everything defaults to needing one register.
for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
NumRegistersForVT[i] = 1;
RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
}
// ...except isVoid, which doesn't need any registers.
NumRegistersForVT[MVT::isVoid] = 0;
// Find the largest integer register class.
unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
// Every integer value type larger than this largest register takes twice as
// many registers to represent as the previous ValueType.
for (unsigned ExpandedReg = LargestIntReg + 1;
ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
TypeExpandInteger);
}
// Inspect all of the ValueTypes smaller than the largest integer
// register to see which ones need promotion.
unsigned LegalIntReg = LargestIntReg;
for (unsigned IntReg = LargestIntReg - 1;
IntReg >= (unsigned)MVT::i1; --IntReg) {
MVT IVT = (MVT::SimpleValueType)IntReg;
if (isTypeLegal(IVT)) {
LegalIntReg = IntReg;
} else {
RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
(const MVT::SimpleValueType)LegalIntReg;
ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
}
}
// ppcf128 type is really two f64's.
if (!isTypeLegal(MVT::ppcf128)) {
NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
TransformToType[MVT::ppcf128] = MVT::f64;
ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
}
// Decide how to handle f128. If the target does not have native f128 support,
// expand it to i128 and we will be generating soft float library calls.
if (!isTypeLegal(MVT::f128)) {
NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
TransformToType[MVT::f128] = MVT::i128;
ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
}
// Decide how to handle f64. If the target does not have native f64 support,
// expand it to i64 and we will be generating soft float library calls.
if (!isTypeLegal(MVT::f64)) {
NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
TransformToType[MVT::f64] = MVT::i64;
ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
}
// Decide how to handle f32. If the target does not have native f32 support,
// expand it to i32 and we will be generating soft float library calls.
if (!isTypeLegal(MVT::f32)) {
NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
TransformToType[MVT::f32] = MVT::i32;
ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
}
if (!isTypeLegal(MVT::f16)) {
// If the target has native f32 support, promote f16 operations to f32. If
// f32 is not supported, generate soft float library calls.
if (isTypeLegal(MVT::f32)) {
NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
TransformToType[MVT::f16] = MVT::f32;
ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
} else {
NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
TransformToType[MVT::f16] = MVT::i16;
ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
}
}
// Loop over all of the vector value types to see which need transformations.
for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
MVT VT = (MVT::SimpleValueType) i;
if (isTypeLegal(VT))
continue;
MVT EltVT = VT.getVectorElementType();
unsigned NElts = VT.getVectorNumElements();
bool IsLegalWiderType = false;
LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
switch (PreferredAction) {
case TypePromoteInteger: {
// Try to promote the elements of integer vectors. If no legal
// promotion was found, fall through to the widen-vector method.
for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
MVT SVT = (MVT::SimpleValueType) nVT;
// Promote vectors of integers to vectors with the same number
// of elements, with a wider element type.
if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
&& SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)
&& SVT.getScalarType().isInteger()) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
NumRegistersForVT[i] = 1;
ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
IsLegalWiderType = true;
break;
}
}
if (IsLegalWiderType)
break;
}
case TypeWidenVector: {
// Try to widen the vector.
for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
MVT SVT = (MVT::SimpleValueType) nVT;
if (SVT.getVectorElementType() == EltVT
&& SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
NumRegistersForVT[i] = 1;
ValueTypeActions.setTypeAction(VT, TypeWidenVector);
IsLegalWiderType = true;
break;
}
}
if (IsLegalWiderType)
break;
}
case TypeSplitVector:
case TypeScalarizeVector: {
MVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
NumIntermediates, RegisterVT, this);
RegisterTypeForVT[i] = RegisterVT;
MVT NVT = VT.getPow2VectorType();
if (NVT == VT) {
// Type is already a power of 2. The default action is to split.
TransformToType[i] = MVT::Other;
if (PreferredAction == TypeScalarizeVector)
ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
else if (PreferredAction == TypeSplitVector)
ValueTypeActions.setTypeAction(VT, TypeSplitVector);
else
// Set type action according to the number of elements.
ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
: TypeSplitVector);
} else {
TransformToType[i] = NVT;
ValueTypeActions.setTypeAction(VT, TypeWidenVector);
}
break;
}
default:
llvm_unreachable("Unknown vector legalization action!");
}
}
// Determine the 'representative' register class for each value type.
// A representative register class is the largest legal register class (i.e.,
// one that is not a sub-register class of another legal class) for a group of
// value types. For example, on i386 the representative class for i8, i16, and
// i32 would be GR32; on x86_64 it's GR64.
for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
const TargetRegisterClass* RRC;
uint8_t Cost;
std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
RepRegClassForVT[i] = RRC;
RepRegClassCostForVT[i] = Cost;
}
}
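// For example, on a 32-bit target whose widest legal integer type is i32,
// the loops above record i64 as two i32 registers (TypeExpandInteger, with
// TransformToType stepping i64 -> i32) and i128 as four i32 registers
// (stepping i128 -> i64), while a narrower type that is not legal is
// promoted to the next larger legal integer type.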
EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
EVT VT) const {
assert(!VT.isVector() && "No default SetCC type for vectors!");
return getPointerTy(DL).SimpleTy;
}
MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
return MVT::i32; // return the default value
}
/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
EVT &IntermediateVT,
unsigned &NumIntermediates,
MVT &RegisterVT) const {
unsigned NumElts = VT.getVectorNumElements();
// If there is a wider vector type with the same element type as this one,
// or a promoted vector type that has the same number of elements which
// are wider, then we should convert to that legal vector type.
// This handles things like <2 x float> -> <4 x float> and
// <4 x i1> -> <4 x i32>.
LegalizeTypeAction TA = getTypeAction(Context, VT);
if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
EVT RegisterEVT = getTypeToTransformTo(Context, VT);
if (isTypeLegal(RegisterEVT)) {
IntermediateVT = RegisterEVT;
RegisterVT = RegisterEVT.getSimpleVT();
NumIntermediates = 1;
return 1;
}
}
// Figure out the right, legal destination reg to copy into.
EVT EltTy = VT.getVectorElementType();
unsigned NumVectorRegs = 1;
// FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
// could break down into LHS/RHS like LegalizeDAG does.
if (!isPowerOf2_32(NumElts)) {
NumVectorRegs = NumElts;
NumElts = 1;
}
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
while (NumElts > 1 && !isTypeLegal(
EVT::getVectorVT(Context, EltTy, NumElts))) {
NumElts >>= 1;
NumVectorRegs <<= 1;
}
NumIntermediates = NumVectorRegs;
EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
if (!isTypeLegal(NewVT))
NewVT = EltTy;
IntermediateVT = NewVT;
MVT DestVT = getRegisterType(Context, NewVT);
RegisterVT = DestVT;
unsigned NewVTSize = NewVT.getSizeInBits();
// Convert sizes such as i33 to i64.
if (!isPowerOf2_32(NewVTSize))
NewVTSize = NextPowerOf2(NewVTSize);
if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
// Otherwise, promotion or legal types use the same number of registers as
// the vector decimated to the appropriate level.
return NumVectorRegs;
}
/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI, const DataLayout &DL) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0) return;
for (unsigned j = 0, f = NumValues; j != f; ++j) {
EVT VT = ValueVTs[j];
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
ExtendKind = ISD::SIGN_EXTEND;
else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
ExtendKind = ISD::ZERO_EXTEND;
// FIXME: C calling convention requires the return type to be promoted to
// at least 32-bit. But this is not necessary for non-C calling
// conventions. The frontend should mark functions whose return values
// require promoting with signext or zeroext attributes.
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
if (VT.bitsLT(MinVT))
VT = MinVT;
}
unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
Flags.setInReg();
// Propagate extension type if any
if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
Flags.setSExt();
else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
Flags.setZExt();
for (unsigned i = 0; i < NumParts; ++i)
Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
}
}
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
const DataLayout &DL) const {
return DL.getABITypeAlignment(Ty);
}
//===----------------------------------------------------------------------===//
// TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//
int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
};
switch (static_cast<InstructionOpcodes>(Opcode)) {
case Ret: return 0;
case Br: return 0;
case Switch: return 0;
case IndirectBr: return 0;
case Invoke: return 0;
case Resume: return 0;
case Unreachable: return 0;
case Add: return ISD::ADD;
case FAdd: return ISD::FADD;
case Sub: return ISD::SUB;
case FSub: return ISD::FSUB;
case Mul: return ISD::MUL;
case FMul: return ISD::FMUL;
case UDiv: return ISD::UDIV;
case SDiv: return ISD::SDIV;
case FDiv: return ISD::FDIV;
case URem: return ISD::UREM;
case SRem: return ISD::SREM;
case FRem: return ISD::FREM;
case Shl: return ISD::SHL;
case LShr: return ISD::SRL;
case AShr: return ISD::SRA;
case And: return ISD::AND;
case Or: return ISD::OR;
case Xor: return ISD::XOR;
case Alloca: return 0;
case Load: return ISD::LOAD;
case Store: return ISD::STORE;
case GetElementPtr: return 0;
case Fence: return 0;
case AtomicCmpXchg: return 0;
case AtomicRMW: return 0;
case Trunc: return ISD::TRUNCATE;
case ZExt: return ISD::ZERO_EXTEND;
case SExt: return ISD::SIGN_EXTEND;
case FPToUI: return ISD::FP_TO_UINT;
case FPToSI: return ISD::FP_TO_SINT;
case UIToFP: return ISD::UINT_TO_FP;
case SIToFP: return ISD::SINT_TO_FP;
case FPTrunc: return ISD::FP_ROUND;
case FPExt: return ISD::FP_EXTEND;
case PtrToInt: return ISD::BITCAST;
case IntToPtr: return ISD::BITCAST;
case BitCast: return ISD::BITCAST;
case AddrSpaceCast: return ISD::ADDRSPACECAST;
case ICmp: return ISD::SETCC;
case FCmp: return ISD::SETCC;
case PHI: return 0;
case Call: return 0;
case Select: return ISD::SELECT;
case UserOp1: return 0;
case UserOp2: return 0;
case VAArg: return 0;
case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
case InsertElement: return ISD::INSERT_VECTOR_ELT;
case ShuffleVector: return ISD::VECTOR_SHUFFLE;
case ExtractValue: return ISD::MERGE_VALUES;
case InsertValue: return ISD::MERGE_VALUES;
case LandingPad: return 0;
}
llvm_unreachable("Unknown instruction type encountered!");
}
std::pair<unsigned, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
Type *Ty) const {
LLVMContext &C = Ty->getContext();
EVT MTy = getValueType(DL, Ty);
unsigned Cost = 1;
// We keep legalizing the type until we find a legal kind. We assume that
// the only operation that costs anything is the split. After splitting
// we need to handle two types.
while (true) {
LegalizeKind LK = getTypeConversion(C, MTy);
if (LK.first == TypeLegal)
return std::make_pair(Cost, MTy.getSimpleVT());
if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
Cost *= 2;
// Keep legalizing the type.
MTy = LK.second;
}
}
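// For instance, on a target where v2i64 is the widest legal vector type,
// legalizing v8i64 takes two splits (v8i64 -> v4i64 -> v2i64), doubling the
// cost at each step:
//   getTypeLegalizationCost(DL, <8 x i64> type) == {4, MVT::v2i64}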
//===----------------------------------------------------------------------===//
// Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS) const {
// The default implementation of this supports a conservative RISC-style
// r+r and r+i addressing mode.
// Allow only a sign-extended 16-bit immediate offset.
if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
return false;
// No global is ever allowed as a base.
if (AM.BaseGV)
return false;
// Only support r+r,
switch (AM.Scale) {
case 0: // "r+i" or just "i", depending on HasBaseReg.
break;
case 1:
if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
return false;
// Otherwise we have r+r or r+i.
break;
case 2:
if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
return false;
// Allow 2*r as r+r.
break;
default: // Don't allow n * r
return false;
}
return true;
}
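// Under this default, "r + 1024" (Scale 0) and "r + r" (Scale 1, no
// immediate) are legal, "2*r" is accepted only when it can stand alone as
// "r + r", and modes such as "4*r", "r + r + i", or any global-variable
// base are rejected.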
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MIRPrintingPass.cpp | //===- MIRPrintingPass.cpp - Pass that prints out using the MIR format ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass that prints out the LLVM module using the MIR
// serialization format.
//
//===----------------------------------------------------------------------===//
#include "MIRPrinter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
namespace {
/// This pass prints out the LLVM IR to an output stream using the MIR
/// serialization format.
struct MIRPrintingPass : public MachineFunctionPass {
static char ID;
raw_ostream &OS;
std::string MachineFunctions;
MIRPrintingPass() : MachineFunctionPass(ID), OS(dbgs()) {}
MIRPrintingPass(raw_ostream &OS) : MachineFunctionPass(ID), OS(OS) {}
StringRef getPassName() const override { return "MIR Printing Pass"; }
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override {
std::string Str;
raw_string_ostream StrOS(Str);
printMIR(StrOS, MF);
MachineFunctions.append(StrOS.str());
return false;
}
bool doFinalization(Module &M) override {
printMIR(OS, M);
OS << MachineFunctions;
return false;
}
};
char MIRPrintingPass::ID = 0;
} // end anonymous namespace
char &llvm::MIRPrintingPassID = MIRPrintingPass::ID;
INITIALIZE_PASS(MIRPrintingPass, "mir-printer", "MIR Printer", false, false)
namespace llvm {
MachineFunctionPass *createPrintMIRPass(raw_ostream &OS) {
return new MIRPrintingPass(OS);
}
} // end namespace llvm
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LocalStackSlotAllocation.cpp | //===- LocalStackSlotAllocation.cpp - Pre-allocate locals to stack slots --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass assigns local frame indices to stack slots relative to one another
// and allocates additional base registers to access them when the target
// estimates they are likely to be out of range of stack pointer and frame
// pointer relative addressing.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "localstackalloc"
STATISTIC(NumAllocations, "Number of frame indices allocated into local block");
STATISTIC(NumBaseRegisters, "Number of virtual frame base registers allocated");
STATISTIC(NumReplacements, "Number of frame indices references replaced");
namespace {
class FrameRef {
MachineBasicBlock::iterator MI; // Instr referencing the frame
int64_t LocalOffset; // Local offset of the frame idx referenced
int FrameIdx; // The frame index
public:
FrameRef(MachineBasicBlock::iterator I, int64_t Offset, int Idx) :
MI(I), LocalOffset(Offset), FrameIdx(Idx) {}
bool operator<(const FrameRef &RHS) const {
return LocalOffset < RHS.LocalOffset;
}
MachineBasicBlock::iterator getMachineInstr() const { return MI; }
int64_t getLocalOffset() const { return LocalOffset; }
int getFrameIndex() const { return FrameIdx; }
};
class LocalStackSlotPass: public MachineFunctionPass {
SmallVector<int64_t,16> LocalOffsets;
/// StackObjSet - A set of stack object indexes
typedef SmallSetVector<int, 8> StackObjSet;
void AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx, int64_t &Offset,
bool StackGrowsDown, unsigned &MaxAlign);
void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
SmallSet<int, 16> &ProtectedObjs,
MachineFrameInfo *MFI, bool StackGrowsDown,
int64_t &Offset, unsigned &MaxAlign);
void calculateFrameObjectOffsets(MachineFunction &Fn);
bool insertFrameReferenceRegisters(MachineFunction &Fn);
public:
static char ID; // Pass identification, replacement for typeid
explicit LocalStackSlotPass() : MachineFunctionPass(ID) {
initializeLocalStackSlotPassPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<StackProtector>();
MachineFunctionPass::getAnalysisUsage(AU);
}
private:
};
} // end anonymous namespace
char LocalStackSlotPass::ID = 0;
char &llvm::LocalStackSlotAllocationID = LocalStackSlotPass::ID;
INITIALIZE_PASS_BEGIN(LocalStackSlotPass, "localstackalloc",
"Local Stack Slot Allocation", false, false)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_END(LocalStackSlotPass, "localstackalloc",
"Local Stack Slot Allocation", false, false)
bool LocalStackSlotPass::runOnMachineFunction(MachineFunction &MF) {
MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
unsigned LocalObjectCount = MFI->getObjectIndexEnd();
// If the target doesn't want/need this pass, or if there are no locals
// to consider, early exit.
if (!TRI->requiresVirtualBaseRegisters(MF) || LocalObjectCount == 0)
return true;
// Make sure we have enough space to store the local offsets.
LocalOffsets.resize(MFI->getObjectIndexEnd());
// Lay out the local blob.
calculateFrameObjectOffsets(MF);
// Insert virtual base registers to resolve frame index references.
bool UsedBaseRegs = insertFrameReferenceRegisters(MF);
// Tell MFI whether any base registers were allocated. PEI will only
// want to use the local block allocations from this pass if there were any.
// Otherwise, PEI can do a somewhat better job of getting the alignment right
// without a hole at the start, since it knows the alignment of the stack
// at the start of local allocation and this pass doesn't.
MFI->setUseLocalStackAllocationBlock(UsedBaseRegs);
return true;
}
/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
void LocalStackSlotPass::AdjustStackOffset(MachineFrameInfo *MFI,
int FrameIdx, int64_t &Offset,
bool StackGrowsDown,
unsigned &MaxAlign) {
// If the stack grows down, add the object size to find the lowest address.
if (StackGrowsDown)
Offset += MFI->getObjectSize(FrameIdx);
unsigned Align = MFI->getObjectAlignment(FrameIdx);
// If the alignment of this object is greater than that of the stack, then
// increase the stack alignment to match.
MaxAlign = std::max(MaxAlign, Align);
// Adjust to alignment boundary.
Offset = (Offset + Align - 1) / Align * Align;
int64_t LocalOffset = StackGrowsDown ? -Offset : Offset;
DEBUG(dbgs() << "Allocate FI(" << FrameIdx << ") to local offset "
<< LocalOffset << "\n");
// Keep the offset available for base register allocation
LocalOffsets[FrameIdx] = LocalOffset;
// And tell MFI about it for PEI to use later
MFI->mapLocalFrameObject(FrameIdx, LocalOffset);
if (!StackGrowsDown)
Offset += MFI->getObjectSize(FrameIdx);
++NumAllocations;
}
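// Rounding example: with Align == 8, an Offset of 13 becomes
// (13 + 7) / 8 * 8 == 16, the next 8-byte boundary; on a downward-growing
// stack the object is then published at local offset -16.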
/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
void LocalStackSlotPass::AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
SmallSet<int, 16> &ProtectedObjs,
MachineFrameInfo *MFI,
bool StackGrowsDown, int64_t &Offset,
unsigned &MaxAlign) {
for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
E = UnassignedObjs.end(); I != E; ++I) {
int i = *I;
AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
ProtectedObjs.insert(i);
}
}
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Loop over all of the stack objects, assigning sequential addresses...
MachineFrameInfo *MFI = Fn.getFrameInfo();
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int64_t Offset = 0;
unsigned MaxAlign = 0;
StackProtector *SP = &getAnalysis<StackProtector>();
// Make sure that the stack protector comes before the local variables on the
// stack.
SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
StackObjSet SmallArrayObjs;
StackObjSet AddrOfObjs;
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
StackGrowsDown, MaxAlign);
// Assign large stack objects first.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
continue;
case StackProtector::SSPLK_SmallArray:
SmallArrayObjs.insert(i);
continue;
case StackProtector::SSPLK_AddrOf:
AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
continue;
}
llvm_unreachable("Unexpected SSPLayoutKind.");
}
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill
// callee saved registers.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
if (ProtectedObjs.count(i))
continue;
AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
}
// Remember how big this blob of stack space is
MFI->setLocalFrameSize(Offset);
MFI->setLocalFrameMaxAlign(MaxAlign);
}
static inline bool
lookupCandidateBaseReg(unsigned BaseReg,
int64_t BaseOffset,
int64_t FrameSizeAdjust,
int64_t LocalFrameOffset,
const MachineInstr *MI,
const TargetRegisterInfo *TRI) {
// Check if the relative offset from where the base register points to the
// target address is in range for the instruction.
int64_t Offset = FrameSizeAdjust + LocalFrameOffset - BaseOffset;
return TRI->isFrameOffsetLegal(MI, BaseReg, Offset);
}
bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
// Scan the function's instructions looking for frame index references.
// For each, ask the target if it wants a virtual base register for it
// based on what we can tell it about where the local will end up in the
// stack frame. If it wants one, re-use a suitable one we've previously
// allocated, or if there isn't one that fits the bill, allocate a new one
// and ask the target to create a defining instruction for it.
bool UsedBaseReg = false;
MachineFrameInfo *MFI = Fn.getFrameInfo();
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
// Collect all of the instructions in the block that reference
// a frame index. Also store the frame index referenced to ease later
// lookup. (For any insn that has more than one FI reference, we arbitrarily
// choose the first one).
SmallVector<FrameRef, 64> FrameReferenceInsns;
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
MachineInstr *MI = I;
// Debug value, stackmap and patchpoint instructions can't be out of
// range, so they don't need any updates.
if (MI->isDebugValue() ||
MI->getOpcode() == TargetOpcode::STATEPOINT ||
MI->getOpcode() == TargetOpcode::STACKMAP ||
MI->getOpcode() == TargetOpcode::PATCHPOINT)
continue;
// For now, allocate the base register(s) within the basic block
// where they're used, and don't try to keep them around outside
// of that. It may be beneficial to try sharing them more broadly
// than that, but the increased register pressure makes that a
// tricky thing to balance. Investigate if re-materializing these
// becomes an issue.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
// Consider replacing all frame index operands that reference
// an object allocated in the local block.
if (MI->getOperand(i).isFI()) {
// Don't try this with values not in the local block.
if (!MFI->isObjectPreAllocated(MI->getOperand(i).getIndex()))
break;
int Idx = MI->getOperand(i).getIndex();
int64_t LocalOffset = LocalOffsets[Idx];
if (!TRI->needsFrameBaseReg(MI, LocalOffset))
break;
FrameReferenceInsns.
push_back(FrameRef(MI, LocalOffset, Idx));
break;
}
}
}
}
// Sort the frame references by local offset
array_pod_sort(FrameReferenceInsns.begin(), FrameReferenceInsns.end());
MachineBasicBlock *Entry = Fn.begin();
unsigned BaseReg = 0;
int64_t BaseOffset = 0;
// Loop through the frame references and allocate for them as necessary.
for (int ref = 0, e = FrameReferenceInsns.size(); ref < e ; ++ref) {
FrameRef &FR = FrameReferenceInsns[ref];
MachineBasicBlock::iterator I = FR.getMachineInstr();
MachineInstr *MI = I;
int64_t LocalOffset = FR.getLocalOffset();
int FrameIdx = FR.getFrameIndex();
assert(MFI->isObjectPreAllocated(FrameIdx) &&
"Only pre-allocated locals expected!");
DEBUG(dbgs() << "Considering: " << *MI);
unsigned idx = 0;
for (unsigned f = MI->getNumOperands(); idx != f; ++idx) {
if (!MI->getOperand(idx).isFI())
continue;
if (FrameIdx == I->getOperand(idx).getIndex())
break;
}
assert(idx < MI->getNumOperands() && "Cannot find FI operand");
int64_t Offset = 0;
int64_t FrameSizeAdjust = StackGrowsDown ? MFI->getLocalFrameSize() : 0;
DEBUG(dbgs() << " Replacing FI in: " << *MI);
// If we have a suitable base register available, use it; otherwise
// create a new one. Note that any offset encoded in the
// instruction itself will be taken into account by the target,
// so we don't have to adjust for it here when reusing a base
// register.
if (UsedBaseReg && lookupCandidateBaseReg(BaseReg, BaseOffset,
FrameSizeAdjust, LocalOffset, MI,
TRI)) {
DEBUG(dbgs() << " Reusing base register " << BaseReg << "\n");
// We found a register to reuse.
Offset = FrameSizeAdjust + LocalOffset - BaseOffset;
} else {
// No previously defined register was in range, so create a new one.
int64_t InstrOffset = TRI->getFrameIndexInstrOffset(MI, idx);
int64_t PrevBaseOffset = BaseOffset;
BaseOffset = FrameSizeAdjust + LocalOffset + InstrOffset;
// We'd like to avoid creating single-use virtual base registers.
// Because the FrameRefs are in sorted order, and we've already
// processed all FrameRefs before this one, just check whether or not
// the next FrameRef will be able to reuse this new register. If not,
// then don't bother creating it.
if (ref + 1 >= e ||
!lookupCandidateBaseReg(
BaseReg, BaseOffset, FrameSizeAdjust,
FrameReferenceInsns[ref + 1].getLocalOffset(),
FrameReferenceInsns[ref + 1].getMachineInstr(), TRI)) {
BaseOffset = PrevBaseOffset;
continue;
}
const MachineFunction *MF = MI->getParent()->getParent();
const TargetRegisterClass *RC = TRI->getPointerRegClass(*MF);
BaseReg = Fn.getRegInfo().createVirtualRegister(RC);
DEBUG(dbgs() << " Materializing base register " << BaseReg <<
" at frame local offset " << LocalOffset + InstrOffset << "\n");
// Tell the target to insert the instruction to initialize
// the base register.
TRI->materializeFrameBaseRegister(Entry, BaseReg, FrameIdx,
InstrOffset);
// The base register already includes any offset specified
// by the instruction, so account for that so it doesn't get
// applied twice.
Offset = -InstrOffset;
++NumBaseRegisters;
UsedBaseReg = true;
}
assert(BaseReg != 0 && "Unable to allocate virtual base register!");
// Modify the instruction to use the new base register rather
// than the frame index operand.
TRI->resolveFrameIndex(*I, BaseReg, Offset);
DEBUG(dbgs() << "Resolved: " << *MI);
++NumReplacements;
}
return UsedBaseReg;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/PHIEliminationUtils.cpp | //===-- PHIEliminationUtils.cpp - Helper functions for PHI elimination ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "PHIEliminationUtils.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
// findCopyInsertPoint - Find a safe place in MBB to insert a copy from SrcReg
// when following the CFG edge to SuccMBB. This needs to be after any def of
// SrcReg, but before any subsequent point where control flow might jump out of
// the basic block.
MachineBasicBlock::iterator
llvm::findPHICopyInsertPoint(MachineBasicBlock* MBB, MachineBasicBlock* SuccMBB,
unsigned SrcReg) {
// Handle the trivial case trivially.
if (MBB->empty())
return MBB->begin();
// Usually, we just want to insert the copy before the first terminator
// instruction. However, for the edge going to a landing pad, we must insert
// the copy before the call/invoke instruction.
if (!SuccMBB->isLandingPad())
return MBB->getFirstTerminator();
// Discover any defs/uses in this basic block.
SmallPtrSet<MachineInstr*, 8> DefUsesInMBB;
MachineRegisterInfo& MRI = MBB->getParent()->getRegInfo();
for (MachineInstr &RI : MRI.reg_instructions(SrcReg)) {
if (RI.getParent() == MBB)
DefUsesInMBB.insert(&RI);
}
MachineBasicBlock::iterator InsertPoint;
if (DefUsesInMBB.empty()) {
// No defs. Insert the copy at the start of the basic block.
InsertPoint = MBB->begin();
} else if (DefUsesInMBB.size() == 1) {
// Insert the copy immediately after the def/use.
InsertPoint = *DefUsesInMBB.begin();
++InsertPoint;
} else {
// Insert the copy immediately after the last def/use.
InsertPoint = MBB->end();
while (!DefUsesInMBB.count(&*--InsertPoint)) {}
++InsertPoint;
}
// Make sure the copy goes after any phi nodes however.
return MBB->SkipPHIsAndLabels(InsertPoint);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/AggressiveAntiDepBreaker.cpp | //===----- AggressiveAntiDepBreaker.cpp - Anti-dep breaker ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AggressiveAntiDepBreaker class, which
// implements register anti-dependence breaking during post-RA
// scheduling. It attempts to break all anti-dependencies within a
// block.
//
//===----------------------------------------------------------------------===//
#include "AggressiveAntiDepBreaker.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
#define DEBUG_TYPE "post-RA-sched"
// If DebugDiv > 0, then only break anti-deps with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("agg-antidep-debugdiv",
cl::desc("Debug control for aggressive anti-dep breaker"),
cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("agg-antidep-debugmod",
cl::desc("Debug control for aggressive anti-dep breaker"),
cl::init(0), cl::Hidden);
AggressiveAntiDepState::AggressiveAntiDepState(const unsigned TargetRegs,
MachineBasicBlock *BB) :
NumTargetRegs(TargetRegs), GroupNodes(TargetRegs, 0),
GroupNodeIndices(TargetRegs, 0),
KillIndices(TargetRegs, 0),
DefIndices(TargetRegs, 0)
{
const unsigned BBSize = BB->size();
for (unsigned i = 0; i < NumTargetRegs; ++i) {
// Initialize all registers to be in their own group. Initially we
// assign the register to the same-indexed GroupNode.
GroupNodeIndices[i] = i;
// Initialize the indices to indicate that no registers are live.
KillIndices[i] = ~0u;
DefIndices[i] = BBSize;
}
}
unsigned AggressiveAntiDepState::GetGroup(unsigned Reg) {
unsigned Node = GroupNodeIndices[Reg];
while (GroupNodes[Node] != Node)
Node = GroupNodes[Node];
return Node;
}
void AggressiveAntiDepState::GetGroupRegs(
unsigned Group,
std::vector<unsigned> &Regs,
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference> *RegRefs)
{
for (unsigned Reg = 0; Reg != NumTargetRegs; ++Reg) {
if ((GetGroup(Reg) == Group) && (RegRefs->count(Reg) > 0))
Regs.push_back(Reg);
}
}
unsigned AggressiveAntiDepState::UnionGroups(unsigned Reg1, unsigned Reg2)
{
assert(GroupNodes[0] == 0 && "GroupNode 0 not parent!");
assert(GroupNodeIndices[0] == 0 && "Reg 0 not in Group 0!");
// find group for each register
unsigned Group1 = GetGroup(Reg1);
unsigned Group2 = GetGroup(Reg2);
// if either group is 0, then that must become the parent
unsigned Parent = (Group1 == 0) ? Group1 : Group2;
unsigned Other = (Parent == Group1) ? Group2 : Group1;
GroupNodes.at(Other) = Parent;
return Parent;
}
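// Example: if Reg1 is in group 3 and Reg2 is in group 0, group 0 becomes the
// parent and GroupNodes[3] is redirected to it, merging the groups; when
// neither group is 0, the second register's group is chosen as the parent.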
unsigned AggressiveAntiDepState::LeaveGroup(unsigned Reg)
{
// Create a new GroupNode for Reg. Reg's existing GroupNode must
// stay as is because there could be other GroupNodes referring to
// it.
unsigned idx = GroupNodes.size();
GroupNodes.push_back(idx);
GroupNodeIndices[Reg] = idx;
return idx;
}
bool AggressiveAntiDepState::IsLive(unsigned Reg)
{
// KillIndex must be defined and DefIndex not defined for a register
// to be live.
return((KillIndices[Reg] != ~0u) && (DefIndices[Reg] == ~0u));
}
AggressiveAntiDepBreaker::AggressiveAntiDepBreaker(
MachineFunction &MFi, const RegisterClassInfo &RCI,
TargetSubtargetInfo::RegClassVector &CriticalPathRCs)
: AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
TII(MF.getSubtarget().getInstrInfo()),
TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RCI),
State(nullptr) {
/* Collect a bitset of all registers that are only broken if they
are on the critical path. */
for (unsigned i = 0, e = CriticalPathRCs.size(); i < e; ++i) {
BitVector CPSet = TRI->getAllocatableSet(MF, CriticalPathRCs[i]);
if (CriticalPathSet.none())
CriticalPathSet = CPSet;
else
CriticalPathSet |= CPSet;
}
DEBUG(dbgs() << "AntiDep Critical-Path Registers:");
DEBUG(for (int r = CriticalPathSet.find_first(); r != -1;
r = CriticalPathSet.find_next(r))
dbgs() << " " << TRI->getName(r));
DEBUG(dbgs() << '\n');
}
AggressiveAntiDepBreaker::~AggressiveAntiDepBreaker() {
delete State;
}
void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
assert(!State);
State = new AggressiveAntiDepState(TRI->getNumRegs(), BB);
bool IsReturnBlock = (!BB->empty() && BB->back().isReturn());
std::vector<unsigned> &KillIndices = State->GetKillIndices();
std::vector<unsigned> &DefIndices = State->GetDefIndices();
// Examine the live-in regs of all successors.
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
unsigned Reg = *AI;
State->UnionGroups(Reg, 0);
KillIndices[Reg] = BB->size();
DefIndices[Reg] = ~0u;
}
}
// Mark live-out callee-saved registers. In a return block this is
// all callee-saved registers. In non-return this is any
// callee-saved register that is not saved in the prolog.
const MachineFrameInfo *MFI = MF.getFrameInfo();
BitVector Pristine = MFI->getPristineRegs(MF);
for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
unsigned Reg = *I;
if (!IsReturnBlock && !Pristine.test(Reg)) continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
unsigned AliasReg = *AI;
State->UnionGroups(AliasReg, 0);
KillIndices[AliasReg] = BB->size();
DefIndices[AliasReg] = ~0u;
}
}
}
void AggressiveAntiDepBreaker::FinishBlock() {
delete State;
State = nullptr;
}
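/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.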
void AggressiveAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
unsigned InsertPosIndex) {
assert(Count < InsertPosIndex && "Instruction index out of expected range!");
std::set<unsigned> PassthruRegs;
GetPassthruRegs(MI, PassthruRegs);
PrescanInstruction(MI, Count, PassthruRegs);
ScanInstruction(MI, Count);
DEBUG(dbgs() << "Observe: ");
DEBUG(MI->dump());
DEBUG(dbgs() << "\tRegs:");
std::vector<unsigned> &DefIndices = State->GetDefIndices();
for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
// If Reg is currently live, then mark that it can't be renamed, as
// we don't know the extent of its live-range anymore (now that it
// has been scheduled). If it is not live but was defined in the
// previous schedule region, then set its def index to the most
// conservative location (i.e. the beginning of the previous
// schedule region).
if (State->IsLive(Reg)) {
DEBUG(if (State->GetGroup(Reg) != 0)
dbgs() << " " << TRI->getName(Reg) << "=g" <<
State->GetGroup(Reg) << "->g0(region live-out)");
State->UnionGroups(Reg, 0);
} else if ((DefIndices[Reg] < InsertPosIndex)
&& (DefIndices[Reg] >= Count)) {
DefIndices[Reg] = Count;
}
}
DEBUG(dbgs() << '\n');
}
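/// IsImplicitDefUse - Return true if MO represents a register that is
/// both implicitly used and defined in MI.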
bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr *MI,
MachineOperand& MO)
{
if (!MO.isReg() || !MO.isImplicit())
return false;
unsigned Reg = MO.getReg();
if (Reg == 0)
return false;
MachineOperand *Op = nullptr;
if (MO.isDef())
Op = MI->findRegisterUseOperand(Reg, true);
else
Op = MI->findRegisterDefOperand(Reg);
return(Op && Op->isImplicit());
}
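/// GetPassthruRegs - Collect in PassthruRegs every register (and each of
/// its subregisters) whose value passes through MI, i.e. any tied def or
/// implicit def/use.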
void AggressiveAntiDepBreaker::GetPassthruRegs(MachineInstr *MI,
std::set<unsigned>& PassthruRegs) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
if ((MO.isDef() && MI->isRegTiedToUseOperand(i)) ||
IsImplicitDefUse(MI, MO)) {
const unsigned Reg = MO.getReg();
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
PassthruRegs.insert(*SubRegs);
}
}
}
/// AntiDepEdges - Return in Edges the anti- and output-dependencies
/// in SU that we want to consider for breaking.
static void AntiDepEdges(const SUnit *SU, std::vector<const SDep*>& Edges) {
SmallSet<unsigned, 4> RegSet;
for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
if ((P->getKind() == SDep::Anti) || (P->getKind() == SDep::Output)) {
if (RegSet.insert(P->getReg()).second)
Edges.push_back(&*P);
}
}
}
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static const SUnit *CriticalPathStep(const SUnit *SU) {
const SDep *Next = nullptr;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
if (SU) {
for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
const SUnit *PredSU = P->getSUnit();
unsigned PredLatency = P->getLatency();
unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
// In the case of a latency tie, prefer an anti-dependency edge over
// other types of edges.
if (NextDepth < PredTotalLatency ||
(NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
NextDepth = PredTotalLatency;
Next = &*P;
}
}
}
return (Next) ? Next->getSUnit() : nullptr;
}
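/// HandleLastUse - Treat the reference to Reg at KillIdx as a last-use
/// when Reg is not currently live: record the kill index, clear the def
/// index, drop any tracked references, and move Reg (and each dead
/// subregister) into a fresh group. If a super-register of Reg is still
/// live, the tracking state is left untouched.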
void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
const char *tag,
const char *header,
const char *footer) {
std::vector<unsigned> &KillIndices = State->GetKillIndices();
std::vector<unsigned> &DefIndices = State->GetDefIndices();
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
// FIXME: We must leave subregisters of live super registers as live, so that
// we don't clear out the register tracking information for subregisters of
// super registers we're still tracking (and with which we're unioning
// subregister definitions).
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI)) {
DEBUG(if (!header && footer) dbgs() << footer);
return;
}
if (!State->IsLive(Reg)) {
KillIndices[Reg] = KillIdx;
DefIndices[Reg] = ~0u;
RegRefs.erase(Reg);
State->LeaveGroup(Reg);
DEBUG(if (header) {
dbgs() << header << TRI->getName(Reg); header = nullptr; });
DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
}
// Repeat for subregisters.
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubregReg = *SubRegs;
if (!State->IsLive(SubregReg)) {
KillIndices[SubregReg] = KillIdx;
DefIndices[SubregReg] = ~0u;
RegRefs.erase(SubregReg);
State->LeaveGroup(SubregReg);
DEBUG(if (header) {
dbgs() << header << TRI->getName(Reg); header = nullptr; });
DEBUG(dbgs() << " " << TRI->getName(SubregReg) << "->g" <<
State->GetGroup(SubregReg) << tag);
}
}
DEBUG(if (!header && footer) dbgs() << footer);
}
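/// PrescanInstruction - Process the defs in MI: update live ranges and
/// groups for the defined registers, and record each def in RegRefs so
/// it can be rewritten later if its group is renamed.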
void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
unsigned Count,
std::set<unsigned>& PassthruRegs) {
std::vector<unsigned> &DefIndices = State->GetDefIndices();
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
// Handle dead defs by simulating a last-use of the register just
// after the def. A dead def can occur because the def is truly
// dead, or because only a subregister is live at the def. If we
// don't do this the dead def will be incorrectly merged into the
// previous def.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
HandleLastUse(Reg, Count + 1, "", "\tDead Def: ", "\n");
}
DEBUG(dbgs() << "\tDef Groups:");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
DEBUG(dbgs() << " " << TRI->getName(Reg) << "=g" << State->GetGroup(Reg));
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
// defined in a call must not be changed (ABI).
if (MI->isCall() || MI->hasExtraDefRegAllocReq() ||
TII->isPredicated(MI)) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
// Any aliases that are live at this point are completely or
// partially defined here, so group those aliases with Reg.
for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
unsigned AliasReg = *AI;
if (State->IsLive(AliasReg)) {
State->UnionGroups(Reg, AliasReg);
DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via " <<
TRI->getName(AliasReg) << ")");
}
}
// Note register reference...
const TargetRegisterClass *RC = nullptr;
if (i < MI->getDesc().getNumOperands())
RC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
RegRefs.insert(std::make_pair(Reg, RR));
}
DEBUG(dbgs() << '\n');
// Scan the register defs for this instruction and update
// live-ranges.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
// Ignore KILLs and passthru registers for liveness...
if (MI->isKill() || (PassthruRegs.count(Reg) != 0))
continue;
// Update def for Reg and aliases.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
// We need to be careful here not to define already-live super registers.
// If the super register is already live, then this definition is not
// a definition of the whole super register (just a partial insertion
// into it). Earlier subregister definitions (which we've not yet visited
// because we're iterating bottom-up) need to be linked to the same group
// as this definition.
if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI))
continue;
DefIndices[*AI] = Count;
}
}
}
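/// ScanInstruction - Process the uses in MI: update live ranges, groups
/// and RegRefs for each used register, and union all operands of a KILL
/// instruction into a single group so they are renamed together.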
void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
unsigned Count) {
DEBUG(dbgs() << "\tUse Groups:");
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
// If MI's uses have special allocation requirement, don't allow
// any use registers to be changed. Also assume all registers
// used in a call must not be changed (ABI).
// FIXME: The issue with predicated instructions is more complex. We are being
// conservative here because the kill markers cannot be trusted after
// if-conversion:
// %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
// ...
// STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
// %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
// STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
//
// The first R6 kill is not really a kill since it's killed by a predicated
// instruction which may not be executed. The second R6 def may or may not
// re-define R6 so it's not safe to change it since the last R6 use cannot be
// changed.
bool Special = MI->isCall() ||
MI->hasExtraSrcRegAllocReq() ||
TII->isPredicated(MI);
// Scan the register uses for this instruction and update
// live-ranges, groups and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
DEBUG(dbgs() << " " << TRI->getName(Reg) << "=g" <<
State->GetGroup(Reg));
// If the register wasn't previously live, this use is its last use (a
// kill). Forget the previous live-range information and start a new
// live-range for the register.
HandleLastUse(Reg, Count, "(last-use)");
if (Special) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
// Note register reference...
const TargetRegisterClass *RC = nullptr;
if (i < MI->getDesc().getNumOperands())
RC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
RegRefs.insert(std::make_pair(Reg, RR));
}
DEBUG(dbgs() << '\n');
// Form a group of all defs and uses of a KILL instruction to ensure
// that all registers are renamed as a group.
if (MI->isKill()) {
DEBUG(dbgs() << "\tKill Group:");
unsigned FirstReg = 0;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (FirstReg != 0) {
DEBUG(dbgs() << "=" << TRI->getName(Reg));
State->UnionGroups(FirstReg, Reg);
} else {
DEBUG(dbgs() << " " << TRI->getName(Reg));
FirstReg = Reg;
}
}
DEBUG(dbgs() << "->g" << State->GetGroup(FirstReg) << '\n');
}
}
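/// GetRenameRegisters - Return the set of registers that Reg may be
/// renamed to: the intersection of the allocatable registers of every
/// register class appearing in Reg's recorded references.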
BitVector AggressiveAntiDepBreaker::GetRenameRegisters(unsigned Reg) {
BitVector BV(TRI->getNumRegs(), false);
bool first = true;
// Check all references that need rewriting for Reg. For each, use
// the corresponding register class to narrow the set of registers
// that are appropriate for renaming.
std::pair<std::multimap<unsigned,
AggressiveAntiDepState::RegisterReference>::iterator,
std::multimap<unsigned,
AggressiveAntiDepState::RegisterReference>::iterator>
Range = State->GetRegRefs().equal_range(Reg);
for (std::multimap<unsigned,
AggressiveAntiDepState::RegisterReference>::iterator Q = Range.first,
QE = Range.second; Q != QE; ++Q) {
const TargetRegisterClass *RC = Q->second.RC;
if (!RC) continue;
BitVector RCBV = TRI->getAllocatableSet(MF, RC);
if (first) {
BV |= RCBV;
first = false;
} else {
BV &= RCBV;
}
DEBUG(dbgs() << " " << TRI->getRegClassName(RC));
}
return BV;
}
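/// FindSuitableFreeRegisters - Try to find a rename register for every
/// register in the group AntiDepGroupIndex, walking the candidate
/// super-registers in round-robin order. On success the chosen mapping
/// is returned in RenameMap.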
bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
unsigned AntiDepGroupIndex,
RenameOrderType& RenameOrder,
std::map<unsigned, unsigned> &RenameMap) {
std::vector<unsigned> &KillIndices = State->GetKillIndices();
std::vector<unsigned> &DefIndices = State->GetDefIndices();
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
// Collect all referenced registers in the same group as
// AntiDepReg. These all need to be renamed together if we are to
// break the anti-dependence.
std::vector<unsigned> Regs;
State->GetGroupRegs(AntiDepGroupIndex, Regs, &RegRefs);
assert(Regs.size() > 0 && "Empty register group!");
if (Regs.size() == 0)
return false;
// Find the "superest" register in the group. At the same time,
// collect the BitVector of registers that can be used to rename
// each register.
DEBUG(dbgs() << "\tRename Candidates for Group g" << AntiDepGroupIndex
<< ":\n");
std::map<unsigned, BitVector> RenameRegisterMap;
unsigned SuperReg = 0;
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
unsigned Reg = Regs[i];
if ((SuperReg == 0) || TRI->isSuperRegister(SuperReg, Reg))
SuperReg = Reg;
// If Reg has any references, then collect possible rename regs
if (RegRefs.count(Reg) > 0) {
DEBUG(dbgs() << "\t\t" << TRI->getName(Reg) << ":");
BitVector BV = GetRenameRegisters(Reg);
RenameRegisterMap.insert(std::pair<unsigned, BitVector>(Reg, BV));
DEBUG(dbgs() << " ::");
DEBUG(for (int r = BV.find_first(); r != -1; r = BV.find_next(r))
dbgs() << " " << TRI->getName(r));
DEBUG(dbgs() << "\n");
}
}
// All group registers should be a subreg of SuperReg.
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
unsigned Reg = Regs[i];
if (Reg == SuperReg) continue;
bool IsSub = TRI->isSubRegister(SuperReg, Reg);
// FIXME: remove this once PR18663 has been properly fixed. For now,
// return a conservative answer:
// assert(IsSub && "Expecting group subregister");
if (!IsSub)
return false;
}
#ifndef NDEBUG
// If DebugDiv > 0 then only rename (renamecnt % DebugDiv) == DebugMod
if (DebugDiv > 0) {
static int renamecnt = 0;
if (renamecnt++ % DebugDiv != DebugMod)
return false;
dbgs() << "*** Performing rename " << TRI->getName(SuperReg) <<
" for debug ***\n";
}
#endif
// Check each possible rename register for SuperReg in round-robin
// order. If that register is available, and the corresponding
// registers are available for the other group subregisters, then we
// can use those registers to rename.
// FIXME: Using getMinimalPhysRegClass is very conservative. We should
// check every use of the register and find the largest register class
// that can be used in all of them.
const TargetRegisterClass *SuperRC =
TRI->getMinimalPhysRegClass(SuperReg, MVT::Other);
ArrayRef<MCPhysReg> Order = RegClassInfo.getOrder(SuperRC);
if (Order.empty()) {
DEBUG(dbgs() << "\tEmpty Super Regclass!!\n");
return false;
}
DEBUG(dbgs() << "\tFind Registers:");
RenameOrder.insert(RenameOrderType::value_type(SuperRC, Order.size()));
unsigned OrigR = RenameOrder[SuperRC];
unsigned EndR = ((OrigR == Order.size()) ? 0 : OrigR);
unsigned R = OrigR;
do {
if (R == 0) R = Order.size();
--R;
const unsigned NewSuperReg = Order[R];
// Don't consider non-allocatable registers
if (!MRI.isAllocatable(NewSuperReg)) continue;
// Don't replace a register with itself.
if (NewSuperReg == SuperReg) continue;
DEBUG(dbgs() << " [" << TRI->getName(NewSuperReg) << ':');
RenameMap.clear();
// For each referenced group register (which must be a SuperReg or
// a subregister of SuperReg), find the corresponding subregister
// of NewSuperReg and make sure it is free to be renamed.
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
unsigned Reg = Regs[i];
unsigned NewReg = 0;
if (Reg == SuperReg) {
NewReg = NewSuperReg;
} else {
unsigned NewSubRegIdx = TRI->getSubRegIndex(SuperReg, Reg);
if (NewSubRegIdx != 0)
NewReg = TRI->getSubReg(NewSuperReg, NewSubRegIdx);
}
DEBUG(dbgs() << " " << TRI->getName(NewReg));
// Check if Reg can be renamed to NewReg.
BitVector BV = RenameRegisterMap[Reg];
if (!BV.test(NewReg)) {
DEBUG(dbgs() << "(no rename)");
goto next_super_reg;
}
// If NewReg is dead and NewReg's most recent def is not before
// Reg's kill, it's safe to replace Reg with NewReg. We
// must also check all aliases of NewReg, because we can't define a
// register when any sub or super is already live.
if (State->IsLive(NewReg) || (KillIndices[Reg] > DefIndices[NewReg])) {
DEBUG(dbgs() << "(live)");
goto next_super_reg;
} else {
bool found = false;
for (MCRegAliasIterator AI(NewReg, TRI, false); AI.isValid(); ++AI) {
unsigned AliasReg = *AI;
if (State->IsLive(AliasReg) ||
(KillIndices[Reg] > DefIndices[AliasReg])) {
DEBUG(dbgs() << "(alias " << TRI->getName(AliasReg) << " live)");
found = true;
break;
}
}
if (found)
goto next_super_reg;
}
// We cannot rename 'Reg' to 'NewReg' if one of the uses of 'Reg' also
// defines 'NewReg' via an early-clobber operand.
auto Range = RegRefs.equal_range(Reg);
for (auto Q = Range.first, QE = Range.second; Q != QE; ++Q) {
auto UseMI = Q->second.Operand->getParent();
int Idx = UseMI->findRegisterDefOperandIdx(NewReg, false, true, TRI);
if (Idx == -1)
continue;
if (UseMI->getOperand(Idx).isEarlyClobber()) {
DEBUG(dbgs() << "(ec)");
goto next_super_reg;
}
}
// Record that 'Reg' can be renamed to 'NewReg'.
RenameMap.insert(std::pair<unsigned, unsigned>(Reg, NewReg));
}
// If we fall-out here, then every register in the group can be
// renamed, as recorded in RenameMap.
RenameOrder.erase(SuperRC);
RenameOrder.insert(RenameOrderType::value_type(SuperRC, R));
DEBUG(dbgs() << "]\n");
return true;
next_super_reg:
DEBUG(dbgs() << ']');
} while (R != EndR);
DEBUG(dbgs() << '\n');
// No registers are free and available!
return false;
}
/// BreakAntiDependencies - Identify anti-dependencies within the
/// ScheduleDAG and break them by renaming registers.
///
unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
const std::vector<SUnit>& SUnits,
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned InsertPosIndex,
DbgValueVector &DbgValues) {
std::vector<unsigned> &KillIndices = State->GetKillIndices();
std::vector<unsigned> &DefIndices = State->GetDefIndices();
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
// The code below assumes that there is at least one instruction,
// so just duck out immediately if the block is empty.
if (SUnits.empty()) return 0;
// For each regclass the next register to use for renaming.
RenameOrderType RenameOrder;
// ...need a map from MI to SUnit.
std::map<MachineInstr *, const SUnit *> MISUnitMap;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
const SUnit *SU = &SUnits[i];
MISUnitMap.insert(std::pair<MachineInstr *, const SUnit *>(SU->getInstr(),
SU));
}
// Track progress along the critical path through the SUnit graph as
// we walk the instructions. This is needed for regclasses that only
// break critical-path anti-dependencies.
const SUnit *CriticalPathSU = nullptr;
MachineInstr *CriticalPathMI = nullptr;
if (CriticalPathSet.any()) {
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
const SUnit *SU = &SUnits[i];
if (!CriticalPathSU ||
((SU->getDepth() + SU->Latency) >
(CriticalPathSU->getDepth() + CriticalPathSU->Latency))) {
CriticalPathSU = SU;
}
}
CriticalPathMI = CriticalPathSU->getInstr();
}
#ifndef NDEBUG
DEBUG(dbgs() << "\n===== Aggressive anti-dependency breaking\n");
DEBUG(dbgs() << "Available regs:");
for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
if (!State->IsLive(Reg))
DEBUG(dbgs() << " " << TRI->getName(Reg));
}
DEBUG(dbgs() << '\n');
#endif
// Attempt to break anti-dependence edges. Walk the instructions
// from the bottom up, tracking information about liveness as we go
// to help determine which registers are available.
unsigned Broken = 0;
unsigned Count = InsertPosIndex - 1;
for (MachineBasicBlock::iterator I = End, E = Begin;
I != E; --Count) {
MachineInstr *MI = --I;
if (MI->isDebugValue())
continue;
DEBUG(dbgs() << "Anti: ");
DEBUG(MI->dump());
std::set<unsigned> PassthruRegs;
GetPassthruRegs(MI, PassthruRegs);
// Process the defs in MI...
PrescanInstruction(MI, Count, PassthruRegs);
// The dependence edges that represent anti- and output-
// dependencies that are candidates for breaking.
std::vector<const SDep *> Edges;
const SUnit *PathSU = MISUnitMap[MI];
AntiDepEdges(PathSU, Edges);
// If MI is not on the critical path, then we don't rename
// registers in the CriticalPathSet.
BitVector *ExcludeRegs = nullptr;
if (MI == CriticalPathMI) {
CriticalPathSU = CriticalPathStep(CriticalPathSU);
CriticalPathMI = (CriticalPathSU) ? CriticalPathSU->getInstr() : nullptr;
} else if (CriticalPathSet.any()) {
ExcludeRegs = &CriticalPathSet;
}
// Ignore KILL instructions (they form a group in ScanInstruction
// but don't cause any anti-dependence breaking themselves)
if (!MI->isKill()) {
// Attempt to break each anti-dependency...
for (unsigned i = 0, e = Edges.size(); i != e; ++i) {
const SDep *Edge = Edges[i];
SUnit *NextSU = Edge->getSUnit();
if ((Edge->getKind() != SDep::Anti) &&
(Edge->getKind() != SDep::Output)) continue;
unsigned AntiDepReg = Edge->getReg();
DEBUG(dbgs() << "\tAntidep reg: " << TRI->getName(AntiDepReg));
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
if (!MRI.isAllocatable(AntiDepReg)) {
// Don't break anti-dependencies on non-allocatable registers.
DEBUG(dbgs() << " (non-allocatable)\n");
continue;
} else if (ExcludeRegs && ExcludeRegs->test(AntiDepReg)) {
// Don't break anti-dependencies for critical path registers
// if not on the critical path
DEBUG(dbgs() << " (not critical-path)\n");
continue;
} else if (PassthruRegs.count(AntiDepReg) != 0) {
// If the anti-dep register liveness "passes-thru", then
// don't try to change it. It will be changed along with
// the use if required to break an earlier antidep.
DEBUG(dbgs() << " (passthru)\n");
continue;
} else {
// No anti-dep breaking for implicit deps
MachineOperand *AntiDepOp = MI->findRegisterDefOperand(AntiDepReg);
assert(AntiDepOp && "Can't find index for defined register operand");
if (!AntiDepOp || AntiDepOp->isImplicit()) {
DEBUG(dbgs() << " (implicit)\n");
continue;
}
// If the SUnit has other dependencies on the SUnit that
// it anti-depends on, don't bother breaking the
// anti-dependency since those edges would prevent such
// units from being scheduled past each other
// regardless.
//
// Also, if there are dependencies on other SUnits with the
// same register as the anti-dependency, don't attempt to
// break it.
for (SUnit::const_pred_iterator P = PathSU->Preds.begin(),
PE = PathSU->Preds.end(); P != PE; ++P) {
if (P->getSUnit() == NextSU ?
(P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
(P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
AntiDepReg = 0;
break;
}
}
for (SUnit::const_pred_iterator P = PathSU->Preds.begin(),
PE = PathSU->Preds.end(); P != PE; ++P) {
if ((P->getSUnit() == NextSU) && (P->getKind() != SDep::Anti) &&
(P->getKind() != SDep::Output)) {
DEBUG(dbgs() << " (real dependency)\n");
AntiDepReg = 0;
break;
} else if ((P->getSUnit() != NextSU) &&
(P->getKind() == SDep::Data) &&
(P->getReg() == AntiDepReg)) {
DEBUG(dbgs() << " (other dependency)\n");
AntiDepReg = 0;
break;
}
}
if (AntiDepReg == 0) continue;
}
assert(AntiDepReg != 0);
if (AntiDepReg == 0) continue;
// Determine AntiDepReg's register group.
const unsigned GroupIndex = State->GetGroup(AntiDepReg);
if (GroupIndex == 0) {
DEBUG(dbgs() << " (zero group)\n");
continue;
}
DEBUG(dbgs() << '\n');
// Look for a suitable register to use to break the anti-dependence.
std::map<unsigned, unsigned> RenameMap;
if (FindSuitableFreeRegisters(GroupIndex, RenameOrder, RenameMap)) {
DEBUG(dbgs() << "\tBreaking anti-dependence edge on "
<< TRI->getName(AntiDepReg) << ":");
// Handle each group register...
for (std::map<unsigned, unsigned>::iterator
S = RenameMap.begin(), E = RenameMap.end(); S != E; ++S) {
unsigned CurrReg = S->first;
unsigned NewReg = S->second;
DEBUG(dbgs() << " " << TRI->getName(CurrReg) << "->" <<
TRI->getName(NewReg) << "(" <<
RegRefs.count(CurrReg) << " refs)");
// Update the references to the old register CurrReg to
// refer to the new register NewReg.
std::pair<std::multimap<unsigned,
AggressiveAntiDepState::RegisterReference>::iterator,
std::multimap<unsigned,
AggressiveAntiDepState::RegisterReference>::iterator>
Range = RegRefs.equal_range(CurrReg);
for (std::multimap<unsigned,
AggressiveAntiDepState::RegisterReference>::iterator
Q = Range.first, QE = Range.second; Q != QE; ++Q) {
Q->second.Operand->setReg(NewReg);
// If the SU for the instruction being updated has debug
// information related to the anti-dependency register, make
// sure to update that as well.
const SUnit *SU = MISUnitMap[Q->second.Operand->getParent()];
if (!SU) continue;
for (DbgValueVector::iterator DVI = DbgValues.begin(),
DVE = DbgValues.end(); DVI != DVE; ++DVI)
if (DVI->second == Q->second.Operand->getParent())
UpdateDbgValue(DVI->first, AntiDepReg, NewReg);
}
// We just went back in time and modified history; the
// liveness information for CurrReg is now inconsistent. Set
// the state as if it were dead.
State->UnionGroups(NewReg, 0);
RegRefs.erase(NewReg);
DefIndices[NewReg] = DefIndices[CurrReg];
KillIndices[NewReg] = KillIndices[CurrReg];
State->UnionGroups(CurrReg, 0);
RegRefs.erase(CurrReg);
DefIndices[CurrReg] = KillIndices[CurrReg];
KillIndices[CurrReg] = ~0u;
assert(((KillIndices[CurrReg] == ~0u) !=
(DefIndices[CurrReg] == ~0u)) &&
"Kill and Def maps aren't consistent for AntiDepReg!");
}
++Broken;
DEBUG(dbgs() << '\n');
}
}
}
ScanInstruction(MI, Count);
}
return Broken;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/CalcSpillWeights.cpp | //===------------------------ CalcSpillWeights.cpp ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "calcspillweights"
void llvm::calculateSpillWeightsAndHints(LiveIntervals &LIS,
MachineFunction &MF,
const MachineLoopInfo &MLI,
const MachineBlockFrequencyInfo &MBFI,
VirtRegAuxInfo::NormalizingFn norm) {
DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
<< "********** Function: " << MF.getName() << '\n');
MachineRegisterInfo &MRI = MF.getRegInfo();
VirtRegAuxInfo VRAI(MF, LIS, MLI, MBFI, norm);
for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (MRI.reg_nodbg_empty(Reg))
continue;
VRAI.calculateSpillWeightAndHint(LIS.getInterval(Reg));
}
}
// Return the preferred allocation register for reg, given a COPY instruction.
static unsigned copyHint(const MachineInstr *mi, unsigned reg,
const TargetRegisterInfo &tri,
const MachineRegisterInfo &mri) {
unsigned sub, hreg, hsub;
if (mi->getOperand(0).getReg() == reg) {
sub = mi->getOperand(0).getSubReg();
hreg = mi->getOperand(1).getReg();
hsub = mi->getOperand(1).getSubReg();
} else {
sub = mi->getOperand(1).getSubReg();
hreg = mi->getOperand(0).getReg();
hsub = mi->getOperand(0).getSubReg();
}
if (!hreg)
return 0;
if (TargetRegisterInfo::isVirtualRegister(hreg))
return sub == hsub ? hreg : 0;
const TargetRegisterClass *rc = mri.getRegClass(reg);
// Only allow physreg hints in rc.
if (sub == 0)
return rc->contains(hreg) ? hreg : 0;
// reg:sub should match the physreg hreg.
return tri.getMatchingSuperReg(hreg, sub, rc);
}
// Check if all values in LI are rematerializable
static bool isRematerializable(const LiveInterval &LI,
const LiveIntervals &LIS,
const TargetInstrInfo &TII) {
for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
I != E; ++I) {
const VNInfo *VNI = *I;
if (VNI->isUnused())
continue;
if (VNI->isPHIDef())
return false;
MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
assert(MI && "Dead valno in interval");
if (!TII.isTriviallyReMaterializable(MI, LIS.getAliasAnalysis()))
return false;
}
return true;
}
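/// calculateSpillWeightAndHint - Compute the spill weight of li and a
/// preferred allocation hint. Each use or def contributes a block
/// frequency based weight (tripled for what looks like a loop induction
/// variable update); the total is boosted slightly when a copy hint is
/// found, halved when every definition is trivially rematerializable,
/// and finally normalized by the interval's size and instruction count.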
void
VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
MachineRegisterInfo &mri = MF.getRegInfo();
const TargetRegisterInfo &tri = *MF.getSubtarget().getRegisterInfo();
MachineBasicBlock *mbb = nullptr;
MachineLoop *loop = nullptr;
bool isExiting = false;
float totalWeight = 0;
unsigned numInstr = 0; // Number of instructions using li
SmallPtrSet<MachineInstr*, 8> visited;
// Find the best physreg hint and the best virtreg hint.
float bestPhys = 0, bestVirt = 0;
unsigned hintPhys = 0, hintVirt = 0;
// Don't recompute a target specific hint.
bool noHint = mri.getRegAllocationHint(li.reg).first != 0;
// Don't recompute spill weight for an unspillable register.
bool Spillable = li.isSpillable();
for (MachineRegisterInfo::reg_instr_iterator
I = mri.reg_instr_begin(li.reg), E = mri.reg_instr_end();
I != E; ) {
MachineInstr *mi = &*(I++);
numInstr++;
if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
continue;
if (!visited.insert(mi).second)
continue;
float weight = 1.0f;
if (Spillable) {
// Get loop info for mi.
if (mi->getParent() != mbb) {
mbb = mi->getParent();
loop = Loops.getLoopFor(mbb);
isExiting = loop ? loop->isLoopExiting(mbb) : false;
}
// Calculate instr weight.
bool reads, writes;
std::tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
weight = LiveIntervals::getSpillWeight(
writes, reads, &MBFI, mi);
// Give extra weight to what looks like a loop induction variable update.
if (writes && isExiting && LIS.isLiveOutOfMBB(li, mbb))
weight *= 3;
totalWeight += weight;
}
// Get allocation hints from copies.
if (noHint || !mi->isCopy())
continue;
unsigned hint = copyHint(mi, li.reg, tri, mri);
if (!hint)
continue;
// Force hweight onto the stack so that x86 doesn't add hidden precision,
// making the comparison incorrectly pass (i.e., 1 > 1 == true??).
//
// FIXME: we probably shouldn't use floats at all.
volatile float hweight = Hint[hint] += weight;
if (TargetRegisterInfo::isPhysicalRegister(hint)) {
if (hweight > bestPhys && mri.isAllocatable(hint))
bestPhys = hweight, hintPhys = hint;
} else {
if (hweight > bestVirt)
bestVirt = hweight, hintVirt = hint;
}
}
Hint.clear();
// Always prefer the physreg hint.
if (unsigned hint = hintPhys ? hintPhys : hintVirt) {
mri.setRegAllocationHint(li.reg, 0, hint);
// Weakly boost the spill weight of hinted registers.
totalWeight *= 1.01F;
}
// If the live interval was already unspillable, leave it that way.
if (!Spillable)
return;
// Mark li as unspillable if all live ranges are tiny.
if (li.isZeroLength(LIS.getSlotIndexes())) {
li.markNotSpillable();
return;
}
// If all of the definitions of the interval are re-materializable,
// it is a preferred candidate for spilling.
// FIXME: this gets much more complicated once we support non-trivial
// re-materialization.
if (isRematerializable(li, LIS, *MF.getSubtarget().getInstrInfo()))
totalWeight *= 0.5F;
li.weight = normalize(totalWeight, li.getSize(), numInstr);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/BranchFolding.cpp | //===-- BranchFolding.cpp - Fold machine code branch instructions ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass forwards branches to unconditional branches to make them branch
// directly to the target block. This pass often results in dead MBB's, which
// it then removes.
//
// Note that this pass must be run after register allocation, it cannot handle
// SSA form.
//
//===----------------------------------------------------------------------===//
#include "BranchFolding.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "branchfolding"
STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
STATISTIC(NumBranchOpts, "Number of branches optimized");
STATISTIC(NumTailMerge , "Number of block tails merged");
STATISTIC(NumHoist , "Number of times common instructions are hoisted");
static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
cl::init(cl::BOU_UNSET), cl::Hidden);
// Throttle for huge numbers of predecessors (compile speed problems)
static cl::opt<unsigned>
TailMergeThreshold("tail-merge-threshold",
cl::desc("Max number of predecessors to consider tail merging"),
cl::init(150), cl::Hidden);
// Heuristic for tail merging (and, inversely, tail duplication).
// TODO: This should be replaced with a target query.
static cl::opt<unsigned>
TailMergeSize("tail-merge-size",
cl::desc("Min number of instructions to consider tail merging"),
cl::init(3), cl::Hidden);
namespace {
/// BranchFolderPass - Wrap branch folder in a machine function pass.
class BranchFolderPass : public MachineFunctionPass {
public:
static char ID;
explicit BranchFolderPass(): MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
}
char BranchFolderPass::ID = 0;
char &llvm::BranchFolderPassID = BranchFolderPass::ID;
INITIALIZE_PASS(BranchFolderPass, "branch-folder",
"Control Flow Optimizer", false, false)
bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
// Tail merging can create a jump into the middle of an if branch, making
// the CFG irreducible; avoid it for hardware that requires a structured
// CFG.
bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
PassConfig->getEnableTailMerge();
BranchFolder Folder(EnableTailMerge, /*CommonHoist=*/true,
getAnalysis<MachineBlockFrequencyInfo>(),
getAnalysis<MachineBranchProbabilityInfo>());
return Folder.OptimizeFunction(MF, MF.getSubtarget().getInstrInfo(),
MF.getSubtarget().getRegisterInfo(),
getAnalysisIfAvailable<MachineModuleInfo>());
}
BranchFolder::BranchFolder(bool defaultEnableTailMerge, bool CommonHoist,
const MachineBlockFrequencyInfo &FreqInfo,
const MachineBranchProbabilityInfo &ProbInfo)
: EnableHoistCommonCode(CommonHoist), MBBFreqInfo(FreqInfo),
MBPI(ProbInfo) {
switch (FlagEnableTailMerge) {
case cl::BOU_UNSET: EnableTailMerge = defaultEnableTailMerge; break;
case cl::BOU_TRUE: EnableTailMerge = true; break;
case cl::BOU_FALSE: EnableTailMerge = false; break;
}
}
/// RemoveDeadBlock - Remove the specified dead machine basic block from the
/// function, updating the CFG.
void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
assert(MBB->pred_empty() && "MBB must be dead!");
DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
MachineFunction *MF = MBB->getParent();
// drop all successors.
while (!MBB->succ_empty())
MBB->removeSuccessor(MBB->succ_end()-1);
// Avoid matching if this pointer gets reused.
TriedMerging.erase(MBB);
// Remove the block.
MF->erase(MBB);
}
/// OptimizeImpDefsBlock - If a basic block is just a bunch of implicit_def
/// followed by terminators, and if the implicitly defined registers are not
/// used by the terminators, remove those implicit_def's. e.g.
/// BB1:
/// r0 = implicit_def
/// r1 = implicit_def
/// br
/// This block can be optimized away later if the implicit instructions are
/// removed.
bool BranchFolder::OptimizeImpDefsBlock(MachineBasicBlock *MBB) {
SmallSet<unsigned, 4> ImpDefRegs;
MachineBasicBlock::iterator I = MBB->begin();
while (I != MBB->end()) {
if (!I->isImplicitDef())
break;
unsigned Reg = I->getOperand(0).getReg();
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
ImpDefRegs.insert(*SubRegs);
++I;
}
if (ImpDefRegs.empty())
return false;
MachineBasicBlock::iterator FirstTerm = I;
while (I != MBB->end()) {
if (!TII->isUnpredicatedTerminator(I))
return false;
// See if it uses any of the implicitly defined registers.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
MachineOperand &MO = I->getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (ImpDefRegs.count(Reg))
return false;
}
++I;
}
I = MBB->begin();
while (I != FirstTerm) {
MachineInstr *ImpDefMI = &*I;
++I;
MBB->erase(ImpDefMI);
}
return true;
}
/// OptimizeFunction - Perform branch folding, tail merging and other
/// CFG optimizations on the given function.
bool BranchFolder::OptimizeFunction(MachineFunction &MF,
const TargetInstrInfo *tii,
const TargetRegisterInfo *tri,
MachineModuleInfo *mmi) {
if (!tii) return false;
TriedMerging.clear();
TII = tii;
TRI = tri;
MMI = mmi;
RS = nullptr;
// Use a RegScavenger to help update liveness when required.
MachineRegisterInfo &MRI = MF.getRegInfo();
if (MRI.tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF))
RS = new RegScavenger();
else
MRI.invalidateLiveness();
// Fix CFG. The later algorithms expect it to be right.
bool MadeChange = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; I++) {
MachineBasicBlock *MBB = I, *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, true))
MadeChange |= MBB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
MadeChange |= OptimizeImpDefsBlock(MBB);
}
bool MadeChangeThisIteration = true;
while (MadeChangeThisIteration) {
MadeChangeThisIteration = TailMergeBlocks(MF);
MadeChangeThisIteration |= OptimizeBranches(MF);
if (EnableHoistCommonCode)
MadeChangeThisIteration |= HoistCommonCode(MF);
MadeChange |= MadeChangeThisIteration;
}
// See if any jump tables have become dead as the code generator
// did its thing.
MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
if (!JTI) {
delete RS;
return MadeChange;
}
// Walk the function to find jump tables that are live.
BitVector JTIsLive(JTI->getJumpTables().size());
for (MachineFunction::iterator BB = MF.begin(), E = MF.end();
BB != E; ++BB) {
for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
I != E; ++I)
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
MachineOperand &Op = I->getOperand(op);
if (!Op.isJTI()) continue;
// Remember that this JT is live.
JTIsLive.set(Op.getIndex());
}
}
// Finally, remove dead jump tables. This happens when the
// indirect jump was unreachable (and thus deleted).
for (unsigned i = 0, e = JTIsLive.size(); i != e; ++i)
if (!JTIsLive.test(i)) {
JTI->RemoveJumpTable(i);
MadeChange = true;
}
delete RS;
return MadeChange;
}
//===----------------------------------------------------------------------===//
// Tail Merging of Blocks
//===----------------------------------------------------------------------===//
/// HashMachineInstr - Compute a hash value for MI and its operands.
static unsigned HashMachineInstr(const MachineInstr *MI) {
unsigned Hash = MI->getOpcode();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &Op = MI->getOperand(i);
// Merge in bits from the operand if easy. We can't use MachineOperand's
// hash_code here because it's not deterministic and we sort by hash value
// later.
unsigned OperandHash = 0;
switch (Op.getType()) {
case MachineOperand::MO_Register:
OperandHash = Op.getReg();
break;
case MachineOperand::MO_Immediate:
OperandHash = Op.getImm();
break;
case MachineOperand::MO_MachineBasicBlock:
OperandHash = Op.getMBB()->getNumber();
break;
case MachineOperand::MO_FrameIndex:
case MachineOperand::MO_ConstantPoolIndex:
case MachineOperand::MO_JumpTableIndex:
OperandHash = Op.getIndex();
break;
case MachineOperand::MO_GlobalAddress:
case MachineOperand::MO_ExternalSymbol:
// Global address / external symbol are too hard, don't bother, but do
// pull in the offset.
OperandHash = Op.getOffset();
break;
default:
break;
}
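// Fold the operand's hash into the running value, tagged with the
// operand type and shifted by the operand index so that reordered
// operands produce different hashes.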
Hash += ((OperandHash << 3) | Op.getType()) << (i & 31);
}
return Hash;
}
/// HashEndOfMBB - Hash the last instruction in the MBB.
static unsigned HashEndOfMBB(const MachineBasicBlock *MBB) {
MachineBasicBlock::const_iterator I = MBB->getLastNonDebugInstr();
if (I == MBB->end())
return 0;
return HashMachineInstr(I);
}
/// ComputeCommonTailLength - Given two machine basic blocks, compute the number
/// of instructions they actually have in common at their end. Return
/// iterators for the first shared instruction in each block.
static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
MachineBasicBlock *MBB2,
MachineBasicBlock::iterator &I1,
MachineBasicBlock::iterator &I2) {
I1 = MBB1->end();
I2 = MBB2->end();
unsigned TailLen = 0;
while (I1 != MBB1->begin() && I2 != MBB2->begin()) {
--I1; --I2;
// Skip debugging pseudos; necessary to avoid changing the code.
while (I1->isDebugValue()) {
if (I1==MBB1->begin()) {
while (I2->isDebugValue()) {
if (I2==MBB2->begin())
// I1==DBG at begin; I2==DBG at begin
return TailLen;
--I2;
}
++I2;
// I1==DBG at begin; I2==non-DBG, or first of DBGs not at begin
return TailLen;
}
--I1;
}
// I1==first (untested) non-DBG preceding known match
while (I2->isDebugValue()) {
if (I2==MBB2->begin()) {
++I1;
// I1==non-DBG, or first of DBGs not at begin; I2==DBG at begin
return TailLen;
}
--I2;
}
// I1, I2==first (untested) non-DBGs preceding known match
if (!I1->isIdenticalTo(I2) ||
// FIXME: This check is dubious. It's used to get around a problem where
// people incorrectly expect inline asm directives to remain in the same
// relative order. This is untenable because normal compiler
// optimizations (like this one) may reorder and/or merge these
// directives.
I1->isInlineAsm()) {
++I1; ++I2;
break;
}
++TailLen;
}
// Back past possible debugging pseudos at beginning of block. This matters
// when one block differs from the other only by whether debugging pseudos
// are present at the beginning. (This way, the various checks later for
// I1==MBB1->begin() work as expected.)
if (I1 == MBB1->begin() && I2 != MBB2->begin()) {
--I2;
while (I2->isDebugValue()) {
if (I2 == MBB2->begin())
return TailLen;
--I2;
}
++I2;
}
if (I2 == MBB2->begin() && I1 != MBB1->begin()) {
--I1;
while (I1->isDebugValue()) {
if (I1 == MBB1->begin())
return TailLen;
--I1;
}
++I1;
}
return TailLen;
}
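/// MaintainLiveIns - When using the register scavenger, add every
/// register live at the end of CurMBB as a live-in of NewMBB.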
void BranchFolder::MaintainLiveIns(MachineBasicBlock *CurMBB,
MachineBasicBlock *NewMBB) {
if (RS) {
RS->enterBasicBlock(CurMBB);
if (!CurMBB->empty())
RS->forward(std::prev(CurMBB->end()));
for (unsigned int i = 1, e = TRI->getNumRegs(); i != e; i++)
if (RS->isRegUsed(i, false))
NewMBB->addLiveIn(i);
}
}
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
MachineBasicBlock *NewDest) {
MachineBasicBlock *CurMBB = OldInst->getParent();
TII->ReplaceTailWithBranchTo(OldInst, NewDest);
// For targets that use the register scavenger, we must maintain LiveIns.
MaintainLiveIns(CurMBB, NewDest);
++NumTailMerge;
}
/// SplitMBBAt - Given a machine basic block and an iterator into it, split the
/// MBB so that the part before the iterator falls through into the part
/// starting at the iterator. This returns the new MBB.
MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
MachineBasicBlock::iterator BBI1,
const BasicBlock *BB) {
if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
return nullptr;
MachineFunction &MF = *CurMBB.getParent();
// Create the fall-through block.
MachineFunction::iterator MBBI = &CurMBB;
MachineBasicBlock *NewMBB =MF.CreateMachineBasicBlock(BB);
CurMBB.getParent()->insert(++MBBI, NewMBB);
// Move all the successors of this block to the specified block.
NewMBB->transferSuccessors(&CurMBB);
// Add an edge from CurMBB to NewMBB for the fall-through.
CurMBB.addSuccessor(NewMBB);
// Splice the code over.
NewMBB->splice(NewMBB->end(), &CurMBB, BBI1, CurMBB.end());
// NewMBB inherits CurMBB's block frequency.
MBBFreqInfo.setBlockFreq(NewMBB, MBBFreqInfo.getBlockFreq(&CurMBB));
// For targets that use the register scavenger, we must maintain LiveIns.
MaintainLiveIns(&CurMBB, NewMBB);
return NewMBB;
}
/// EstimateRuntime - Make a rough estimate for how long it will take to run
/// the specified code.
static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E) {
unsigned Time = 0;
for (; I != E; ++I) {
if (I->isDebugValue())
continue;
if (I->isCall())
Time += 10;
else if (I->mayLoad() || I->mayStore())
Time += 2;
else
++Time;
}
return Time;
}
// CurMBB needs to add an unconditional branch to SuccMBB (we removed these
// branches temporarily for tail merging). In the case where CurMBB ends
// with a conditional branch to the next block, optimize by reversing the
// test and conditionally branching to SuccMBB instead.
static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
const TargetInstrInfo *TII) {
MachineFunction *MF = CurMBB->getParent();
MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
DebugLoc dl; // FIXME: this is nowhere
if (I != MF->end() &&
!TII->AnalyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
MachineBasicBlock *NextBB = I;
if (TBB == NextBB && !Cond.empty() && !FBB) {
if (!TII->ReverseBranchCondition(Cond)) {
TII->RemoveBranch(*CurMBB);
TII->InsertBranch(*CurMBB, SuccBB, nullptr, Cond, dl);
return;
}
}
}
TII->InsertBranch(*CurMBB, SuccBB, nullptr,
SmallVector<MachineOperand, 0>(), dl);
}
bool
BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
if (getHash() < o.getHash())
return true;
if (getHash() > o.getHash())
return false;
if (getBlock()->getNumber() < o.getBlock()->getNumber())
return true;
if (getBlock()->getNumber() > o.getBlock()->getNumber())
return false;
// _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
// an object with itself.
#ifndef _GLIBCXX_DEBUG
llvm_unreachable("Predecessor appears twice");
#else
return false;
#endif
}
BlockFrequency
BranchFolder::MBFIWrapper::getBlockFreq(const MachineBasicBlock *MBB) const {
auto I = MergedBBFreq.find(MBB);
if (I != MergedBBFreq.end())
return I->second;
return MBFI.getBlockFreq(MBB);
}
void BranchFolder::MBFIWrapper::setBlockFreq(const MachineBasicBlock *MBB,
BlockFrequency F) {
MergedBBFreq[MBB] = F;
}
/// CountTerminators - Count the number of terminators in the given
/// block and set I to the position of the first non-terminator, if there
/// is one, or MBB->end() otherwise.
static unsigned CountTerminators(MachineBasicBlock *MBB,
MachineBasicBlock::iterator &I) {
I = MBB->end();
unsigned NumTerms = 0;
for (;;) {
if (I == MBB->begin()) {
I = MBB->end();
break;
}
--I;
if (!I->isTerminator()) break;
++NumTerms;
}
return NumTerms;
}
/// ProfitableToMerge - Check if two machine basic blocks have a common tail
/// and decide if it would be profitable to merge those tails. Return the
/// length of the common tail and iterators to the first common instruction
/// in each block.
static bool ProfitableToMerge(MachineBasicBlock *MBB1,
MachineBasicBlock *MBB2,
unsigned minCommonTailLength,
unsigned &CommonTailLen,
MachineBasicBlock::iterator &I1,
MachineBasicBlock::iterator &I2,
MachineBasicBlock *SuccBB,
MachineBasicBlock *PredBB) {
CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2);
if (CommonTailLen == 0)
return false;
DEBUG(dbgs() << "Common tail length of BB#" << MBB1->getNumber()
<< " and BB#" << MBB2->getNumber() << " is " << CommonTailLen
<< '\n');
// It's almost always profitable to merge any number of non-terminator
// instructions with the block that falls through into the common successor.
if (MBB1 == PredBB || MBB2 == PredBB) {
MachineBasicBlock::iterator I;
unsigned NumTerms = CountTerminators(MBB1 == PredBB ? MBB2 : MBB1, I);
if (CommonTailLen > NumTerms)
return true;
}
// If one of the blocks can be completely merged and happens to be in
// a position where the other could fall through into it, merge any number
// of instructions, because it can be done without a branch.
// TODO: If the blocks are not adjacent, move one of them so that they are?
if (MBB1->isLayoutSuccessor(MBB2) && I2 == MBB2->begin())
return true;
if (MBB2->isLayoutSuccessor(MBB1) && I1 == MBB1->begin())
return true;
// If both blocks have an unconditional branch temporarily stripped out,
// count that as an additional common instruction for the following
// heuristics.
unsigned EffectiveTailLen = CommonTailLen;
if (SuccBB && MBB1 != PredBB && MBB2 != PredBB &&
!MBB1->back().isBarrier() &&
!MBB2->back().isBarrier())
++EffectiveTailLen;
// Check if the common tail is long enough to be worthwhile.
if (EffectiveTailLen >= minCommonTailLength)
return true;
// If we are optimizing for code size, 2 instructions in common is enough if
// we don't have to split a block. At worst we will be introducing 1 new
// branch instruction, which is likely to be smaller than the 2
// instructions that would be deleted in the merge.
MachineFunction *MF = MBB1->getParent();
if (EffectiveTailLen >= 2 &&
MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
(I1 == MBB1->begin() || I2 == MBB2->begin()))
return true;
return false;
}
/// ComputeSameTails - Look through all the blocks in MergePotentials that have
/// hash CurHash (guaranteed to match the last element). Build the vector
/// SameTails of all those that have the (same) largest number of instructions
/// in common of any pair of these blocks. SameTails entries contain an
/// iterator into MergePotentials (from which the MachineBasicBlock can be
/// found) and a MachineBasicBlock::iterator into that MBB indicating the
/// instruction where the matching code sequence begins.
/// Order of elements in SameTails is the reverse of the order in which
/// those blocks appear in MergePotentials (where they are not necessarily
/// consecutive).
unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
unsigned minCommonTailLength,
MachineBasicBlock *SuccBB,
MachineBasicBlock *PredBB) {
unsigned maxCommonTailLength = 0U;
SameTails.clear();
MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
MPIterator HighestMPIter = std::prev(MergePotentials.end());
for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
B = MergePotentials.begin();
CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
unsigned CommonTailLen;
if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
minCommonTailLength,
CommonTailLen, TrialBBI1, TrialBBI2,
SuccBB, PredBB)) {
if (CommonTailLen > maxCommonTailLength) {
SameTails.clear();
maxCommonTailLength = CommonTailLen;
HighestMPIter = CurMPIter;
SameTails.push_back(SameTailElt(CurMPIter, TrialBBI1));
}
if (HighestMPIter == CurMPIter &&
CommonTailLen == maxCommonTailLength)
SameTails.push_back(SameTailElt(I, TrialBBI2));
}
if (I == B)
break;
}
}
return maxCommonTailLength;
}
/// RemoveBlocksWithHash - Remove all blocks with hash CurHash from
/// MergePotentials, restoring branches at ends of blocks as appropriate.
void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
MachineBasicBlock *SuccBB,
MachineBasicBlock *PredBB) {
MPIterator CurMPIter, B;
for (CurMPIter = std::prev(MergePotentials.end()),
B = MergePotentials.begin();
CurMPIter->getHash() == CurHash; --CurMPIter) {
// Put the unconditional branch back, if we need one.
MachineBasicBlock *CurMBB = CurMPIter->getBlock();
if (SuccBB && CurMBB != PredBB)
FixTail(CurMBB, SuccBB, TII);
if (CurMPIter == B)
break;
}
if (CurMPIter->getHash() != CurHash)
CurMPIter++;
MergePotentials.erase(CurMPIter, MergePotentials.end());
}
/// CreateCommonTailOnlyBlock - None of the blocks to be tail-merged consist
/// only of the common tail. Create a block that does by splitting one.
bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
MachineBasicBlock *SuccBB,
unsigned maxCommonTailLength,
unsigned &commonTailIndex) {
commonTailIndex = 0;
unsigned TimeEstimate = ~0U;
for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
// Use PredBB if possible; that doesn't require a new branch.
if (SameTails[i].getBlock() == PredBB) {
commonTailIndex = i;
break;
}
// Otherwise, make a (fairly bogus) choice based on estimate of
// how long it will take the various blocks to execute.
unsigned t = EstimateRuntime(SameTails[i].getBlock()->begin(),
SameTails[i].getTailStartPos());
if (t <= TimeEstimate) {
TimeEstimate = t;
commonTailIndex = i;
}
}
MachineBasicBlock::iterator BBI =
SameTails[commonTailIndex].getTailStartPos();
MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
// If the common tail includes any debug info we will take it pretty
// randomly from one of the inputs. Might be better to remove it?
DEBUG(dbgs() << "\nSplitting BB#" << MBB->getNumber() << ", size "
<< maxCommonTailLength);
// If the split block unconditionally falls-thru to SuccBB, it will be
// merged. In control flow terms it should then take SuccBB's name. e.g. If
// SuccBB is an inner loop, the common tail is still part of the inner loop.
const BasicBlock *BB = (SuccBB && MBB->succ_size() == 1) ?
SuccBB->getBasicBlock() : MBB->getBasicBlock();
MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI, BB);
if (!newMBB) {
DEBUG(dbgs() << "... failed!");
return false;
}
SameTails[commonTailIndex].setBlock(newMBB);
SameTails[commonTailIndex].setTailStartPos(newMBB->begin());
// If we split PredBB, newMBB is the new predecessor.
if (PredBB == MBB)
PredBB = newMBB;
return true;
}
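// Return true if MI1 and MI2 carry the same list of memory operands:
// the same count, with pairwise-equal MMOs.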
static bool hasIdenticalMMOs(const MachineInstr *MI1, const MachineInstr *MI2) {
auto I1 = MI1->memoperands_begin(), E1 = MI1->memoperands_end();
auto I2 = MI2->memoperands_begin(), E2 = MI2->memoperands_end();
if ((E1 - I1) != (E2 - I2))
return false;
for (; I1 != E1; ++I1, ++I2) {
if (**I1 != **I2)
return false;
}
return true;
}
static void
removeMMOsFromMemoryOperations(MachineBasicBlock::iterator MBBIStartPos,
MachineBasicBlock &MBBCommon) {
// Remove MMOs from memory operations in the common block
// when they do not match the ones from the block being tail-merged.
// This ensures later passes conservatively compute dependencies.
MachineBasicBlock *MBB = MBBIStartPos->getParent();
// Note that CommonTailLen does not necessarily match the size of the
// common BB nor all of its instructions, because of differences in
// debug instructions.
unsigned CommonTailLen = 0;
for (auto E = MBB->end(); MBBIStartPos != E; ++MBBIStartPos)
++CommonTailLen;
MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin();
MachineBasicBlock::reverse_iterator MBBIE = MBB->rend();
MachineBasicBlock::reverse_iterator MBBICommon = MBBCommon.rbegin();
MachineBasicBlock::reverse_iterator MBBIECommon = MBBCommon.rend();
while (CommonTailLen--) {
assert(MBBI != MBBIE && "Reached BB end within common tail length!");
(void)MBBIE;
if (MBBI->isDebugValue()) {
++MBBI;
continue;
}
while ((MBBICommon != MBBIECommon) && MBBICommon->isDebugValue())
++MBBICommon;
assert(MBBICommon != MBBIECommon &&
"Reached BB end within common tail length!");
assert(MBBICommon->isIdenticalTo(&*MBBI) && "Expected matching MIIs!");
if (MBBICommon->mayLoad() || MBBICommon->mayStore())
if (!hasIdenticalMMOs(&*MBBI, &*MBBICommon))
MBBICommon->clearMemRefs();
++MBBI;
++MBBICommon;
}
}
// See if any of the blocks in MergePotentials (which all have a common single
// successor, or all have no successor) can be tail-merged. If there is a
// successor, any blocks in MergePotentials that are not tail-merged and
// are not immediately before Succ must have an unconditional branch to
// Succ added (but the predecessor/successor lists need no adjustment).
// The lone predecessor of Succ that falls through into Succ,
// if any, is given in PredBB.
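// Illustrative sketch (hypothetical blocks): given
//   BB1: ...; I1; I2; I3
//   BB2: ...; I1; I2; I3
// with the same successor, the shared tail "I1; I2; I3" ends up in a single
// block and the other block is rewritten to branch to it.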
bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
MachineBasicBlock *PredBB) {
bool MadeChange = false;
// Except for the special cases below, tail-merge if there are at least
// this many instructions in common.
unsigned minCommonTailLength = TailMergeSize;
DEBUG(dbgs() << "\nTryTailMergeBlocks: ";
for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
dbgs() << "BB#" << MergePotentials[i].getBlock()->getNumber()
<< (i == e-1 ? "" : ", ");
dbgs() << "\n";
if (SuccBB) {
dbgs() << " with successor BB#" << SuccBB->getNumber() << '\n';
if (PredBB)
dbgs() << " which has fall-through from BB#"
<< PredBB->getNumber() << "\n";
}
dbgs() << "Looking for common tails of at least "
<< minCommonTailLength << " instruction"
<< (minCommonTailLength == 1 ? "" : "s") << '\n';
);
// Sort by hash value so that blocks with identical end sequences sort
// together.
array_pod_sort(MergePotentials.begin(), MergePotentials.end());
// Walk through equivalence sets looking for actual exact matches.
while (MergePotentials.size() > 1) {
unsigned CurHash = MergePotentials.back().getHash();
// Build SameTails, identifying the set of blocks with this hash code
// and with the maximum number of instructions in common.
unsigned maxCommonTailLength = ComputeSameTails(CurHash,
minCommonTailLength,
SuccBB, PredBB);
// If we didn't find any pair that has at least minCommonTailLength
// instructions in common, remove all blocks with this hash code and retry.
if (SameTails.empty()) {
RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
continue;
}
// If one of the blocks is the entire common tail (and not the entry
// block, which we can't jump to), we can treat all blocks with this same
// tail at once. Use PredBB if that is one of the possibilities, as that
// will not introduce any extra branches.
MachineBasicBlock *EntryBB = MergePotentials.begin()->getBlock()->
getParent()->begin();
unsigned commonTailIndex = SameTails.size();
// If there are two blocks, check to see if one can be made to fall through
// into the other.
if (SameTails.size() == 2 &&
SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
SameTails[1].tailIsWholeBlock())
commonTailIndex = 1;
else if (SameTails.size() == 2 &&
SameTails[1].getBlock()->isLayoutSuccessor(
SameTails[0].getBlock()) &&
SameTails[0].tailIsWholeBlock())
commonTailIndex = 0;
else {
// Otherwise just pick one, favoring the fall-through predecessor if
// there is one.
for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
MachineBasicBlock *MBB = SameTails[i].getBlock();
if (MBB == EntryBB && SameTails[i].tailIsWholeBlock())
continue;
if (MBB == PredBB) {
commonTailIndex = i;
break;
}
if (SameTails[i].tailIsWholeBlock())
commonTailIndex = i;
}
}
if (commonTailIndex == SameTails.size() ||
(SameTails[commonTailIndex].getBlock() == PredBB &&
!SameTails[commonTailIndex].tailIsWholeBlock())) {
// None of the blocks consist entirely of the common tail.
// Split a block so that one does.
if (!CreateCommonTailOnlyBlock(PredBB, SuccBB,
maxCommonTailLength, commonTailIndex)) {
RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
continue;
}
}
MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
    // Recompute common tail MBB's edge weights and block frequency.
setCommonTailEdgeWeights(*MBB);
// MBB is common tail. Adjust all other BB's to jump to this one.
// Traversal must be forwards so erases work.
DEBUG(dbgs() << "\nUsing common tail in BB#" << MBB->getNumber()
<< " for ");
for (unsigned int i=0, e = SameTails.size(); i != e; ++i) {
if (commonTailIndex == i)
continue;
DEBUG(dbgs() << "BB#" << SameTails[i].getBlock()->getNumber()
<< (i == e-1 ? "" : ", "));
// Remove MMOs from memory operations as needed.
removeMMOsFromMemoryOperations(SameTails[i].getTailStartPos(), *MBB);
// Hack the end off BB i, making it jump to BB commonTailIndex instead.
ReplaceTailWithBranchTo(SameTails[i].getTailStartPos(), MBB);
// BB i is no longer a predecessor of SuccBB; remove it from the worklist.
MergePotentials.erase(SameTails[i].getMPIter());
}
DEBUG(dbgs() << "\n");
// We leave commonTailIndex in the worklist in case there are other blocks
// that match it with a smaller number of instructions.
MadeChange = true;
}
return MadeChange;
}
bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
bool MadeChange = false;
if (!EnableTailMerge) return MadeChange;
// First find blocks with no successors.
MergePotentials.clear();
for (MachineFunction::iterator I = MF.begin(), E = MF.end();
I != E && MergePotentials.size() < TailMergeThreshold; ++I) {
if (TriedMerging.count(I))
continue;
if (I->succ_empty())
MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(I), I));
}
// If this is a large problem, avoid visiting the same basic blocks
// multiple times.
if (MergePotentials.size() == TailMergeThreshold)
for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
TriedMerging.insert(MergePotentials[i].getBlock());
// See if we can do any tail merging on those.
if (MergePotentials.size() >= 2)
MadeChange |= TryTailMergeBlocks(nullptr, nullptr);
// Look at blocks (IBB) with multiple predecessors (PBB).
// We change each predecessor to a canonical form, by
// (1) temporarily removing any unconditional branch from the predecessor
// to IBB, and
  // (2) altering conditional branches so they branch to the other block,
// not IBB; this may require adding back an unconditional branch to IBB
// later, where there wasn't one coming in. E.g.
// Bcc IBB
// fallthrough to QBB
// here becomes
// Bncc QBB
// with a conceptual B to IBB after that, which never actually exists.
// With those changes, we see whether the predecessors' tails match,
// and merge them if so. We change things out of canonical form and
// back to the way they were later in the process. (OptimizeBranches
// would undo some of this, but we can't use it, because we'd get into
// a compile-time infinite loop repeatedly doing and undoing the same
// transformations.)
for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
I != E; ++I) {
if (I->pred_size() < 2) continue;
SmallPtrSet<MachineBasicBlock *, 8> UniquePreds;
MachineBasicBlock *IBB = I;
MachineBasicBlock *PredBB = std::prev(I);
MergePotentials.clear();
for (MachineBasicBlock::pred_iterator P = I->pred_begin(),
E2 = I->pred_end();
P != E2 && MergePotentials.size() < TailMergeThreshold; ++P) {
MachineBasicBlock *PBB = *P;
if (TriedMerging.count(PBB))
continue;
      // Skip blocks that loop to themselves; we can't tail merge these.
if (PBB == IBB)
continue;
// Visit each predecessor only once.
if (!UniquePreds.insert(PBB).second)
continue;
// Skip blocks which may jump to a landing pad. Can't tail merge these.
if (PBB->getLandingPadSuccessor())
continue;
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*PBB, TBB, FBB, Cond, true)) {
// Failing case: IBB is the target of a cbr, and we cannot reverse the
// branch.
SmallVector<MachineOperand, 4> NewCond(Cond);
if (!Cond.empty() && TBB == IBB) {
if (TII->ReverseBranchCondition(NewCond))
continue;
        // This is the QBB case described above.
if (!FBB)
FBB = std::next(MachineFunction::iterator(PBB));
}
// Failing case: the only way IBB can be reached from PBB is via
// exception handling. Happens for landing pads. Would be nice to have
// a bit in the edge so we didn't have to do all this.
if (IBB->isLandingPad()) {
MachineFunction::iterator IP = PBB; IP++;
MachineBasicBlock *PredNextBB = nullptr;
if (IP != MF.end())
PredNextBB = IP;
if (!TBB) {
if (IBB != PredNextBB) // fallthrough
continue;
} else if (FBB) {
if (TBB != IBB && FBB != IBB) // cbr then ubr
continue;
} else if (Cond.empty()) {
if (TBB != IBB) // ubr
continue;
} else {
if (TBB != IBB && IBB != PredNextBB) // cbr
continue;
}
}
// Remove the unconditional branch at the end, if any.
if (TBB && (Cond.empty() || FBB)) {
DebugLoc dl; // FIXME: this is nowhere
TII->RemoveBranch(*PBB);
if (!Cond.empty())
// reinsert conditional branch only, for now
TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, nullptr,
NewCond, dl);
}
MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB), *P));
}
}
// If this is a large problem, avoid visiting the same basic blocks multiple
// times.
if (MergePotentials.size() == TailMergeThreshold)
for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
TriedMerging.insert(MergePotentials[i].getBlock());
if (MergePotentials.size() >= 2)
MadeChange |= TryTailMergeBlocks(IBB, PredBB);
// Reinsert an unconditional branch if needed. The 1 below can occur as a
// result of removing blocks in TryTailMergeBlocks.
PredBB = std::prev(I); // this may have been changed in TryTailMergeBlocks
if (MergePotentials.size() == 1 &&
MergePotentials.begin()->getBlock() != PredBB)
FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
}
return MadeChange;
}
void BranchFolder::setCommonTailEdgeWeights(MachineBasicBlock &TailMBB) {
SmallVector<BlockFrequency, 2> EdgeFreqLs(TailMBB.succ_size());
BlockFrequency AccumulatedMBBFreq;
// Aggregate edge frequency of successor edge j:
// edgeFreq(j) = sum (freq(bb) * edgeProb(bb, j)),
// where bb is a basic block that is in SameTails.
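  // Worked example (hypothetical numbers): with SameTails = {A, B},
  // freq(A) = 8, freq(B) = 4, edgeProb(A, S1) = 0.75 and
  // edgeProb(B, S1) = 0.5, the aggregate is
  // edgeFreq(S1) = 8 * 0.75 + 4 * 0.5 = 8.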
for (const auto &Src : SameTails) {
const MachineBasicBlock *SrcMBB = Src.getBlock();
BlockFrequency BlockFreq = MBBFreqInfo.getBlockFreq(SrcMBB);
AccumulatedMBBFreq += BlockFreq;
    // It is not necessary to recompute edge weights if TailMBB has fewer
    // than two successors.
if (TailMBB.succ_size() <= 1)
continue;
auto EdgeFreq = EdgeFreqLs.begin();
for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
SuccI != SuccE; ++SuccI, ++EdgeFreq)
*EdgeFreq += BlockFreq * MBPI.getEdgeProbability(SrcMBB, *SuccI);
}
MBBFreqInfo.setBlockFreq(&TailMBB, AccumulatedMBBFreq);
if (TailMBB.succ_size() <= 1)
return;
auto MaxEdgeFreq = *std::max_element(EdgeFreqLs.begin(), EdgeFreqLs.end());
uint64_t Scale = MaxEdgeFreq.getFrequency() / UINT32_MAX + 1;
auto EdgeFreq = EdgeFreqLs.begin();
for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
SuccI != SuccE; ++SuccI, ++EdgeFreq)
TailMBB.setSuccWeight(SuccI, EdgeFreq->getFrequency() / Scale);
}
//===----------------------------------------------------------------------===//
// Branch Optimization
//===----------------------------------------------------------------------===//
bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
bool MadeChange = false;
// Make sure blocks are numbered in order
MF.RenumberBlocks();
for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
I != E; ) {
MachineBasicBlock *MBB = I++;
MadeChange |= OptimizeBlock(MBB);
// If it is dead, remove it.
if (MBB->pred_empty()) {
RemoveDeadBlock(MBB);
MadeChange = true;
++NumDeadBlocks;
}
}
return MadeChange;
}
// Blocks should be considered empty if they contain only debug info;
// else the debug info would affect codegen.
static bool IsEmptyBlock(MachineBasicBlock *MBB) {
return MBB->getFirstNonDebugInstr() == MBB->end();
}
// Blocks with only debug info and branches should be considered the same
// as blocks with only branches.
static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) {
MachineBasicBlock::iterator I = MBB->getFirstNonDebugInstr();
assert(I != MBB->end() && "empty block!");
return I->isBranch();
}
/// IsBetterFallthrough - Return true if it would be clearly better to
/// fall-through to MBB1 than to fall through into MBB2. This has to return
/// a strict ordering; returning true for both (MBB1,MBB2) and (MBB2,MBB1)
/// would result in infinite loops.
static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
MachineBasicBlock *MBB2) {
// Right now, we use a simple heuristic. If MBB2 ends with a call, and
// MBB1 doesn't, we prefer to fall through into MBB1. This allows us to
// optimize branches that branch to either a return block or an assert block
// into a fallthrough to the return.
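  // For instance (hypothetical): when choosing between a plain return block
  // and a block ending in a call to abort, the heuristic prefers falling
  // through into the return block.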
MachineBasicBlock::iterator MBB1I = MBB1->getLastNonDebugInstr();
MachineBasicBlock::iterator MBB2I = MBB2->getLastNonDebugInstr();
if (MBB1I == MBB1->end() || MBB2I == MBB2->end())
return false;
  // If there is a clear successor ordering, we make sure that one block
  // will fall through to the next.
if (MBB1->isSuccessor(MBB2)) return true;
if (MBB2->isSuccessor(MBB1)) return false;
return MBB2I->isCall() && !MBB1I->isCall();
}
/// getBranchDebugLoc - Find and return, if any, the DebugLoc of the branch
/// instructions on the block.
static DebugLoc getBranchDebugLoc(MachineBasicBlock &MBB) {
MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
if (I != MBB.end() && I->isBranch())
return I->getDebugLoc();
return DebugLoc();
}
/// OptimizeBlock - Analyze and optimize control flow related to the specified
/// block. This is never called on the entry block.
bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
bool MadeChange = false;
MachineFunction &MF = *MBB->getParent();
ReoptimizeBlock:
MachineFunction::iterator FallThrough = MBB;
++FallThrough;
// If this block is empty, make everyone use its fall-through, not the block
// explicitly. Landing pads should not do this since the landing-pad table
// points to this block. Blocks with their addresses taken shouldn't be
// optimized away.
if (IsEmptyBlock(MBB) && !MBB->isLandingPad() && !MBB->hasAddressTaken()) {
// Dead block? Leave for cleanup later.
if (MBB->pred_empty()) return MadeChange;
if (FallThrough == MF.end()) {
// TODO: Simplify preds to not branch here if possible!
} else if (FallThrough->isLandingPad()) {
      // Don't rewrite to a landing pad fallthrough. That could lead to the case
// where a BB jumps to more than one landing pad.
// TODO: Is it ever worth rewriting predecessors which don't already
// jump to a landing pad, and so can safely jump to the fallthrough?
} else {
// Rewrite all predecessors of the old block to go to the fallthrough
// instead.
while (!MBB->pred_empty()) {
MachineBasicBlock *Pred = *(MBB->pred_end()-1);
Pred->ReplaceUsesOfBlockWith(MBB, FallThrough);
}
// If MBB was the target of a jump table, update jump tables to go to the
// fallthrough instead.
if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
MJTI->ReplaceMBBInJumpTables(MBB, FallThrough);
MadeChange = true;
}
return MadeChange;
}
// Check to see if we can simplify the terminator of the block before this
// one.
MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
SmallVector<MachineOperand, 4> PriorCond;
bool PriorUnAnalyzable =
TII->AnalyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true);
if (!PriorUnAnalyzable) {
// If the CFG for the prior block has extra edges, remove them.
MadeChange |= PrevBB.CorrectExtraCFGEdges(PriorTBB, PriorFBB,
!PriorCond.empty());
// If the previous branch is conditional and both conditions go to the same
// destination, remove the branch, replacing it with an unconditional one or
// a fall-through.
if (PriorTBB && PriorTBB == PriorFBB) {
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
PriorCond.clear();
if (PriorTBB != MBB)
TII->InsertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
}
// If the previous block unconditionally falls through to this block and
// this block has no other predecessors, move the contents of this block
// into the prior block. This doesn't usually happen when SimplifyCFG
// has been used, but it can happen if tail merging splits a fall-through
// predecessor of a block.
// This has to check PrevBB->succ_size() because EH edges are ignored by
// AnalyzeBranch.
if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 &&
PrevBB.succ_size() == 1 &&
!MBB->hasAddressTaken() && !MBB->isLandingPad()) {
DEBUG(dbgs() << "\nMerging into block: " << PrevBB
<< "From MBB: " << *MBB);
// Remove redundant DBG_VALUEs first.
if (PrevBB.begin() != PrevBB.end()) {
MachineBasicBlock::iterator PrevBBIter = PrevBB.end();
--PrevBBIter;
MachineBasicBlock::iterator MBBIter = MBB->begin();
// Check if DBG_VALUE at the end of PrevBB is identical to the
// DBG_VALUE at the beginning of MBB.
while (PrevBBIter != PrevBB.begin() && MBBIter != MBB->end()
&& PrevBBIter->isDebugValue() && MBBIter->isDebugValue()) {
if (!MBBIter->isIdenticalTo(PrevBBIter))
break;
MachineInstr *DuplicateDbg = MBBIter;
++MBBIter; -- PrevBBIter;
DuplicateDbg->eraseFromParent();
}
}
PrevBB.splice(PrevBB.end(), MBB, MBB->begin(), MBB->end());
PrevBB.removeSuccessor(PrevBB.succ_begin());
assert(PrevBB.succ_empty());
PrevBB.transferSuccessors(MBB);
MadeChange = true;
return MadeChange;
}
// If the previous branch *only* branches to *this* block (conditional or
// not) remove the branch.
if (PriorTBB == MBB && !PriorFBB) {
TII->RemoveBranch(PrevBB);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
}
// If the prior block branches somewhere else on the condition and here if
// the condition is false, remove the uncond second branch.
if (PriorFBB == MBB) {
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
}
// If the prior block branches here on true and somewhere else on false, and
// if the branch condition is reversible, reverse the branch to create a
// fall-through.
if (PriorTBB == MBB) {
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
if (!TII->ReverseBranchCondition(NewPriorCond)) {
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, PriorFBB, nullptr, NewPriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
}
}
// If this block has no successors (e.g. it is a return block or ends with
// a call to a no-return function like abort or __cxa_throw) and if the pred
// falls through into this block, and if it would otherwise fall through
// into the block after this, move this block to the end of the function.
//
    // We consider it more likely that execution will stay in the function
    // (e.g. due to loops) than to exit it. This helps with asserts in loops,
    // etc., by moving the assert condition out of the loop body.
if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB &&
MachineFunction::iterator(PriorTBB) == FallThrough &&
!MBB->canFallThrough()) {
bool DoTransform = true;
// We have to be careful that the succs of PredBB aren't both no-successor
      // blocks. If neither has successors and if PredBB is the second from
// last block in the function, we'd just keep swapping the two blocks for
// last. Only do the swap if one is clearly better to fall through than
// the other.
if (FallThrough == --MF.end() &&
!IsBetterFallthrough(PriorTBB, MBB))
DoTransform = false;
if (DoTransform) {
// Reverse the branch so we will fall through on the previous true cond.
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
if (!TII->ReverseBranchCondition(NewPriorCond)) {
DEBUG(dbgs() << "\nMoving MBB: " << *MBB
<< "To make fallthrough to: " << *PriorTBB << "\n");
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, MBB, nullptr, NewPriorCond, dl);
// Move this block to the end of the function.
MBB->moveAfter(--MF.end());
MadeChange = true;
++NumBranchOpts;
return MadeChange;
}
}
}
}
// Analyze the branch in the current block.
MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
SmallVector<MachineOperand, 4> CurCond;
bool CurUnAnalyzable= TII->AnalyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true);
if (!CurUnAnalyzable) {
// If the CFG for the prior block has extra edges, remove them.
MadeChange |= MBB->CorrectExtraCFGEdges(CurTBB, CurFBB, !CurCond.empty());
// If this is a two-way branch, and the FBB branches to this block, reverse
// the condition so the single-basic-block loop is faster. Instead of:
// Loop: xxx; jcc Out; jmp Loop
// we want:
// Loop: xxx; jncc Loop; jmp Out
if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) {
SmallVector<MachineOperand, 4> NewCond(CurCond);
if (!TII->ReverseBranchCondition(NewCond)) {
DebugLoc dl = getBranchDebugLoc(*MBB);
TII->RemoveBranch(*MBB);
TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
}
}
// If this branch is the only thing in its block, see if we can forward
// other blocks across it.
if (CurTBB && CurCond.empty() && !CurFBB &&
IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
!MBB->hasAddressTaken()) {
DebugLoc dl = getBranchDebugLoc(*MBB);
// This block may contain just an unconditional branch. Because there can
// be 'non-branch terminators' in the block, try removing the branch and
// then seeing if the block is empty.
TII->RemoveBranch(*MBB);
// If the only things remaining in the block are debug info, remove these
// as well, so this will behave the same as an empty block in non-debug
// mode.
if (IsEmptyBlock(MBB)) {
// Make the block empty, losing the debug info (we could probably
// improve this in some cases.)
MBB->erase(MBB->begin(), MBB->end());
}
// If this block is just an unconditional branch to CurTBB, we can
// usually completely eliminate the block. The only case we cannot
// completely eliminate the block is when the block before this one
// falls through into MBB and we can't understand the prior block's branch
// condition.
if (MBB->empty()) {
bool PredHasNoFallThrough = !PrevBB.canFallThrough();
if (PredHasNoFallThrough || !PriorUnAnalyzable ||
!PrevBB.isSuccessor(MBB)) {
// If the prior block falls through into us, turn it into an
// explicit branch to us to make updates simpler.
if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) &&
PriorTBB != MBB && PriorFBB != MBB) {
if (!PriorTBB) {
assert(PriorCond.empty() && !PriorFBB &&
"Bad branch analysis");
PriorTBB = MBB;
} else {
assert(!PriorFBB && "Machine CFG out of date!");
PriorFBB = MBB;
}
DebugLoc pdl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, pdl);
}
          // Iterate through all the predecessors, revectoring each in turn.
size_t PI = 0;
bool DidChange = false;
bool HasBranchToSelf = false;
while(PI != MBB->pred_size()) {
MachineBasicBlock *PMBB = *(MBB->pred_begin() + PI);
if (PMBB == MBB) {
// If this block has an uncond branch to itself, leave it.
++PI;
HasBranchToSelf = true;
} else {
DidChange = true;
PMBB->ReplaceUsesOfBlockWith(MBB, CurTBB);
// If this change resulted in PMBB ending in a conditional
// branch where both conditions go to the same destination,
// change this to an unconditional branch (and fix the CFG).
MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr;
SmallVector<MachineOperand, 4> NewCurCond;
bool NewCurUnAnalyzable = TII->AnalyzeBranch(*PMBB, NewCurTBB,
NewCurFBB, NewCurCond, true);
if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
DebugLoc pdl = getBranchDebugLoc(*PMBB);
TII->RemoveBranch(*PMBB);
NewCurCond.clear();
TII->InsertBranch(*PMBB, NewCurTBB, nullptr, NewCurCond, pdl);
MadeChange = true;
++NumBranchOpts;
PMBB->CorrectExtraCFGEdges(NewCurTBB, nullptr, false);
}
}
}
// Change any jumptables to go to the new MBB.
if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
MJTI->ReplaceMBBInJumpTables(MBB, CurTBB);
if (DidChange) {
++NumBranchOpts;
MadeChange = true;
if (!HasBranchToSelf) return MadeChange;
}
}
}
// Add the branch back if the block is more than just an uncond branch.
TII->InsertBranch(*MBB, CurTBB, nullptr, CurCond, dl);
}
}
// If the prior block doesn't fall through into this block, and if this
// block doesn't fall through into some other block, see if we can find a
// place to move this block where a fall-through will happen.
if (!PrevBB.canFallThrough()) {
// Now we know that there was no fall-through into this block, check to
// see if it has a fall-through into its successor.
bool CurFallsThru = MBB->canFallThrough();
if (!MBB->isLandingPad()) {
// Check all the predecessors of this block. If one of them has no fall
// throughs, move this block right after it.
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
E = MBB->pred_end(); PI != E; ++PI) {
// Analyze the branch at the end of the pred.
MachineBasicBlock *PredBB = *PI;
MachineFunction::iterator PredFallthrough = PredBB; ++PredFallthrough;
MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
SmallVector<MachineOperand, 4> PredCond;
if (PredBB != MBB && !PredBB->canFallThrough() &&
!TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true)
&& (!CurFallsThru || !CurTBB || !CurFBB)
&& (!CurFallsThru || MBB->getNumber() >= PredBB->getNumber())) {
// If the current block doesn't fall through, just move it.
// If the current block can fall through and does not end with a
// conditional branch, we need to append an unconditional jump to
// the (current) next block. To avoid a possible compile-time
// infinite loop, move blocks only backward in this case.
// Also, if there are already 2 branches here, we cannot add a third;
// this means we have the case
// Bcc next
// B elsewhere
// next:
if (CurFallsThru) {
MachineBasicBlock *NextBB =
std::next(MachineFunction::iterator(MBB));
CurCond.clear();
TII->InsertBranch(*MBB, NextBB, nullptr, CurCond, DebugLoc());
}
MBB->moveAfter(PredBB);
MadeChange = true;
goto ReoptimizeBlock;
}
}
}
if (!CurFallsThru) {
// Check all successors to see if we can move this block before it.
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
E = MBB->succ_end(); SI != E; ++SI) {
// Analyze the branch at the end of the block before the succ.
MachineBasicBlock *SuccBB = *SI;
MachineFunction::iterator SuccPrev = SuccBB; --SuccPrev;
// If this block doesn't already fall-through to that successor, and if
// the succ doesn't already have a block that can fall through into it,
// and if the successor isn't an EH destination, we can arrange for the
// fallthrough to happen.
if (SuccBB != MBB && &*SuccPrev != MBB &&
!SuccPrev->canFallThrough() && !CurUnAnalyzable &&
!SuccBB->isLandingPad()) {
MBB->moveBefore(SuccBB);
MadeChange = true;
goto ReoptimizeBlock;
}
}
// Okay, there is no really great place to put this block. If, however,
// the block before this one would be a fall-through if this block were
// removed, move this block to the end of the function.
MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr;
SmallVector<MachineOperand, 4> PrevCond;
if (FallThrough != MF.end() &&
!TII->AnalyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) &&
PrevBB.isSuccessor(FallThrough)) {
MBB->moveAfter(--MF.end());
MadeChange = true;
return MadeChange;
}
}
}
return MadeChange;
}
//===----------------------------------------------------------------------===//
// Hoist Common Code
//===----------------------------------------------------------------------===//
/// HoistCommonCode - Hoist common instruction sequences at the start of basic
/// blocks to their common predecessor.
bool BranchFolder::HoistCommonCode(MachineFunction &MF) {
bool MadeChange = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ) {
MachineBasicBlock *MBB = I++;
MadeChange |= HoistCommonCodeInSuccs(MBB);
}
return MadeChange;
}
/// findFalseBlock - BB has a fallthrough. Find its 'false' successor given
/// its 'true' successor.
static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
MachineBasicBlock *TrueBB) {
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
E = BB->succ_end(); SI != E; ++SI) {
MachineBasicBlock *SuccBB = *SI;
if (SuccBB != TrueBB)
return SuccBB;
}
return nullptr;
}
/// findHoistingInsertPosAndDeps - Find the location to move common instructions
/// in successors to. The location is usually just before the terminator;
/// however, if the terminator is a conditional branch and its previous
/// instruction is the flag setting instruction, the previous instruction is
/// the preferred location. This function also gathers uses and defs of the
/// instructions from the insertion point to the end of the block. The data is
/// used by HoistCommonCodeInSuccs to ensure safety.
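/// For example (hypothetical target): for a block ending in
///   CMPri %r0, 42    ; sets the flags register
///   Bcc %target
/// the preferred insertion point is above the CMPri, so hoisted code does
/// not separate the flag-setting instruction from the conditional branch.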
static
MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI,
SmallSet<unsigned,4> &Uses,
SmallSet<unsigned,4> &Defs) {
MachineBasicBlock::iterator Loc = MBB->getFirstTerminator();
if (!TII->isUnpredicatedTerminator(Loc))
return MBB->end();
for (unsigned i = 0, e = Loc->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = Loc->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isUse()) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Uses.insert(*AI);
} else {
if (!MO.isDead())
// Don't try to hoist code in the rare case the terminator defines a
// register that is later used.
return MBB->end();
// If the terminator defines a register, make sure we don't hoist
// the instruction whose def might be clobbered by the terminator.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Defs.insert(*AI);
}
}
if (Uses.empty())
return Loc;
if (Loc == MBB->begin())
return MBB->end();
  // The terminator is probably a conditional branch; try not to separate the
  // branch from the condition-setting instruction.
MachineBasicBlock::iterator PI = Loc;
--PI;
while (PI != MBB->begin() && PI->isDebugValue())
--PI;
bool IsDef = false;
for (unsigned i = 0, e = PI->getNumOperands(); !IsDef && i != e; ++i) {
const MachineOperand &MO = PI->getOperand(i);
// If PI has a regmask operand, it is probably a call. Separate away.
if (MO.isRegMask())
return Loc;
if (!MO.isReg() || MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (Uses.count(Reg))
IsDef = true;
}
if (!IsDef)
// The condition setting instruction is not just before the conditional
// branch.
return Loc;
  // Be conservative: don't insert an instruction above something that may
  // have side effects. And since it's potentially bad to separate the
  // flag-setting instruction from the conditional branch, just abort the
  // optimization completely.
  // Also avoid moving code above a predicated instruction, since it's hard
  // to reason about register liveness with predicated instructions.
bool DontMoveAcrossStore = true;
if (!PI->isSafeToMove(nullptr, DontMoveAcrossStore) || TII->isPredicated(PI))
return MBB->end();
// Find out what registers are live. Note this routine is ignoring other live
// registers which are only used by instructions in successor blocks.
for (unsigned i = 0, e = PI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = PI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isUse()) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Uses.insert(*AI);
} else {
if (Uses.erase(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
Uses.erase(*SubRegs); // Use sub-registers to be conservative
}
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Defs.insert(*AI);
}
}
return PI;
}
/// HoistCommonCodeInSuccs - If the successors of MBB share a common
/// instruction sequence at their start, move those instructions before MBB's
/// terminator if it's legal.
bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, true) || !TBB || Cond.empty())
return false;
if (!FBB) FBB = findFalseBlock(MBB, TBB);
if (!FBB)
// Malformed bcc? True and false blocks are the same?
return false;
  // Restrict the optimization to cases where MBB is the only predecessor;
  // there it is an obvious win.
if (TBB->pred_size() > 1 || FBB->pred_size() > 1)
return false;
// Find a suitable position to hoist the common instructions to. Also figure
// out which registers are used or defined by instructions from the insertion
// point to the end of the block.
SmallSet<unsigned, 4> Uses, Defs;
MachineBasicBlock::iterator Loc =
findHoistingInsertPosAndDeps(MBB, TII, TRI, Uses, Defs);
if (Loc == MBB->end())
return false;
bool HasDups = false;
SmallVector<unsigned, 4> LocalDefs;
SmallSet<unsigned, 4> LocalDefsSet;
MachineBasicBlock::iterator TIB = TBB->begin();
MachineBasicBlock::iterator FIB = FBB->begin();
MachineBasicBlock::iterator TIE = TBB->end();
MachineBasicBlock::iterator FIE = FBB->end();
while (TIB != TIE && FIB != FIE) {
// Skip dbg_value instructions. These do not count.
if (TIB->isDebugValue()) {
while (TIB != TIE && TIB->isDebugValue())
++TIB;
if (TIB == TIE)
break;
}
if (FIB->isDebugValue()) {
while (FIB != FIE && FIB->isDebugValue())
++FIB;
if (FIB == FIE)
break;
}
if (!TIB->isIdenticalTo(FIB, MachineInstr::CheckKillDead))
break;
if (TII->isPredicated(TIB))
      // Hard to reason about register liveness with predicated instructions.
break;
bool IsSafe = true;
for (unsigned i = 0, e = TIB->getNumOperands(); i != e; ++i) {
MachineOperand &MO = TIB->getOperand(i);
// Don't attempt to hoist instructions with register masks.
if (MO.isRegMask()) {
IsSafe = false;
break;
}
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isDef()) {
if (Uses.count(Reg)) {
// Avoid clobbering a register that's used by the instruction at
// the point of insertion.
IsSafe = false;
break;
}
if (Defs.count(Reg) && !MO.isDead()) {
        // Don't hoist the instruction if its def would be clobbered by the
        // instruction at the point of insertion. FIXME: This is overly
// conservative. It should be possible to hoist the instructions
// in BB2 in the following example:
// BB1:
// r1, eflag = op1 r2, r3
// brcc eflag
//
// BB2:
// r1 = op2, ...
// = op3, r1<kill>
IsSafe = false;
break;
}
} else if (!LocalDefsSet.count(Reg)) {
if (Defs.count(Reg)) {
// Use is defined by the instruction at the point of insertion.
IsSafe = false;
break;
}
if (MO.isKill() && Uses.count(Reg))
// Kills a register that's read by the instruction at the point of
// insertion. Remove the kill marker.
MO.setIsKill(false);
}
}
if (!IsSafe)
break;
bool DontMoveAcrossStore = true;
if (!TIB->isSafeToMove(nullptr, DontMoveAcrossStore))
break;
    // Remove kills from LocalDefsSet; these registers had short live ranges.
for (unsigned i = 0, e = TIB->getNumOperands(); i != e; ++i) {
MachineOperand &MO = TIB->getOperand(i);
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
continue;
unsigned Reg = MO.getReg();
if (!Reg || !LocalDefsSet.count(Reg))
continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
LocalDefsSet.erase(*AI);
}
// Track local defs so we can update liveins.
for (unsigned i = 0, e = TIB->getNumOperands(); i != e; ++i) {
MachineOperand &MO = TIB->getOperand(i);
if (!MO.isReg() || !MO.isDef() || MO.isDead())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
LocalDefs.push_back(Reg);
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
LocalDefsSet.insert(*AI);
}
HasDups = true;
++TIB;
++FIB;
}
if (!HasDups)
return false;
MBB->splice(Loc, TBB, TBB->begin(), TIB);
FBB->erase(FBB->begin(), FIB);
  // Update live-ins.
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Def = LocalDefs[i];
if (LocalDefsSet.count(Def)) {
TBB->addLiveIn(Def);
FBB->addLiveIn(Def);
}
}
++NumHoist;
return true;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/FaultMaps.cpp | //===---------------------------- FaultMaps.cpp ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
#define DEBUG_TYPE "faultmaps"
static const int FaultMapVersion = 1;
const char *FaultMaps::WFMP = "Fault Maps: ";
FaultMaps::FaultMaps(AsmPrinter &AP) : AP(AP) {}
void FaultMaps::recordFaultingOp(FaultKind FaultTy,
const MCSymbol *HandlerLabel) {
MCContext &OutContext = AP.OutStreamer->getContext();
MCSymbol *FaultingLabel = OutContext.createTempSymbol();
AP.OutStreamer->EmitLabel(FaultingLabel);
const MCExpr *FaultingOffset = MCBinaryExpr::createSub(
MCSymbolRefExpr::create(FaultingLabel, OutContext),
MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext), OutContext);
const MCExpr *HandlerOffset = MCBinaryExpr::createSub(
MCSymbolRefExpr::create(HandlerLabel, OutContext),
MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext), OutContext);
FunctionInfos[AP.CurrentFnSym].emplace_back(FaultTy, FaultingOffset,
HandlerOffset);
}
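// On-disk layout sketch (version 1; mirrors the emission code below):
//   Header       : uint8 version, uint8 reserved, uint16 reserved,
//                  uint32 NumFunctions
//   Per function : uint64 function address, uint32 NumFaultingPCs,
//                  uint32 reserved
//   Per fault    : uint32 fault kind, uint32 faulting PC offset,
//                  uint32 handler PC offset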
void FaultMaps::serializeToFaultMapSection() {
if (FunctionInfos.empty())
return;
MCContext &OutContext = AP.OutStreamer->getContext();
MCStreamer &OS = *AP.OutStreamer;
// Create the section.
MCSection *FaultMapSection =
OutContext.getObjectFileInfo()->getFaultMapSection();
OS.SwitchSection(FaultMapSection);
// Emit a dummy symbol to force section inclusion.
OS.EmitLabel(OutContext.getOrCreateSymbol(Twine("__LLVM_FaultMaps")));
DEBUG(dbgs() << "********** Fault Map Output **********\n");
// Header
OS.EmitIntValue(FaultMapVersion, 1); // Version.
OS.EmitIntValue(0, 1); // Reserved.
OS.EmitIntValue(0, 2); // Reserved.
DEBUG(dbgs() << WFMP << "#functions = " << FunctionInfos.size() << "\n");
OS.EmitIntValue(FunctionInfos.size(), 4);
DEBUG(dbgs() << WFMP << "functions:\n");
for (const auto &FFI : FunctionInfos)
emitFunctionInfo(FFI.first, FFI.second);
}
void FaultMaps::emitFunctionInfo(const MCSymbol *FnLabel,
const FunctionFaultInfos &FFI) {
MCStreamer &OS = *AP.OutStreamer;
DEBUG(dbgs() << WFMP << " function addr: " << *FnLabel << "\n");
OS.EmitSymbolValue(FnLabel, 8);
DEBUG(dbgs() << WFMP << " #faulting PCs: " << FFI.size() << "\n");
OS.EmitIntValue(FFI.size(), 4);
OS.EmitIntValue(0, 4); // Reserved
for (auto &Fault : FFI) {
DEBUG(dbgs() << WFMP << " fault type: "
<< faultTypeToString(Fault.Kind) << "\n");
OS.EmitIntValue(Fault.Kind, 4);
DEBUG(dbgs() << WFMP << " faulting PC offset: "
<< *Fault.FaultingOffsetExpr << "\n");
OS.EmitValue(Fault.FaultingOffsetExpr, 4);
DEBUG(dbgs() << WFMP << " fault handler PC offset: "
<< *Fault.HandlerOffsetExpr << "\n");
OS.EmitValue(Fault.HandlerOffsetExpr, 4);
}
}
const char *FaultMaps::faultTypeToString(FaultMaps::FaultKind FT) {
switch (FT) {
default:
llvm_unreachable("unhandled fault type!");
case FaultMaps::FaultingLoad:
return "FaultingLoad";
}
}
raw_ostream &llvm::
operator<<(raw_ostream &OS,
const FaultMapParser::FunctionFaultInfoAccessor &FFI) {
OS << "Fault kind: "
<< FaultMaps::faultTypeToString((FaultMaps::FaultKind)FFI.getFaultKind())
<< ", faulting PC offset: " << FFI.getFaultingPCOffset()
<< ", handling PC offset: " << FFI.getHandlerPCOffset();
return OS;
}
raw_ostream &llvm::
operator<<(raw_ostream &OS, const FaultMapParser::FunctionInfoAccessor &FI) {
OS << "FunctionAddress: " << format_hex(FI.getFunctionAddr(), 8)
<< ", NumFaultingPCs: " << FI.getNumFaultingPCs() << "\n";
for (unsigned i = 0, e = FI.getNumFaultingPCs(); i != e; ++i)
OS << FI.getFunctionFaultInfoAt(i) << "\n";
return OS;
}
raw_ostream &llvm::operator<<(raw_ostream &OS, const FaultMapParser &FMP) {
OS << "Version: " << format_hex(FMP.getFaultMapVersion(), 2) << "\n";
OS << "NumFunctions: " << FMP.getNumFunctions() << "\n";
if (FMP.getNumFunctions() == 0)
return OS;
FaultMapParser::FunctionInfoAccessor FI;
for (unsigned i = 0, e = FMP.getNumFunctions(); i != e; ++i) {
FI = (i == 0) ? FMP.getFirstFunctionInfo() : FI.getNextFunctionInfo();
OS << FI;
}
return OS;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveRangeCalc.cpp | //===---- LiveRangeCalc.cpp - Calculate live ranges -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the LiveRangeCalc class.
//
//===----------------------------------------------------------------------===//
#include "LiveRangeCalc.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
void LiveRangeCalc::resetLiveOutMap() {
unsigned NumBlocks = MF->getNumBlockIDs();
Seen.clear();
Seen.resize(NumBlocks);
Map.resize(NumBlocks);
}
void LiveRangeCalc::reset(const MachineFunction *mf,
SlotIndexes *SI,
MachineDominatorTree *MDT,
VNInfo::Allocator *VNIA) {
MF = mf;
MRI = &MF->getRegInfo();
Indexes = SI;
DomTree = MDT;
Alloc = VNIA;
resetLiveOutMap();
LiveIn.clear();
}
static void createDeadDef(SlotIndexes &Indexes, VNInfo::Allocator &Alloc,
LiveRange &LR, const MachineOperand &MO) {
const MachineInstr *MI = MO.getParent();
SlotIndex DefIdx =
Indexes.getInstructionIndex(MI).getRegSlot(MO.isEarlyClobber());
// Create the def in LR. This may find an existing def.
LR.createDeadDef(DefIdx, Alloc);
}
void LiveRangeCalc::calculate(LiveInterval &LI, bool TrackSubRegs) {
assert(MRI && Indexes && "call reset() first");
// Step 1: Create minimal live segments for every definition of Reg.
// Visit all def operands. If the same instruction has multiple defs of Reg,
// createDeadDef() will deduplicate.
const TargetRegisterInfo &TRI = *MRI->getTargetRegisterInfo();
unsigned Reg = LI.reg;
for (const MachineOperand &MO : MRI->reg_nodbg_operands(Reg)) {
if (!MO.isDef() && !MO.readsReg())
continue;
unsigned SubReg = MO.getSubReg();
if (LI.hasSubRanges() || (SubReg != 0 && TrackSubRegs)) {
unsigned Mask = SubReg != 0 ? TRI.getSubRegIndexLaneMask(SubReg)
: MRI->getMaxLaneMaskForVReg(Reg);
// If this is the first time we see a subregister def, initialize
// subranges by creating a copy of the main range.
if (!LI.hasSubRanges() && !LI.empty()) {
unsigned ClassMask = MRI->getMaxLaneMaskForVReg(Reg);
LI.createSubRangeFrom(*Alloc, ClassMask, LI);
}
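      // Worked example (hypothetical lane masks): if S.LaneMask = 0b0110
      // and the def covers Mask = 0b0011, then Common = 0b0010 and
      // LRest = 0b0100; S is narrowed to LRest and a fresh subrange is
      // created for Common.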
for (LiveInterval::SubRange &S : LI.subranges()) {
// A Mask for subregs common to the existing subrange and current def.
unsigned Common = S.LaneMask & Mask;
if (Common == 0)
continue;
// A Mask for subregs covered by the subrange but not the current def.
unsigned LRest = S.LaneMask & ~Mask;
LiveInterval::SubRange *CommonRange;
if (LRest != 0) {
// Split current subrange into Common and LRest ranges.
S.LaneMask = LRest;
CommonRange = LI.createSubRangeFrom(*Alloc, Common, S);
} else {
assert(Common == S.LaneMask);
CommonRange = &S;
}
if (MO.isDef())
createDeadDef(*Indexes, *Alloc, *CommonRange, MO);
Mask &= ~Common;
}
// Create a new SubRange for subregs we did not cover yet.
if (Mask != 0) {
LiveInterval::SubRange *NewRange = LI.createSubRange(*Alloc, Mask);
if (MO.isDef())
createDeadDef(*Indexes, *Alloc, *NewRange, MO);
}
}
// Create the def in the main liverange. We do not have to do this if
    // subranges are tracked, as we recreate the main range later in this case.
if (MO.isDef() && !LI.hasSubRanges())
createDeadDef(*Indexes, *Alloc, LI, MO);
}
  // We may have created empty live ranges for partially undefined uses; we
  // can't keep them because we won't find defs in them later.
LI.removeEmptySubRanges();
// Step 2: Extend live segments to all uses, constructing SSA form as
// necessary.
if (LI.hasSubRanges()) {
for (LiveInterval::SubRange &S : LI.subranges()) {
resetLiveOutMap();
extendToUses(S, Reg, S.LaneMask);
}
LI.clear();
LI.constructMainRangeFromSubranges(*Indexes, *Alloc);
} else {
resetLiveOutMap();
extendToUses(LI, Reg, ~0u);
}
}
void LiveRangeCalc::createDeadDefs(LiveRange &LR, unsigned Reg) {
assert(MRI && Indexes && "call reset() first");
// Visit all def operands. If the same instruction has multiple defs of Reg,
// LR.createDeadDef() will deduplicate.
for (MachineOperand &MO : MRI->def_operands(Reg))
createDeadDef(*Indexes, *Alloc, LR, MO);
}
void LiveRangeCalc::extendToUses(LiveRange &LR, unsigned Reg, unsigned Mask) {
// Visit all operands that read Reg. This may include partial defs.
const TargetRegisterInfo &TRI = *MRI->getTargetRegisterInfo();
for (MachineOperand &MO : MRI->reg_nodbg_operands(Reg)) {
// Clear all kill flags. They will be reinserted after register allocation
// by LiveIntervalAnalysis::addKillFlags().
if (MO.isUse())
MO.setIsKill(false);
else {
// We only care about uses, but on the main range (mask ~0u) this includes
// the "virtual" reads happening for subregister defs.
if (Mask != ~0u)
continue;
}
if (!MO.readsReg())
continue;
unsigned SubReg = MO.getSubReg();
if (SubReg != 0) {
unsigned SubRegMask = TRI.getSubRegIndexLaneMask(SubReg);
// Ignore uses not covering the current subrange.
if ((SubRegMask & Mask) == 0)
continue;
}
// Determine the actual place of the use.
const MachineInstr *MI = MO.getParent();
unsigned OpNo = (&MO - &MI->getOperand(0));
SlotIndex UseIdx;
if (MI->isPHI()) {
assert(!MO.isDef() && "Cannot handle PHI def of partial register.");
// The actual place where a phi operand is used is the end of the pred
// MBB. PHI operands are paired: (Reg, PredMBB).
UseIdx = Indexes->getMBBEndIdx(MI->getOperand(OpNo+1).getMBB());
} else {
// Check for early-clobber redefs.
bool isEarlyClobber = false;
unsigned DefIdx;
if (MO.isDef())
isEarlyClobber = MO.isEarlyClobber();
else if (MI->isRegTiedToDefOperand(OpNo, &DefIdx)) {
// FIXME: This would be a lot easier if tied early-clobber uses also
// had an early-clobber flag.
isEarlyClobber = MI->getOperand(DefIdx).isEarlyClobber();
}
UseIdx = Indexes->getInstructionIndex(MI).getRegSlot(isEarlyClobber);
}
// MI is reading Reg. We may have visited MI before if it happens to be
    // reading Reg multiple times. That is OK; extend() is idempotent.
extend(LR, UseIdx, Reg);
}
}
void LiveRangeCalc::updateFromLiveIns() {
LiveRangeUpdater Updater;
for (const LiveInBlock &I : LiveIn) {
if (!I.DomNode)
continue;
MachineBasicBlock *MBB = I.DomNode->getBlock();
assert(I.Value && "No live-in value found");
SlotIndex Start, End;
std::tie(Start, End) = Indexes->getMBBRange(MBB);
if (I.Kill.isValid())
// Value is killed inside this block.
End = I.Kill;
else {
// The value is live-through, update LiveOut as well.
// Defer the Domtree lookup until it is needed.
assert(Seen.test(MBB->getNumber()));
Map[MBB] = LiveOutPair(I.Value, nullptr);
}
Updater.setDest(&I.LR);
Updater.add(Start, End, I.Value);
}
LiveIn.clear();
}
void LiveRangeCalc::extend(LiveRange &LR, SlotIndex Use, unsigned PhysReg) {
assert(Use.isValid() && "Invalid SlotIndex");
assert(Indexes && "Missing SlotIndexes");
assert(DomTree && "Missing dominator tree");
MachineBasicBlock *UseMBB = Indexes->getMBBFromIndex(Use.getPrevSlot());
assert(UseMBB && "No MBB at Use");
// Is there a def in the same MBB we can extend?
if (LR.extendInBlock(Indexes->getMBBStartIdx(UseMBB), Use))
return;
// Find the single reaching def, or determine if Use is jointly dominated by
// multiple values, and we may need to create even more phi-defs to preserve
// VNInfo SSA form. Perform a search for all predecessor blocks where we
// know the dominating VNInfo.
if (findReachingDefs(LR, *UseMBB, Use, PhysReg))
return;
// When there were multiple different values, we may need new PHIs.
calculateValues();
}
// This function is called by a client after using the low-level API to add
// live-out and live-in blocks. The unique value optimization is not
// available; SplitEditor::transferValues handles that case directly anyway.
void LiveRangeCalc::calculateValues() {
assert(Indexes && "Missing SlotIndexes");
assert(DomTree && "Missing dominator tree");
updateSSA();
updateFromLiveIns();
}
bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
SlotIndex Use, unsigned PhysReg) {
unsigned UseMBBNum = UseMBB.getNumber();
// Block numbers where LR should be live-in.
SmallVector<unsigned, 16> WorkList(1, UseMBBNum);
// Remember if we have seen more than one value.
bool UniqueVNI = true;
VNInfo *TheVNI = nullptr;
// Using Seen as a visited set, perform a BFS for all reaching defs.
for (unsigned i = 0; i != WorkList.size(); ++i) {
MachineBasicBlock *MBB = MF->getBlockNumbered(WorkList[i]);
#ifndef NDEBUG
if (MBB->pred_empty()) {
MBB->getParent()->verify();
errs() << "Use of " << PrintReg(PhysReg)
<< " does not have a corresponding definition on every path:\n";
const MachineInstr *MI = Indexes->getInstructionFromIndex(Use);
if (MI != nullptr)
errs() << Use << " " << *MI;
llvm_unreachable("Use not jointly dominated by defs.");
}
if (TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
!MBB->isLiveIn(PhysReg)) {
MBB->getParent()->verify();
errs() << "The register " << PrintReg(PhysReg)
<< " needs to be live in to BB#" << MBB->getNumber()
<< ", but is missing from the live-in list.\n";
llvm_unreachable("Invalid global physical register");
}
#endif
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
MachineBasicBlock *Pred = *PI;
// Is this a known live-out block?
if (Seen.test(Pred->getNumber())) {
if (VNInfo *VNI = Map[Pred].first) {
if (TheVNI && TheVNI != VNI)
UniqueVNI = false;
TheVNI = VNI;
}
continue;
}
SlotIndex Start, End;
std::tie(Start, End) = Indexes->getMBBRange(Pred);
// First time we see Pred. Try to determine the live-out value, but set
// it as null if Pred is live-through with an unknown value.
VNInfo *VNI = LR.extendInBlock(Start, End);
setLiveOutValue(Pred, VNI);
if (VNI) {
if (TheVNI && TheVNI != VNI)
UniqueVNI = false;
TheVNI = VNI;
continue;
}
// No, we need a live-in value for Pred as well
if (Pred != &UseMBB)
WorkList.push_back(Pred->getNumber());
else
// Loopback to UseMBB, so value is really live through.
Use = SlotIndex();
}
}
LiveIn.clear();
// Both updateSSA() and LiveRangeUpdater benefit from ordered blocks, but
  // neither requires it. Skip the sorting overhead for small updates.
if (WorkList.size() > 4)
array_pod_sort(WorkList.begin(), WorkList.end());
// If a unique reaching def was found, blit in the live ranges immediately.
if (UniqueVNI) {
LiveRangeUpdater Updater(&LR);
for (SmallVectorImpl<unsigned>::const_iterator I = WorkList.begin(),
E = WorkList.end(); I != E; ++I) {
SlotIndex Start, End;
std::tie(Start, End) = Indexes->getMBBRange(*I);
// Trim the live range in UseMBB.
if (*I == UseMBBNum && Use.isValid())
End = Use;
else
Map[MF->getBlockNumbered(*I)] = LiveOutPair(TheVNI, nullptr);
Updater.add(Start, End, TheVNI);
}
return true;
}
// Multiple values were found, so transfer the work list to the LiveIn array
// where UpdateSSA will use it as a work list.
LiveIn.reserve(WorkList.size());
for (SmallVectorImpl<unsigned>::const_iterator
I = WorkList.begin(), E = WorkList.end(); I != E; ++I) {
MachineBasicBlock *MBB = MF->getBlockNumbered(*I);
addLiveInBlock(LR, DomTree->getNode(MBB));
if (MBB == &UseMBB)
LiveIn.back().Kill = Use;
}
return false;
}
// This is essentially the same iterative algorithm that SSAUpdater uses,
// except we already have a dominator tree, so we don't have to recompute it.
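// Sketch (hypothetical CFG): if value V1 is live-out of one predecessor of a
// join block and value V2 is live-out of the other, the loop below inserts a
// phi-def at the start of the join block, and blocks reached only through
// the join then inherit the phi value.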
void LiveRangeCalc::updateSSA() {
assert(Indexes && "Missing SlotIndexes");
assert(DomTree && "Missing dominator tree");
  // Iterate until convergence.
unsigned Changes;
do {
Changes = 0;
// Propagate live-out values down the dominator tree, inserting phi-defs
// when necessary.
for (LiveInBlock &I : LiveIn) {
MachineDomTreeNode *Node = I.DomNode;
// Skip block if the live-in value has already been determined.
if (!Node)
continue;
MachineBasicBlock *MBB = Node->getBlock();
MachineDomTreeNode *IDom = Node->getIDom();
LiveOutPair IDomValue;
// We need a live-in value to a block with no immediate dominator?
// This is probably an unreachable block that has survived somehow.
bool needPHI = !IDom || !Seen.test(IDom->getBlock()->getNumber());
// IDom dominates all of our predecessors, but it may not be their
// immediate dominator. Check if any of them have live-out values that are
// properly dominated by IDom. If so, we need a phi-def here.
if (!needPHI) {
IDomValue = Map[IDom->getBlock()];
// Cache the DomTree node that defined the value.
if (IDomValue.first && !IDomValue.second)
Map[IDom->getBlock()].second = IDomValue.second =
DomTree->getNode(Indexes->getMBBFromIndex(IDomValue.first->def));
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
LiveOutPair &Value = Map[*PI];
if (!Value.first || Value.first == IDomValue.first)
continue;
// Cache the DomTree node that defined the value.
if (!Value.second)
Value.second =
DomTree->getNode(Indexes->getMBBFromIndex(Value.first->def));
// This predecessor is carrying something other than IDomValue.
// It could be because IDomValue hasn't propagated yet, or it could be
// because MBB is in the dominance frontier of that value.
if (DomTree->dominates(IDom, Value.second)) {
needPHI = true;
break;
}
}
}
// The value may be live-through even if Kill is set, as can happen when
// we are called from extendRange. In that case LiveOutSeen is true, and
// LiveOut indicates a foreign or missing value.
LiveOutPair &LOP = Map[MBB];
// Create a phi-def if required.
if (needPHI) {
++Changes;
assert(Alloc && "Need VNInfo allocator to create PHI-defs");
SlotIndex Start, End;
std::tie(Start, End) = Indexes->getMBBRange(MBB);
LiveRange &LR = I.LR;
VNInfo *VNI = LR.getNextValue(Start, *Alloc);
I.Value = VNI;
// This block is done, we know the final value.
I.DomNode = nullptr;
// Add liveness since updateFromLiveIns now skips this node.
if (I.Kill.isValid())
LR.addSegment(LiveInterval::Segment(Start, I.Kill, VNI));
else {
LR.addSegment(LiveInterval::Segment(Start, End, VNI));
LOP = LiveOutPair(VNI, Node);
}
} else if (IDomValue.first) {
// No phi-def here. Remember incoming value.
I.Value = IDomValue.first;
// If the IDomValue is killed in the block, don't propagate through.
if (I.Kill.isValid())
continue;
// Propagate IDomValue if it isn't killed:
// MBB is live-out and doesn't define its own value.
if (LOP.first == IDomValue.first)
continue;
++Changes;
LOP = IDomValue;
}
}
} while (Changes);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineCopyPropagation.cpp | //===- MachineCopyPropagation.cpp - Machine Copy Propagation Pass ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is an extremely simple MachineInstr-level copy propagation pass.
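//
// In outline: AvailCopyMap records, per destination register, the most
// recent COPY that is still available; SrcMap maps each source register to
// the destinations copied from it, so a clobber of a source invalidates the
// dependent entries (see SourceNoLongerAvailable).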
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "codegen-cp"
STATISTIC(NumDeletes, "Number of dead copies deleted");
namespace {
class MachineCopyPropagation : public MachineFunctionPass {
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
MachineRegisterInfo *MRI;
public:
static char ID; // Pass identification, replacement for typeid
MachineCopyPropagation() : MachineFunctionPass(ID) {
initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
private:
typedef SmallVector<unsigned, 4> DestList;
typedef DenseMap<unsigned, DestList> SourceMap;
void SourceNoLongerAvailable(unsigned Reg,
SourceMap &SrcMap,
DenseMap<unsigned, MachineInstr*> &AvailCopyMap);
bool CopyPropagateBlock(MachineBasicBlock &MBB);
};
}
char MachineCopyPropagation::ID = 0;
char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
INITIALIZE_PASS(MachineCopyPropagation, "machine-cp",
"Machine Copy Propagation Pass", false, false)
void
MachineCopyPropagation::SourceNoLongerAvailable(unsigned Reg,
SourceMap &SrcMap,
DenseMap<unsigned, MachineInstr*> &AvailCopyMap) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
SourceMap::iterator SI = SrcMap.find(*AI);
if (SI != SrcMap.end()) {
const DestList& Defs = SI->second;
for (DestList::const_iterator I = Defs.begin(), E = Defs.end();
I != E; ++I) {
unsigned MappedDef = *I;
// Source of copy is no longer available for propagation.
AvailCopyMap.erase(MappedDef);
for (MCSubRegIterator SR(MappedDef, TRI); SR.isValid(); ++SR)
AvailCopyMap.erase(*SR);
}
}
}
}
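/// NoInterveningSideEffect - Return true if there is no instruction with an
/// unmodeled side effect, call, or terminator between CopyMI and MI. A sketch
/// of the case this guards against (hypothetical reserved registers):
///   %RSP = COPY %RAX
///   CALL ...           ; may implicitly change %RSP
///   %RAX = COPY %RSP   ; not a removable nop copy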
static bool NoInterveningSideEffect(const MachineInstr *CopyMI,
const MachineInstr *MI) {
const MachineBasicBlock *MBB = CopyMI->getParent();
if (MI->getParent() != MBB)
return false;
MachineBasicBlock::const_iterator I = CopyMI;
MachineBasicBlock::const_iterator E = MBB->end();
MachineBasicBlock::const_iterator E2 = MI;
++I;
while (I != E && I != E2) {
if (I->hasUnmodeledSideEffects() || I->isCall() ||
I->isTerminator())
return false;
++I;
}
return true;
}
/// isNopCopy - Return true if the specified copy is really a nop. That is,
/// the source of the copy is the same as the definition of the copy that
/// supplied the source. If the source of the copy is a sub-register, then the
/// sub-indices must also match. e.g.
/// ecx = mov eax
/// al = mov cl
/// But not
/// ecx = mov eax
/// al = mov ch
static bool isNopCopy(MachineInstr *CopyMI, unsigned Def, unsigned Src,
const TargetRegisterInfo *TRI) {
unsigned SrcSrc = CopyMI->getOperand(1).getReg();
if (Def == SrcSrc)
return true;
if (TRI->isSubRegister(SrcSrc, Def)) {
unsigned SrcDef = CopyMI->getOperand(0).getReg();
unsigned SubIdx = TRI->getSubRegIndex(SrcSrc, Def);
if (!SubIdx)
return false;
return SubIdx == TRI->getSubRegIndex(SrcDef, Src);
}
return false;
}
bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
SmallSetVector<MachineInstr*, 8> MaybeDeadCopies; // Candidates for deletion
DenseMap<unsigned, MachineInstr*> AvailCopyMap; // Def -> available copies map
DenseMap<unsigned, MachineInstr*> CopyMap; // Def -> copies map
SourceMap SrcMap; // Src -> Def map
DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ) {
MachineInstr *MI = &*I;
++I;
if (MI->isCopy()) {
unsigned Def = MI->getOperand(0).getReg();
unsigned Src = MI->getOperand(1).getReg();
if (TargetRegisterInfo::isVirtualRegister(Def) ||
TargetRegisterInfo::isVirtualRegister(Src))
report_fatal_error("MachineCopyPropagation should be run after"
" register allocation!");
DenseMap<unsigned, MachineInstr*>::iterator CI = AvailCopyMap.find(Src);
if (CI != AvailCopyMap.end()) {
MachineInstr *CopyMI = CI->second;
if (!MRI->isReserved(Def) &&
(!MRI->isReserved(Src) || NoInterveningSideEffect(CopyMI, MI)) &&
isNopCopy(CopyMI, Def, Src, TRI)) {
// The two copies cancel out and the source of the first copy
// hasn't been overridden, eliminate the second one. e.g.
// %ECX<def> = COPY %EAX<kill>
// ... nothing clobbered EAX.
// %EAX<def> = COPY %ECX
// =>
// %ECX<def> = COPY %EAX
//
// Also avoid eliminating a copy from reserved registers unless the
// definition is proven not clobbered. e.g.
// %RSP<def> = COPY %RAX
// CALL
// %RAX<def> = COPY %RSP
DEBUG(dbgs() << "MCP: copy is a NOP, removing: "; MI->dump());
// Clear any kills of Def between CopyMI and MI. This extends the
// live range.
for (MachineBasicBlock::iterator I = CopyMI, E = MI; I != E; ++I)
I->clearRegisterKills(Def, TRI);
MI->eraseFromParent();
Changed = true;
++NumDeletes;
continue;
}
}
// If Src is defined by a previous copy, that copy is no longer a
// candidate for elimination.
for (MCRegAliasIterator AI(Src, TRI, true); AI.isValid(); ++AI) {
CI = CopyMap.find(*AI);
if (CI != CopyMap.end()) {
DEBUG(dbgs() << "MCP: Copy is no longer dead: "; CI->second->dump());
MaybeDeadCopies.remove(CI->second);
}
}
DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
// Copy is now a candidate for deletion.
MaybeDeadCopies.insert(MI);
// If 'Def' was previously the source of another copy, then that earlier
// copy's source is no longer available, since Def is redefined here. e.g.
// %xmm9<def> = copy %xmm2
// ...
// %xmm2<def> = copy %xmm0
// ...
// %xmm2<def> = copy %xmm9
SourceNoLongerAvailable(Def, SrcMap, AvailCopyMap);
// Remember Def is defined by the copy.
// ... Make sure to clear the def maps of aliases first.
for (MCRegAliasIterator AI(Def, TRI, false); AI.isValid(); ++AI) {
CopyMap.erase(*AI);
AvailCopyMap.erase(*AI);
}
for (MCSubRegIterator SR(Def, TRI, /*IncludeSelf=*/true); SR.isValid();
++SR) {
CopyMap[*SR] = MI;
AvailCopyMap[*SR] = MI;
}
// Remember the source that is copied to Def. Once the source is
// clobbered, it is no longer available for copy propagation.
if (std::find(SrcMap[Src].begin(), SrcMap[Src].end(), Def) ==
SrcMap[Src].end()) {
SrcMap[Src].push_back(Def);
}
continue;
}
// Not a copy.
SmallVector<unsigned, 2> Defs;
int RegMaskOpNum = -1;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask())
RegMaskOpNum = i;
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
report_fatal_error("MachineCopyPropagation should be run after"
" register allocation!");
if (MO.isDef()) {
Defs.push_back(Reg);
continue;
}
// If 'Reg' is defined by a copy, the copy is no longer a candidate
// for elimination.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
DenseMap<unsigned, MachineInstr*>::iterator CI = CopyMap.find(*AI);
if (CI != CopyMap.end()) {
DEBUG(dbgs() << "MCP: Copy is used - not dead: "; CI->second->dump());
MaybeDeadCopies.remove(CI->second);
}
}
// Treat undef uses like defs for copy propagation, but not for dead-copy
// detection. We would need to do a liveness check to be sure the copy
// is dead for undef uses.
// Backends are allowed to do whatever they want with an undef value, and
// we cannot be sure this register will not be rewritten, for instance to
// break a false dependency for the hardware.
if (MO.isUndef())
Defs.push_back(Reg);
}
// The instruction has a register mask operand which means that it clobbers
// a large set of registers. It is possible to use the register mask to
// prune the available copies, but treat it like a basic block boundary for
// now.
if (RegMaskOpNum >= 0) {
// Erase any MaybeDeadCopies whose destination register is clobbered.
const MachineOperand &MaskMO = MI->getOperand(RegMaskOpNum);
for (SmallSetVector<MachineInstr*, 8>::iterator
DI = MaybeDeadCopies.begin(), DE = MaybeDeadCopies.end();
DI != DE; ++DI) {
unsigned Reg = (*DI)->getOperand(0).getReg();
if (MRI->isReserved(Reg) || !MaskMO.clobbersPhysReg(Reg))
continue;
DEBUG(dbgs() << "MCP: Removing copy due to regmask clobbering: ";
(*DI)->dump());
(*DI)->eraseFromParent();
Changed = true;
++NumDeletes;
}
// Clear all data structures as if we were beginning a new basic block.
MaybeDeadCopies.clear();
AvailCopyMap.clear();
CopyMap.clear();
SrcMap.clear();
continue;
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
unsigned Reg = Defs[i];
// No longer defined by a copy.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
CopyMap.erase(*AI);
AvailCopyMap.erase(*AI);
}
// If 'Reg' is previously source of a copy, it is no longer available for
// copy propagation.
SourceNoLongerAvailable(Reg, SrcMap, AvailCopyMap);
}
}
// If MBB doesn't have successors, delete the copies whose defs are not used.
// If MBB does have successors, then conservatively assume the defs are
// live-out since we don't want to trust live-in lists.
if (MBB.succ_empty()) {
for (SmallSetVector<MachineInstr*, 8>::iterator
DI = MaybeDeadCopies.begin(), DE = MaybeDeadCopies.end();
DI != DE; ++DI) {
if (!MRI->isReserved((*DI)->getOperand(0).getReg())) {
(*DI)->eraseFromParent();
Changed = true;
++NumDeletes;
}
}
}
return Changed;
}
bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
bool Changed = false;
TRI = MF.getSubtarget().getRegisterInfo();
TII = MF.getSubtarget().getInstrInfo();
MRI = &MF.getRegInfo();
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
Changed |= CopyPropagateBlock(*I);
return Changed;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/PostRASchedulerList.cpp | //===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
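//
// A rough sketch of the main scheduling loop (simplified; multi-issue,
// anti-dependency breaking, and noop-insertion details are elided):
//
//   while (!Available.empty() || !Pending.empty()) {
//     move Pending units whose depth <= CurCycle into Available;
//     if (SUnit *SU = highest-priority hazard-free unit in Available)
//       schedule SU at CurCycle and release its successors;
//     else
//       advance CurCycle (stall) or emit a noop, per the hazard recognizer;
//   }
//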
#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "post-RA-sched"
STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
cl::desc("Enable scheduling after register allocation"),
cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
cl::desc("Break post-RA scheduling anti-dependencies: "
"\"critical\", \"all\", or \"none\""),
cl::init("none"), cl::Hidden);
// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
cl::desc("Debug control MBBs that are scheduled"),
cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
cl::desc("Debug control MBBs that are scheduled"),
cl::init(0), cl::Hidden);
AntiDepBreaker::~AntiDepBreaker() { }
namespace {
class PostRAScheduler : public MachineFunctionPass {
const TargetInstrInfo *TII;
RegisterClassInfo RegClassInfo;
public:
static char ID;
PostRAScheduler() : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetPassConfig>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &Fn) override;
bool enablePostRAScheduler(
const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode &Mode,
TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
};
char PostRAScheduler::ID = 0;
class SchedulePostRATDList : public ScheduleDAGInstrs {
/// AvailableQueue - The priority queue to use for the available SUnits.
///
LatencyPriorityQueue AvailableQueue;
/// PendingQueue - This contains all of the instructions whose operands have
/// been issued, but their results are not ready yet (due to the latency of
/// the operation). Once the operands become available, the instruction is
/// added to the AvailableQueue.
std::vector<SUnit*> PendingQueue;
/// HazardRec - The hazard recognizer to use.
ScheduleHazardRecognizer *HazardRec;
/// AntiDepBreak - Anti-dependence breaking object, or NULL if none
AntiDepBreaker *AntiDepBreak;
/// AA - AliasAnalysis for making memory reference queries.
AliasAnalysis *AA;
/// The schedule. Null SUnit*'s represent noop instructions.
std::vector<SUnit*> Sequence;
/// The index in BB of RegionEnd.
///
/// This is the instruction number from the top of the current block, not
/// the SlotIndex. It is only used by the AntiDepBreaker.
unsigned EndIndex;
public:
SchedulePostRATDList(
MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
const RegisterClassInfo &,
TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);
~SchedulePostRATDList() override;
/// startBlock - Initialize register live-range state for scheduling in
/// this block.
///
void startBlock(MachineBasicBlock *BB) override;
// Set the index of RegionEnd within the current BB.
void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }
/// Initialize the scheduler state for the next scheduling region.
void enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs) override;
/// Notify that the scheduler has finished scheduling the current region.
void exitRegion() override;
/// Schedule - Schedule the instruction range using list scheduling.
///
void schedule() override;
void EmitSchedule();
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void Observe(MachineInstr *MI, unsigned Count);
/// finishBlock - Clean up register live-range state.
///
void finishBlock() override;
private:
void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
void ReleaseSuccessors(SUnit *SU);
void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void ListScheduleTopDown();
void dumpSchedule() const;
void emitNoop(unsigned CurCycle);
};
}
char &llvm::PostRASchedulerID = PostRAScheduler::ID;
INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
"Post RA top-down list latency scheduler", false, false)
SchedulePostRATDList::SchedulePostRATDList(
MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
const RegisterClassInfo &RCI,
TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
: ScheduleDAGInstrs(MF, &MLI, /*IsPostRA=*/true), AA(AA), EndIndex(0) {
const InstrItineraryData *InstrItins =
MF.getSubtarget().getInstrItineraryData();
HazardRec =
MF.getSubtarget().getInstrInfo()->CreateTargetPostRAHazardRecognizer(
InstrItins, this);
assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
MRI.tracksLiveness()) &&
"Live-ins must be accurate for anti-dependency breaking");
AntiDepBreak =
((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
(AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
(AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
}
SchedulePostRATDList::~SchedulePostRATDList() {
delete HazardRec;
delete AntiDepBreak;
}
/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs) {
ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
Sequence.clear();
}
/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
DEBUG({
dbgs() << "*** Final schedule ***\n";
dumpSchedule();
dbgs() << '\n';
});
ScheduleDAGInstrs::exitRegion();
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
if (SUnit *SU = Sequence[i])
SU->dump(this);
else
dbgs() << "**** NOOP ****\n";
}
}
#endif
bool PostRAScheduler::enablePostRAScheduler(
const TargetSubtargetInfo &ST,
CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode &Mode,
TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
Mode = ST.getAntiDepBreakMode();
ST.getCriticalPathRCs(CriticalPathRCs);
return ST.enablePostRAScheduler() &&
OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
if (skipOptnoneFunction(*Fn.getFunction()))
return false;
TII = Fn.getSubtarget().getInstrInfo();
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
RegClassInfo.runOnMachineFunction(Fn);
// Check for explicit enable/disable of post-ra scheduling.
TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
TargetSubtargetInfo::ANTIDEP_NONE;
SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
if (EnablePostRAScheduler.getPosition() > 0) {
if (!EnablePostRAScheduler)
return false;
} else {
// Check that post-RA scheduling is enabled for this target.
// This may upgrade the AntiDepMode.
if (!enablePostRAScheduler(Fn.getSubtarget(), PassConfig->getOptLevel(),
AntiDepMode, CriticalPathRCs))
return false;
}
// Check for antidep breaking override...
if (EnableAntiDepBreaking.getPosition() > 0) {
AntiDepMode = (EnableAntiDepBreaking == "all")
? TargetSubtargetInfo::ANTIDEP_ALL
: ((EnableAntiDepBreaking == "critical")
? TargetSubtargetInfo::ANTIDEP_CRITICAL
: TargetSubtargetInfo::ANTIDEP_NONE);
}
DEBUG(dbgs() << "PostRAScheduler\n");
SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
CriticalPathRCs);
// Loop over all of the basic blocks
for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
MBB != MBBe; ++MBB) {
#ifndef NDEBUG
// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
if (DebugDiv > 0) {
static int bbcnt = 0;
if (bbcnt++ % DebugDiv != DebugMod)
continue;
dbgs() << "*** DEBUG scheduling " << Fn.getName()
<< ":BB#" << MBB->getNumber() << " ***\n";
}
#endif
// Initialize register live-range state for scheduling in this block.
Scheduler.startBlock(MBB);
// Schedule each sequence of instructions not interrupted by a label
// or anything else that effectively needs to shut down scheduling.
MachineBasicBlock::iterator Current = MBB->end();
unsigned Count = MBB->size(), CurrentCount = Count;
for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
MachineInstr *MI = std::prev(I);
--Count;
// Calls are not scheduling boundaries before register allocation, but
// post-ra we don't gain anything by scheduling across calls since we
// don't need to worry about register pressure.
if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
Scheduler.enterRegion(MBB, I, Current, CurrentCount - Count);
Scheduler.setEndIndex(CurrentCount);
Scheduler.schedule();
Scheduler.exitRegion();
Scheduler.EmitSchedule();
Current = MI;
CurrentCount = Count;
Scheduler.Observe(MI, CurrentCount);
}
I = MI;
if (MI->isBundle())
Count -= MI->getBundleSize();
}
assert(Count == 0 && "Instruction count mismatch!");
assert((MBB->begin() == Current || CurrentCount != 0) &&
"Instruction count mismatch!");
Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
Scheduler.setEndIndex(CurrentCount);
Scheduler.schedule();
Scheduler.exitRegion();
Scheduler.EmitSchedule();
// Clean up register live-range state.
Scheduler.finishBlock();
// Update register kills
Scheduler.fixupKills(MBB);
}
return true;
}
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
// Call the superclass.
ScheduleDAGInstrs::startBlock(BB);
// Reset the hazard recognizer and anti-dep breaker.
HazardRec->Reset();
if (AntiDepBreak)
AntiDepBreak->StartBlock(BB);
}
/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
// Build the scheduling graph.
buildSchedGraph(AA);
if (AntiDepBreak) {
unsigned Broken =
AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
EndIndex, DbgValues);
if (Broken != 0) {
// We made changes. Update the dependency graph.
// Theoretically we could update the graph in place:
// When a live range is changed to use a different register, remove
// the def's anti-dependence *and* output-dependence edges due to
// that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register.
ScheduleDAG::clearDAG();
buildSchedGraph(AA);
NumFixedAnti += Broken;
}
}
DEBUG(dbgs() << "********** List Scheduling **********\n");
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
AvailableQueue.initNodes(SUnits);
ListScheduleTopDown();
AvailableQueue.releaseState();
}
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
if (AntiDepBreak)
AntiDepBreak->Observe(MI, Count, EndIndex);
}
/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
if (AntiDepBreak)
AntiDepBreak->FinishBlock();
// Call the superclass.
ScheduleDAGInstrs::finishBlock();
}
//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
SUnit *SuccSU = SuccEdge->getSUnit();
if (SuccEdge->isWeak()) {
--SuccSU->WeakPredsLeft;
return;
}
#ifndef NDEBUG
if (SuccSU->NumPredsLeft == 0) {
dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
dbgs() << " has been released too many times!\n";
llvm_unreachable(nullptr);
}
#endif
--SuccSU->NumPredsLeft;
// Standard scheduler algorithms will recompute the depth of the successor
// here as such:
// SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
//
// However, we lazily compute node depth instead. Note that
// ScheduleNodeTopDown has already updated the depth of this node which causes
// all descendants to be marked dirty. Setting the successor depth explicitly
// here would cause depth to be recomputed for all its ancestors. If the
// successor is not yet ready (because of a transitively redundant edge) then
// this causes depth computation to be quadratic in the size of the DAG.
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
PendingQueue.push_back(SuccSU);
}
/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
ReleaseSucc(SU, &*I);
}
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
Sequence.push_back(SU);
assert(CurCycle >= SU->getDepth() &&
"Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
ReleaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue.scheduledNode(SU);
}
/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
HazardRec->EmitNoop();
Sequence.push_back(nullptr); // NULL here means noop
++NumNoops;
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
unsigned CurCycle = 0;
// We're scheduling top-down but we're visiting the regions in
// bottom-up order, so we don't know the hazards at the start of a
// region. So assume no hazards (this should usually be ok as most
// blocks are a single region).
HazardRec->Reset();
// Release any successors of the special Entry node.
ReleaseSuccessors(&EntrySU);
// Add all leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.
if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
AvailableQueue.push(&SUnits[i]);
SUnits[i].isAvailable = true;
}
}
// In any cycle where we can't schedule any instructions, we must
// stall or emit a noop, depending on the target.
bool CycleHasInsts = false;
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
std::vector<SUnit*> NotReady;
Sequence.reserve(SUnits.size());
while (!AvailableQueue.empty() || !PendingQueue.empty()) {
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
unsigned MinDepth = ~0u;
for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
if (PendingQueue[i]->getDepth() <= CurCycle) {
AvailableQueue.push(PendingQueue[i]);
PendingQueue[i]->isAvailable = true;
PendingQueue[i] = PendingQueue.back();
PendingQueue.pop_back();
--i; --e;
} else if (PendingQueue[i]->getDepth() < MinDepth)
MinDepth = PendingQueue[i]->getDepth();
}
DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));
SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
bool HasNoopHazards = false;
while (!AvailableQueue.empty()) {
SUnit *CurSUnit = AvailableQueue.pop();
ScheduleHazardRecognizer::HazardType HT =
HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
if (HT == ScheduleHazardRecognizer::NoHazard) {
if (HazardRec->ShouldPreferAnother(CurSUnit)) {
if (!NotPreferredSUnit) {
// If this is the first non-preferred node for this cycle, then
// record it and continue searching for a preferred node. If this
// is not the first non-preferred node, then treat it as though
// there had been a hazard.
NotPreferredSUnit = CurSUnit;
continue;
}
} else {
FoundSUnit = CurSUnit;
break;
}
}
// Remember if this is a noop hazard.
HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;
NotReady.push_back(CurSUnit);
}
// If we have a non-preferred node, push it back onto the available list.
// If we did not find a preferred node, then schedule this first
// non-preferred node.
if (NotPreferredSUnit) {
if (!FoundSUnit) {
DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
FoundSUnit = NotPreferredSUnit;
} else {
AvailableQueue.push(NotPreferredSUnit);
}
NotPreferredSUnit = nullptr;
}
// Add the nodes that aren't ready back onto the available list.
if (!NotReady.empty()) {
AvailableQueue.push_all(NotReady);
NotReady.clear();
}
// If we found a node to schedule...
if (FoundSUnit) {
// If we need to emit noops prior to this instruction, then do so.
unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
for (unsigned i = 0; i != NumPreNoops; ++i)
emitNoop(CurCycle);
// ... schedule the node...
ScheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
CycleHasInsts = true;
if (HazardRec->atIssueLimit()) {
DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
HazardRec->AdvanceCycle();
++CurCycle;
CycleHasInsts = false;
}
} else {
if (CycleHasInsts) {
DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
HazardRec->AdvanceCycle();
} else if (!HasNoopHazards) {
// Otherwise, we have a pipeline stall, but no other problem,
// just advance the current cycle and try again.
DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
HazardRec->AdvanceCycle();
++NumStalls;
} else {
// Otherwise, we have no instructions to issue and we have instructions
// that will fault if we don't do this right. This is the case for
// processors without pipeline interlocks and other cases.
emitNoop(CurCycle);
}
++CurCycle;
CycleHasInsts = false;
}
}
#ifndef NDEBUG
unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
unsigned Noops = 0;
for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
if (!Sequence[i])
++Noops;
assert(Sequence.size() - Noops == ScheduledNodes &&
"The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}
// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
RegionBegin = RegionEnd;
// If first instruction was a DBG_VALUE then put it back.
if (FirstDbgValue)
BB->splice(RegionEnd, BB, FirstDbgValue);
// Then re-insert them according to the given schedule.
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
if (SUnit *SU = Sequence[i])
BB->splice(RegionEnd, BB, SU->getInstr());
else
// Null SUnit* is a noop.
TII->insertNoop(*BB, RegionEnd);
// Update the Begin iterator, as the first instruction in the block
// may have been scheduled later.
if (i == 0)
RegionBegin = std::prev(RegionEnd);
}
// Reinsert any remaining debug_values.
for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
MachineInstr *DbgValue = P.first;
MachineBasicBlock::iterator OrigPrivMI = P.second;
BB->splice(++OrigPrivMI, BB, DbgValue);
}
DbgValues.clear();
FirstDbgValue = nullptr;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveDebugVariables.h | //===- LiveDebugVariables.h - Tracking debug info variables ----*- c++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface to the LiveDebugVariables analysis.
//
// The analysis removes DBG_VALUE instructions for virtual registers and tracks
// live user variables in a data structure that can be updated during register
// allocation.
//
// After register allocation new DBG_VALUE instructions are emitted to reflect
// the new locations of user variables.
//
//===----------------------------------------------------------------------===//
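//
// For example (illustrative pseudo-machine-instructions), a DBG_VALUE of a
// virtual register such as
//
//   DBG_VALUE %vreg1, !"x"
//
// is removed before allocation and re-emitted afterwards with the assigned
// location, e.g.
//
//   DBG_VALUE %EAX, !"x"
//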
#ifndef LLVM_LIB_CODEGEN_LIVEDEBUGVARIABLES_H
#define LLVM_LIB_CODEGEN_LIVEDEBUGVARIABLES_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/IR/DebugInfo.h"
namespace llvm {
class LiveInterval;
class LiveIntervals;
class VirtRegMap;
class LLVM_LIBRARY_VISIBILITY LiveDebugVariables : public MachineFunctionPass {
void *pImpl;
DenseMap<const Function *, DISubprogram *> FunctionDIs;
public:
static char ID; // Pass identification, replacement for typeid
LiveDebugVariables();
~LiveDebugVariables() override;
/// renameRegister - Move any user variables in OldReg to NewReg:SubIdx.
/// @param OldReg Old virtual register that is going away.
/// @param NewReg New register holding the user variables.
/// @param SubIdx If NewReg is a virtual register, SubIdx may indicate a sub-
/// register.
void renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx);
/// splitRegister - Move any user variables in OldReg to the live ranges in
/// NewRegs where they are live. Mark the values as unavailable where no new
/// register is live.
void splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
LiveIntervals &LIS);
/// emitDebugValues - Emit new DBG_VALUE instructions reflecting the changes
/// that happened during register allocation.
/// @param VRM Rename virtual registers according to map.
void emitDebugValues(VirtRegMap *VRM);
/// dump - Print data structures to dbgs().
void dump();
private:
bool runOnMachineFunction(MachineFunction &) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &) const override;
bool doInitialization(Module &) override;
};
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveRangeCalc.h | //===---- LiveRangeCalc.h - Calculate live ranges ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The LiveRangeCalc class can be used to compute live ranges from scratch. It
// caches information about values in the CFG to speed up repeated operations
// on the same live range. The cache can be shared by non-overlapping live
// ranges. SplitKit uses that when computing the live range of split products.
//
// A low-level interface is available to clients that know where a variable is
// live, but don't know which value it has at every point. LiveRangeCalc will
// propagate values down the dominator tree, and even insert PHI-defs where
// needed. SplitKit uses this faster interface when possible.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_LIVERANGECALC_H
#define LLVM_LIB_CODEGEN_LIVERANGECALC_H
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/CodeGen/LiveInterval.h"
namespace llvm {
/// Forward declarations for MachineDominators.h:
class MachineDominatorTree;
template <class NodeT> class DomTreeNodeBase;
typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
class LiveRangeCalc {
const MachineFunction *MF;
const MachineRegisterInfo *MRI;
SlotIndexes *Indexes;
MachineDominatorTree *DomTree;
VNInfo::Allocator *Alloc;
/// LiveOutPair - A value and the block that defined it. The domtree node is
/// redundant, it can be computed as: MDT[Indexes.getMBBFromIndex(VNI->def)].
typedef std::pair<VNInfo*, MachineDomTreeNode*> LiveOutPair;
/// LiveOutMap - Map basic blocks to the value leaving the block.
typedef IndexedMap<LiveOutPair, MBB2NumberFunctor> LiveOutMap;
/// Bit vector of active entries in LiveOut, also used as a visited set by
/// findReachingDefs. One entry per basic block, indexed by block number.
/// This is kept as a separate bit vector because it can be cleared quickly
/// when switching live ranges.
BitVector Seen;
/// Map each basic block where a live range is live out to the live-out value
/// and its defining block.
///
/// For every basic block, MBB, one of these conditions shall be true:
///
/// 1. !Seen.count(MBB->getNumber())
/// Blocks without a Seen bit are ignored.
/// 2. LiveOut[MBB].second.getNode() == MBB
/// The live-out value is defined in MBB.
/// 3. forall P in preds(MBB): LiveOut[P] == LiveOut[MBB]
/// The live-out value passes through MBB. All predecessors must carry
/// the same value.
///
/// The domtree node may be null, it can be computed.
///
/// The map can be shared by multiple live ranges as long as no two are
/// live-out of the same block.
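///
/// For instance (hypothetical blocks), if a value V defined in B1 flows
/// through B2, then LiveOut[B1] = (V, B1) satisfies condition 2 and
/// LiveOut[B2] == LiveOut[B1] satisfies condition 3.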
LiveOutMap Map;
/// LiveInBlock - Information about a basic block where a live range is known
/// to be live-in, but the value has not yet been determined.
struct LiveInBlock {
// The live range set that is live-in to this block. The algorithms can
// handle multiple non-overlapping live ranges simultaneously.
LiveRange &LR;
// DomNode - Dominator tree node for the block.
// Cleared when the final value has been determined and LI has been updated.
MachineDomTreeNode *DomNode;
// Position in block where the live-in range ends, or SlotIndex() if the
// range passes through the block. When the final value has been
// determined, the range from the block start to Kill will be added to LI.
SlotIndex Kill;
// Live-in value filled in by updateSSA once it is known.
VNInfo *Value;
LiveInBlock(LiveRange &LR, MachineDomTreeNode *node, SlotIndex kill)
: LR(LR), DomNode(node), Kill(kill), Value(nullptr) {}
};
/// LiveIn - Work list of blocks where the live-in value has yet to be
/// determined. This list is typically computed by findReachingDefs() and
/// used as a work list by updateSSA(). The low-level interface may also be
/// used to add entries directly.
SmallVector<LiveInBlock, 16> LiveIn;
/// Assuming that @p LR is live-in to @p UseMBB, find the set of defs that can
/// reach it.
///
/// If only one def can reach @p UseMBB, all paths from the def to @p UseMBB
/// are added to @p LR, and the function returns true.
///
/// If multiple values can reach @p UseMBB, the blocks that need @p LR to be
/// live in are added to the LiveIn array, and the function returns false.
///
/// PhysReg, when set, is used to verify live-in lists on basic blocks.
bool findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
SlotIndex Kill, unsigned PhysReg);
/// updateSSA - Compute the values that will be live in to all requested
/// blocks in LiveIn. Create PHI-def values as required to preserve SSA form.
///
/// Every live-in block must be jointly dominated by the added live-out
/// blocks. No values are read from the live ranges.
void updateSSA();
/// Transfer information from the LiveIn vector to the live ranges and update
/// the given @p LiveOuts.
void updateFromLiveIns();
/// Extend the live range of @p LR to reach all uses of Reg.
///
/// All uses must be jointly dominated by existing liveness. PHI-defs are
/// inserted as needed to preserve SSA form.
void extendToUses(LiveRange &LR, unsigned Reg, unsigned LaneMask);
/// Reset Map and Seen fields.
void resetLiveOutMap();
public:
LiveRangeCalc() : MF(nullptr), MRI(nullptr), Indexes(nullptr),
DomTree(nullptr), Alloc(nullptr) {}
//===--------------------------------------------------------------------===//
// High-level interface.
//===--------------------------------------------------------------------===//
//
// Calculate live ranges from scratch.
//
/// reset - Prepare caches for a new set of non-overlapping live ranges. The
/// caches must be reset before attempting calculations with a live range
/// that may overlap a previously computed live range, and before the first
/// live range in a function. If live ranges are not known to be
/// non-overlapping, call reset before each.
void reset(const MachineFunction *MF,
SlotIndexes*,
MachineDominatorTree*,
VNInfo::Allocator*);
//===--------------------------------------------------------------------===//
// Mid-level interface.
//===--------------------------------------------------------------------===//
//
// Modify existing live ranges.
//
/// Extend the live range of @p LR to reach @p Use.
///
/// The existing values in @p LR must be live so they jointly dominate @p Use.
/// If @p Use is not dominated by a single existing value, PHI-defs are
/// inserted as required to preserve SSA form.
///
/// PhysReg, when set, is used to verify live-in lists on basic blocks.
void extend(LiveRange &LR, SlotIndex Use, unsigned PhysReg = 0);
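// A typical call looks like this (Indexes and UseMI stand for hypothetical
// SlotIndexes* and MachineInstr* values):
//   LRC.extend(LR, Indexes->getInstructionIndex(UseMI).getRegSlot());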
/// createDeadDefs - Create a dead def in LI for every def operand of Reg.
/// Each instruction defining Reg gets a new VNInfo with a corresponding
/// minimal live range.
void createDeadDefs(LiveRange &LR, unsigned Reg);
/// Extend the live range of @p LR to reach all uses of Reg.
///
/// All uses must be jointly dominated by existing liveness. PHI-defs are
/// inserted as needed to preserve SSA form.
void extendToUses(LiveRange &LR, unsigned PhysReg) {
extendToUses(LR, PhysReg, ~0u);
}
/// Calculates liveness for the register specified in live interval @p LI.
/// Creates subregister live ranges as needed if subreg liveness tracking is
/// enabled.
void calculate(LiveInterval &LI, bool TrackSubRegs);
//===--------------------------------------------------------------------===//
// Low-level interface.
//===--------------------------------------------------------------------===//
//
// These functions can be used to compute live ranges where the live-in and
// live-out blocks are already known, but the SSA value in each block is
// unknown.
//
// After calling reset(), add known live-out values and known live-in blocks.
// Then call calculateValues() to compute the actual value that is
// live-in to each block, and add liveness to the live ranges.
//
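// An illustrative sequence (block pointers and indices are hypothetical):
//
//   LRC.reset(&MF, Indexes, DomTree, Alloc);
//   LRC.setLiveOutValue(DefMBB, VNI);
//   LRC.addLiveInBlock(LR, DomTree->getNode(UseMBB), KillIdx);
//   LRC.calculateValues();
//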
/// setLiveOutValue - Indicate that VNI is live out from MBB. The
/// calculateValues() function will not add liveness for MBB; the caller
/// should take care of that.
///
/// VNI may be null only if MBB is a live-through block also passed to
/// addLiveInBlock().
void setLiveOutValue(MachineBasicBlock *MBB, VNInfo *VNI) {
Seen.set(MBB->getNumber());
Map[MBB] = LiveOutPair(VNI, nullptr);
}
/// addLiveInBlock - Add a block with an unknown live-in value. This
/// function can only be called once per basic block. Once the live-in value
/// has been determined, calculateValues() will add liveness to LI.
///
/// @param LR The live range that is live-in to the block.
/// @param DomNode The domtree node for the block.
/// @param Kill Index in block where LI is killed. If the value is
/// live-through, set Kill = SlotIndex() and also call
/// setLiveOutValue(MBB, 0).
void addLiveInBlock(LiveRange &LR,
MachineDomTreeNode *DomNode,
SlotIndex Kill = SlotIndex()) {
LiveIn.push_back(LiveInBlock(LR, DomNode, Kill));
}
/// calculateValues - Calculate the value that will be live-in to each block
/// added with addLiveInBlock. Add PHI-def values as needed to preserve SSA
/// form. Add liveness to all live-in blocks up to the Kill point, or the
/// whole block for live-through blocks.
///
/// Every predecessor of a live-in block must have been given a value with
/// setLiveOutValue; the value may be null for live-through blocks.
void calculateValues();
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/StackProtector.cpp | //===-- StackProtector.cpp - Stack Protector Insertion --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cstdlib>
using namespace llvm;
#define DEBUG_TYPE "stack-protector"
STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
" taken.");
static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
cl::init(true), cl::Hidden);
char StackProtector::ID = 0;
INITIALIZE_PASS(StackProtector, "stack-protector", "Insert stack protectors",
false, true)
FunctionPass *llvm::createStackProtectorPass(const TargetMachine *TM) {
return new StackProtector(TM);
}
StackProtector::SSPLayoutKind
StackProtector::getSSPLayout(const AllocaInst *AI) const {
return AI ? Layout.lookup(AI) : SSPLK_None;
}
void StackProtector::adjustForColoring(const AllocaInst *From,
const AllocaInst *To) {
// When coloring replaces one alloca with another, transfer the SSPLayoutKind
// tag from the remapped to the target alloca. The remapped alloca should
// have a size smaller than or equal to the replacement alloca.
SSPLayoutMap::iterator I = Layout.find(From);
if (I != Layout.end()) {
SSPLayoutKind Kind = I->second;
Layout.erase(I);
// Transfer the tag, but make sure that SSPLK_AddrOf does not overwrite
// SSPLK_SmallArray or SSPLK_LargeArray, and make sure that
// SSPLK_SmallArray does not overwrite SSPLK_LargeArray.
I = Layout.find(To);
if (I == Layout.end())
Layout.insert(std::make_pair(To, Kind));
else if (I->second != SSPLK_LargeArray && Kind != SSPLK_AddrOf)
I->second = Kind;
}
}
bool StackProtector::runOnFunction(Function &Fn) {
F = &Fn;
M = F->getParent();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
Attribute Attr = Fn.getFnAttribute("stack-protector-buffer-size");
if (Attr.isStringAttribute() &&
Attr.getValueAsString().getAsInteger(10, SSPBufferSize))
return false; // Invalid integer string
if (!RequiresStackProtector())
return false;
++NumFunProtected;
return InsertStackProtectors();
}
/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" (>= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
bool Strong,
bool InStruct) const {
if (!Ty)
return false;
if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
if (!AT->getElementType()->isIntegerTy(8)) {
// If we're on a non-Darwin platform or we're inside of a structure, don't
// add stack protectors unless the array is a character array.
// However, in strong mode any array, regardless of type and size,
// triggers a protector.
if (!Strong && (InStruct || !Trip.isOSDarwin()))
return false;
}
// If an array has at least SSPBufferSize bytes of allocated space, then we
// emit stack protectors.
if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
IsLarge = true;
return true;
}
if (Strong)
// Require a protector for all arrays in strong mode
return true;
}
const StructType *ST = dyn_cast<StructType>(Ty);
if (!ST)
return false;
bool NeedsProtector = false;
for (StructType::element_iterator I = ST->element_begin(),
E = ST->element_end();
I != E; ++I)
if (ContainsProtectableArray(*I, IsLarge, Strong, true)) {
// If the element is a protectable array and is large (>= SSPBufferSize)
// then we are done. If the protectable array is not large, then
// keep looking in case a subsequent element is a large array.
if (IsLarge)
return true;
NeedsProtector = true;
}
return NeedsProtector;
}
bool StackProtector::HasAddressTaken(const Instruction *AI) {
for (const User *U : AI->users()) {
if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (AI == SI->getValueOperand())
return true;
} else if (const PtrToIntInst *SI = dyn_cast<PtrToIntInst>(U)) {
if (AI == SI->getOperand(0))
return true;
} else if (isa<CallInst>(U)) {
return true;
} else if (isa<InvokeInst>(U)) {
return true;
} else if (const SelectInst *SI = dyn_cast<SelectInst>(U)) {
if (HasAddressTaken(SI))
return true;
} else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
// Keep track of what PHI nodes we have already visited to ensure
// they are only visited once.
if (VisitedPHIs.insert(PN).second)
if (HasAddressTaken(PN))
return true;
} else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
if (HasAddressTaken(GEP))
return true;
} else if (const BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
if (HasAddressTaken(BI))
return true;
}
}
return false;
}
/// \brief Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic adds a guard variable to functions that call
/// alloca with either a variable size or a size >= SSPBufferSize, to
/// functions with character buffers larger than SSPBufferSize, and to
/// functions with aggregates containing character buffers larger than
/// SSPBufferSize. The strong heuristic adds a guard variable to functions
/// that call alloca regardless of size, to functions with any buffer
/// regardless of type and size, to functions with aggregates that contain
/// any buffer regardless of type and size, and to functions that contain
/// stack-based variables that have had their address taken.
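///
/// For example (assuming the default ssp-buffer-size of 8; use() and
/// take_address() are hypothetical helpers), the standard heuristic protects
///
///   void f() { char buf[16]; use(buf); }
///
/// while the strong heuristic additionally protects
///
///   int g() { int x; take_address(&x); return x; }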
bool StackProtector::RequiresStackProtector() {
bool Strong = false;
bool NeedsProtector = false;
if (F->hasFnAttribute(Attribute::StackProtectReq)) {
NeedsProtector = true;
Strong = true; // Use the same heuristic as strong to determine SSPLayout
} else if (F->hasFnAttribute(Attribute::StackProtectStrong))
Strong = true;
else if (!F->hasFnAttribute(Attribute::StackProtect))
return false;
for (const BasicBlock &BB : *F) {
for (const Instruction &I : BB) {
if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
if (AI->isArrayAllocation()) {
// SSP-Strong: Enable protectors for any call to alloca, regardless
// of size.
if (Strong)
return true;
if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
// A call to alloca with size >= SSPBufferSize requires
// stack protectors.
Layout.insert(std::make_pair(AI, SSPLK_LargeArray));
NeedsProtector = true;
} else if (Strong) {
// Require protectors for all alloca calls in strong mode.
Layout.insert(std::make_pair(AI, SSPLK_SmallArray));
NeedsProtector = true;
}
} else {
// A call to alloca with a variable size requires protectors.
Layout.insert(std::make_pair(AI, SSPLK_LargeArray));
NeedsProtector = true;
}
continue;
}
bool IsLarge = false;
if (ContainsProtectableArray(AI->getAllocatedType(), IsLarge, Strong)) {
Layout.insert(std::make_pair(AI, IsLarge ? SSPLK_LargeArray
: SSPLK_SmallArray));
NeedsProtector = true;
continue;
}
if (Strong && HasAddressTaken(AI)) {
++NumAddrTaken;
Layout.insert(std::make_pair(AI, SSPLK_AddrOf));
NeedsProtector = true;
}
}
}
}
return NeedsProtector;
}
static bool InstructionWillNotHaveChain(const Instruction *I) {
return !I->mayHaveSideEffects() && !I->mayReadFromMemory() &&
isSafeToSpeculativelyExecute(I);
}
/// Identify if RI has a previous instruction in the "Tail Position" and return
/// it. Otherwise return 0.
///
/// This is based on the code in llvm::isInTailCallPosition. The difference
/// is that it inverts the first part of llvm::isInTailCallPosition since
/// isInTailCallPosition is checking if a call is in a tail call position, and
/// we are searching for an unknown tail call that might be in the tail call
/// position. Once we find the call though, the code uses the same refactored
/// code, returnTypeIsEligibleForTailCall.
static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI,
const TargetLoweringBase *TLI) {
// Establish a reasonable upper bound on the maximum number of instructions we
// will look through to find a tail call.
unsigned SearchCounter = 0;
const unsigned MaxSearch = 4;
bool NoInterposingChain = true;
for (BasicBlock::reverse_iterator I = std::next(BB->rbegin()), E = BB->rend();
I != E && SearchCounter < MaxSearch; ++I) {
Instruction *Inst = &*I;
// Skip over debug intrinsics and do not allow them to affect our MaxSearch
// counter.
if (isa<DbgInfoIntrinsic>(Inst))
continue;
// If we find a call and the following conditions are satisfied, then we
// have found a tail call that satisfies at least the target independent
// requirements of a tail call:
//
// 1. The call site has the tail marker.
//
// 2. The call site either will not cause the creation of a chain, or if a
// chain is necessary there are no instructions between the call site and
// the return which would create an interposing chain.
//
// 3. The return type of the function does not impede tail call
// optimization.
if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
if (CI->isTailCall() &&
(InstructionWillNotHaveChain(CI) || NoInterposingChain) &&
returnTypeIsEligibleForTailCall(BB->getParent(), CI, RI, *TLI))
return CI;
}
// If we did not find a call see if we have an instruction that may create
// an interposing chain.
NoInterposingChain =
NoInterposingChain && InstructionWillNotHaveChain(Inst);
// Increment max search.
SearchCounter++;
}
return nullptr;
}
/// Insert code into the entry block that stores the __stack_chk_guard
/// variable onto the stack:
///
/// entry:
/// StackGuardSlot = alloca i8*
/// StackGuard = load __stack_chk_guard
/// call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
const TargetLoweringBase *TLI, const Triple &TT,
AllocaInst *&AI, Value *&StackGuardVar) {
bool SupportsSelectionDAGSP = false;
PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
unsigned AddressSpace, Offset;
if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
Constant *OffsetVal =
ConstantInt::get(Type::getInt32Ty(RI->getContext()), Offset);
StackGuardVar =
ConstantExpr::getIntToPtr(OffsetVal, PointerType::get(PtrTy,
AddressSpace));
} else if (TT.isOSOpenBSD()) {
StackGuardVar = M->getOrInsertGlobal("__guard_local", PtrTy);
cast<GlobalValue>(StackGuardVar)
->setVisibility(GlobalValue::HiddenVisibility);
} else {
SupportsSelectionDAGSP = true;
StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
}
IRBuilder<> B(&F->getEntryBlock().front());
AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");
LoadInst *LI = B.CreateLoad(StackGuardVar, "StackGuard");
B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
{LI, AI});
return SupportsSelectionDAGSP;
}
/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
/// - The prologue code loads and stores the stack guard onto the stack.
/// - The epilogue checks the value stored in the prologue against the original
/// value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
bool HasPrologue = false;
bool SupportsSelectionDAGSP =
EnableSelectionDAGSP && !TM->Options.EnableFastISel;
AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
Value *StackGuardVar = nullptr; // The stack guard variable.
for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
BasicBlock *BB = I++;
ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
if (!RI)
continue;
if (!HasPrologue) {
HasPrologue = true;
SupportsSelectionDAGSP &=
CreatePrologue(F, M, RI, TLI, Trip, AI, StackGuardVar);
}
if (SupportsSelectionDAGSP) {
// Since we have a potential tail call, insert the special stack check
// intrinsic.
Instruction *InsertionPt = nullptr;
if (CallInst *CI = FindPotentialTailCall(BB, RI, TLI)) {
InsertionPt = CI;
} else {
InsertionPt = RI;
// At this point we know that BB has a return statement so it *DOES*
// have a terminator.
assert(InsertionPt != nullptr &&
"BB must have a terminator instruction at this point.");
}
Function *Intrinsic =
Intrinsic::getDeclaration(M, Intrinsic::stackprotectorcheck);
CallInst::Create(Intrinsic, StackGuardVar, "", InsertionPt);
} else {
// If we do not support SelectionDAG based tail calls, generate IR level
// tail calls.
//
// For each block with a return instruction, convert this:
//
// return:
// ...
// ret ...
//
// into this:
//
// return:
// ...
// %1 = load __stack_chk_guard
// %2 = load StackGuardSlot
// %3 = cmp i1 %1, %2
// br i1 %3, label %SP_return, label %CallStackCheckFailBlk
//
// SP_return:
// ret ...
//
// CallStackCheckFailBlk:
// call void @__stack_chk_fail()
// unreachable
// Create the FailBB. We duplicate the BB every time since the MI tail
// merge pass will merge together all of the various BBs into one,
// including the fail BB generated by the stack protector pseudo
// instruction.
BasicBlock *FailBB = CreateFailBB();
// Split the basic block before the return instruction.
BasicBlock *NewBB = BB->splitBasicBlock(RI, "SP_return");
// Update the dominator tree if we need to.
if (DT && DT->isReachableFromEntry(BB)) {
DT->addNewBlock(NewBB, BB);
DT->addNewBlock(FailBB, BB);
}
// Remove default branch instruction to the new BB.
BB->getTerminator()->eraseFromParent();
// Move the newly created basic block to the point right after the old
// basic block so that it's in the "fall through" position.
NewBB->moveAfter(BB);
// Generate the stack protector instructions in the old basic block.
IRBuilder<> B(BB);
LoadInst *LI1 = B.CreateLoad(StackGuardVar);
LoadInst *LI2 = B.CreateLoad(AI);
Value *Cmp = B.CreateICmpEQ(LI1, LI2);
unsigned SuccessWeight =
BranchProbabilityInfo::getBranchWeightStackProtector(true);
unsigned FailureWeight =
BranchProbabilityInfo::getBranchWeightStackProtector(false);
MDNode *Weights = MDBuilder(F->getContext())
.createBranchWeights(SuccessWeight, FailureWeight);
B.CreateCondBr(Cmp, NewBB, FailBB, Weights);
}
}
// Return whether we modified any basic blocks, i.e., whether the function
// contains any return statements at all.
return HasPrologue;
}
/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
LLVMContext &Context = F->getContext();
BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
IRBuilder<> B(FailBB);
if (Trip.isOSOpenBSD()) {
Constant *StackChkFail =
M->getOrInsertFunction("__stack_smash_handler",
Type::getVoidTy(Context),
Type::getInt8PtrTy(Context), nullptr);
B.CreateCall(StackChkFail, B.CreateGlobalStringPtr(F->getName(), "SSH"));
} else {
Constant *StackChkFail =
M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context),
nullptr);
B.CreateCall(StackChkFail, {});
}
B.CreateUnreachable();
return FailBB;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ShadowStackGCLowering.cpp | //===-- ShadowStackGCLowering.cpp - Custom lowering for shadow-stack gc ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom lowering code required by the shadow-stack GC
// strategy.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
using namespace llvm;
#define DEBUG_TYPE "shadowstackgclowering"
namespace {
class ShadowStackGCLowering : public FunctionPass {
/// Head - The global variable that points to the head of the linked list of
/// GC root chain entries (llvm_gc_root_chain).
GlobalVariable *Head;
/// StackEntryTy - Abstract type of a link in the shadow stack.
///
StructType *StackEntryTy;
StructType *FrameMapTy;
/// Roots - GC roots in the current function. Each is a pair of the
/// intrinsic call and its corresponding alloca.
std::vector<std::pair<CallInst *, AllocaInst *>> Roots;
public:
static char ID;
ShadowStackGCLowering();
bool doInitialization(Module &M) override;
bool runOnFunction(Function &F) override;
private:
bool IsNullValue(Value *V);
Constant *GetFrameMap(Function &F);
Type *GetConcreteStackEntryType(Function &F);
void CollectRoots(Function &F);
static GetElementPtrInst *CreateGEP(LLVMContext &Context, IRBuilder<> &B,
Type *Ty, Value *BasePtr, int Idx1,
const char *Name);
static GetElementPtrInst *CreateGEP(LLVMContext &Context, IRBuilder<> &B,
Type *Ty, Value *BasePtr, int Idx1, int Idx2,
const char *Name);
};
}
INITIALIZE_PASS_BEGIN(ShadowStackGCLowering, "shadow-stack-gc-lowering",
"Shadow Stack GC Lowering", false, false)
INITIALIZE_PASS_DEPENDENCY(GCModuleInfo)
INITIALIZE_PASS_END(ShadowStackGCLowering, "shadow-stack-gc-lowering",
"Shadow Stack GC Lowering", false, false)
FunctionPass *llvm::createShadowStackGCLoweringPass() { return new ShadowStackGCLowering(); }
char ShadowStackGCLowering::ID = 0;
ShadowStackGCLowering::ShadowStackGCLowering()
: FunctionPass(ID), Head(nullptr), StackEntryTy(nullptr),
FrameMapTy(nullptr) {
initializeShadowStackGCLoweringPass(*PassRegistry::getPassRegistry());
}
namespace {
/// EscapeEnumerator - This is a little algorithm to find all escape points
/// from a function so that "finally"-style code can be inserted. In addition
/// to finding the existing return and unwind instructions, it also (if
/// necessary) transforms any call instructions into invokes and sends them to
/// a landing pad.
///
/// It's wrapped up in a state machine using the same transform C# uses for
/// 'yield return' enumerators; this transform allows it to be non-allocating.
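///
/// A typical use looks like this (an illustrative sketch; it mirrors the
/// GC-lowering code further below):
///
///   EscapeEnumerator EE(F, "cleanup");
///   while (IRBuilder<> *AtExit = EE.Next()) {
///     // emit "finally"-style code at this escape point via *AtExit
///   }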
class EscapeEnumerator {
Function &F;
const char *CleanupBBName;
// State.
int State;
Function::iterator StateBB, StateE;
IRBuilder<> Builder;
public:
EscapeEnumerator(Function &F, const char *N = "cleanup")
: F(F), CleanupBBName(N), State(0), Builder(F.getContext()) {}
IRBuilder<> *Next() {
switch (State) {
default:
return nullptr;
case 0:
StateBB = F.begin();
StateE = F.end();
State = 1;
case 1:
// Find all 'return', 'resume', and 'unwind' instructions.
while (StateBB != StateE) {
BasicBlock *CurBB = StateBB++;
// Branches and invokes do not escape, only unwind, resume, and return
// do.
TerminatorInst *TI = CurBB->getTerminator();
if (!isa<ReturnInst>(TI) && !isa<ResumeInst>(TI))
continue;
Builder.SetInsertPoint(TI->getParent(), TI);
return &Builder;
}
State = 2;
// Find all 'call' instructions.
SmallVector<Instruction *, 16> Calls;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
for (BasicBlock::iterator II = BB->begin(), EE = BB->end(); II != EE;
++II)
if (CallInst *CI = dyn_cast<CallInst>(II))
if (!CI->getCalledFunction() ||
!CI->getCalledFunction()->getIntrinsicID())
Calls.push_back(CI);
if (Calls.empty())
return nullptr;
// Create a cleanup block.
LLVMContext &C = F.getContext();
BasicBlock *CleanupBB = BasicBlock::Create(C, CleanupBBName, &F);
Type *ExnTy =
StructType::get(Type::getInt8PtrTy(C), Type::getInt32Ty(C), nullptr);
if (!F.hasPersonalityFn()) {
Constant *PersFn = F.getParent()->getOrInsertFunction(
"__gcc_personality_v0",
FunctionType::get(Type::getInt32Ty(C), true));
F.setPersonalityFn(PersFn);
}
LandingPadInst *LPad =
LandingPadInst::Create(ExnTy, 1, "cleanup.lpad", CleanupBB);
LPad->setCleanup(true);
ResumeInst *RI = ResumeInst::Create(LPad, CleanupBB);
// Transform the 'call' instructions into 'invoke's branching to the
// cleanup block. Go in reverse order to make prettier BB names.
SmallVector<Value *, 16> Args;
for (unsigned I = Calls.size(); I != 0;) {
CallInst *CI = cast<CallInst>(Calls[--I]);
// Split the basic block containing the function call.
BasicBlock *CallBB = CI->getParent();
BasicBlock *NewBB =
CallBB->splitBasicBlock(CI, CallBB->getName() + ".cont");
// Remove the unconditional branch inserted at the end of CallBB.
CallBB->getInstList().pop_back();
NewBB->getInstList().remove(CI);
// Create a new invoke instruction.
Args.clear();
CallSite CS(CI);
Args.append(CS.arg_begin(), CS.arg_end());
InvokeInst *II =
InvokeInst::Create(CI->getCalledValue(), NewBB, CleanupBB, Args,
CI->getName(), CallBB);
II->setCallingConv(CI->getCallingConv());
II->setAttributes(CI->getAttributes());
CI->replaceAllUsesWith(II);
delete CI;
}
Builder.SetInsertPoint(RI->getParent(), RI);
return &Builder;
}
}
};
}
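// GetFrameMap - Build the constant FrameMap descriptor for F. For a function
// with two roots, one of which carries metadata, the emitted global looks
// roughly like this (an illustrative sketch; names are assumed):
//
//   @__gc_F = internal constant %gc_map.1
//     { %gc_map { i32 2, i32 1 }, [1 x i8*] [i8* @meta] }
//
// The returned GEP points at the leading %gc_map header, which is what the
// FrameMap* field of a StackEntry expects.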
Constant *ShadowStackGCLowering::GetFrameMap(Function &F) {
// doInitialization creates the abstract type of this value.
Type *VoidPtr = Type::getInt8PtrTy(F.getContext());
// Truncate the ShadowStackDescriptor if some metadata is null.
unsigned NumMeta = 0;
SmallVector<Constant *, 16> Metadata;
for (unsigned I = 0; I != Roots.size(); ++I) {
Constant *C = cast<Constant>(Roots[I].first->getArgOperand(1));
if (!C->isNullValue())
NumMeta = I + 1;
Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
}
Metadata.resize(NumMeta);
Type *Int32Ty = Type::getInt32Ty(F.getContext());
Constant *BaseElts[] = {
ConstantInt::get(Int32Ty, Roots.size(), false),
ConstantInt::get(Int32Ty, NumMeta, false),
};
Constant *DescriptorElts[] = {
ConstantStruct::get(FrameMapTy, BaseElts),
ConstantArray::get(ArrayType::get(VoidPtr, NumMeta), Metadata)};
Type *EltTys[] = {DescriptorElts[0]->getType(), DescriptorElts[1]->getType()};
StructType *STy = StructType::create(EltTys, "gc_map." + utostr(NumMeta));
Constant *FrameMap = ConstantStruct::get(STy, DescriptorElts);
// FIXME: Is this actually dangerous as WritingAnLLVMPass.html claims? Seems
// that, short of multithreaded LLVM, it should be safe; all that is
// necessary is that a simple Module::iterator loop not be invalidated.
// Appending to the GlobalVariable list is safe in that sense.
//
// All of the output passes emit globals last. The ExecutionEngine
// explicitly supports adding globals to the module after
// initialization.
//
// Still, if it isn't deemed acceptable, then this transformation needs
// to be a ModulePass (which means it cannot be in the 'llc' pipeline
// (which uses a FunctionPassManager (which segfaults (not asserts) if
// provided a ModulePass))).
Constant *GV = new GlobalVariable(*F.getParent(), FrameMap->getType(), true,
GlobalVariable::InternalLinkage, FrameMap,
"__gc_" + F.getName());
Constant *GEPIndices[2] = {
ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
ConstantInt::get(Type::getInt32Ty(F.getContext()), 0)};
return ConstantExpr::getGetElementPtr(FrameMap->getType(), GV, GEPIndices);
}
Type *ShadowStackGCLowering::GetConcreteStackEntryType(Function &F) {
// doInitialization creates the generic version of this type.
std::vector<Type *> EltTys;
EltTys.push_back(StackEntryTy);
for (size_t I = 0; I != Roots.size(); I++)
EltTys.push_back(Roots[I].second->getAllocatedType());
return StructType::create(EltTys, ("gc_stackentry." + F.getName()).str());
}
/// doInitialization - If this module uses the GC intrinsics, find them now. If
/// not, exit fast.
bool ShadowStackGCLowering::doInitialization(Module &M) {
bool Active = false;
for (Function &F : M) {
if (F.hasGC() && F.getGC() == std::string("shadow-stack")) {
Active = true;
break;
}
}
if (!Active)
return false;
// struct FrameMap {
// int32_t NumRoots; // Number of roots in stack frame.
// int32_t NumMeta; // Number of metadata descriptors. May be < NumRoots.
// void *Meta[]; // May be absent for roots without metadata.
// };
std::vector<Type *> EltTys;
// 32 bits is ok up to a 32GB stack frame. :)
EltTys.push_back(Type::getInt32Ty(M.getContext()));
// Specifies length of variable length array.
EltTys.push_back(Type::getInt32Ty(M.getContext()));
FrameMapTy = StructType::create(EltTys, "gc_map");
PointerType *FrameMapPtrTy = PointerType::getUnqual(FrameMapTy);
// struct StackEntry {
// ShadowStackEntry *Next; // Caller's stack entry.
// FrameMap *Map; // Pointer to constant FrameMap.
// void *Roots[]; // Stack roots (in-place array, so we pretend).
// };
StackEntryTy = StructType::create(M.getContext(), "gc_stackentry");
EltTys.clear();
EltTys.push_back(PointerType::getUnqual(StackEntryTy));
EltTys.push_back(FrameMapPtrTy);
StackEntryTy->setBody(EltTys);
PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
// Get the root chain if it already exists.
Head = M.getGlobalVariable("llvm_gc_root_chain");
if (!Head) {
// If the root chain does not exist, insert a new one with linkonce
// linkage!
Head = new GlobalVariable(
M, StackEntryPtrTy, false, GlobalValue::LinkOnceAnyLinkage,
Constant::getNullValue(StackEntryPtrTy), "llvm_gc_root_chain");
} else if (Head->hasExternalLinkage() && Head->isDeclaration()) {
Head->setInitializer(Constant::getNullValue(StackEntryPtrTy));
Head->setLinkage(GlobalValue::LinkOnceAnyLinkage);
}
return true;
}
bool ShadowStackGCLowering::IsNullValue(Value *V) {
if (Constant *C = dyn_cast<Constant>(V))
return C->isNullValue();
return false;
}
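// CollectRoots gathers the llvm.gcroot calls in F. Each root arrives as a
// pair of instructions of this shape (illustrative IR):
//
//   %slot = alloca i8*
//   call void @llvm.gcroot(i8** %slot, i8* <metadata or null>)
//
// The second operand is the metadata constant tested via IsNullValue above.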
void ShadowStackGCLowering::CollectRoots(Function &F) {
// FIXME: Account for original alignment. Could fragment the root array.
// Approach 1: Null initialize empty slots at runtime. Yuck.
// Approach 2: Emit a map of the array instead of just a count.
assert(Roots.empty() && "Not cleaned up?");
SmallVector<std::pair<CallInst *, AllocaInst *>, 16> MetaRoots;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
if (Function *F = CI->getCalledFunction())
if (F->getIntrinsicID() == Intrinsic::gcroot) {
std::pair<CallInst *, AllocaInst *> Pair = std::make_pair(
CI,
cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
if (IsNullValue(CI->getArgOperand(1)))
Roots.push_back(Pair);
else
MetaRoots.push_back(Pair);
}
// Number roots with metadata (usually empty) at the beginning, so that the
// FrameMap::Meta array can be elided.
Roots.insert(Roots.begin(), MetaRoots.begin(), MetaRoots.end());
}
GetElementPtrInst *ShadowStackGCLowering::CreateGEP(LLVMContext &Context,
IRBuilder<> &B, Type *Ty,
Value *BasePtr, int Idx,
int Idx2,
const char *Name) {
Value *Indices[] = {ConstantInt::get(Type::getInt32Ty(Context), 0),
ConstantInt::get(Type::getInt32Ty(Context), Idx),
ConstantInt::get(Type::getInt32Ty(Context), Idx2)};
Value *Val = B.CreateGEP(Ty, BasePtr, Indices, Name);
assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");
return cast<GetElementPtrInst>(Val);
}
GetElementPtrInst *ShadowStackGCLowering::CreateGEP(LLVMContext &Context,
IRBuilder<> &B, Type *Ty, Value *BasePtr,
int Idx, const char *Name) {
Value *Indices[] = {ConstantInt::get(Type::getInt32Ty(Context), 0),
ConstantInt::get(Type::getInt32Ty(Context), Idx)};
Value *Val = B.CreateGEP(Ty, BasePtr, Indices, Name);
assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");
return cast<GetElementPtrInst>(Val);
}
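// In C-like terms, the code emitted by runOnFunction below follows the
// classic shadow-stack protocol (a sketch, assuming a frame with N roots):
//
//   struct { StackEntry hdr; void *Roots[N]; } gc_frame;
//   gc_frame.hdr.Map = &__gc_F;                 // constant FrameMap
//   gc_frame.hdr.Next = llvm_gc_root_chain;     // push on entry
//   llvm_gc_root_chain = &gc_frame.hdr;
//   ...
//   llvm_gc_root_chain = gc_frame.hdr.Next;     // pop at every escape point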
/// runOnFunction - Insert code to maintain the shadow stack.
bool ShadowStackGCLowering::runOnFunction(Function &F) {
// Quick exit for functions that do not use the shadow stack GC.
if (!F.hasGC() ||
F.getGC() != std::string("shadow-stack"))
return false;
LLVMContext &Context = F.getContext();
// Find calls to llvm.gcroot.
CollectRoots(F);
// If there are no roots in this function, then there is no need to add a
// stack map entry for it.
if (Roots.empty())
return false;
// Build the constant map and figure the type of the shadow stack entry.
Value *FrameMap = GetFrameMap(F);
Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);
// Build the shadow stack entry at the very start of the function.
BasicBlock::iterator IP = F.getEntryBlock().begin();
IRBuilder<> AtEntry(IP->getParent(), IP);
Instruction *StackEntry =
AtEntry.CreateAlloca(ConcreteStackEntryTy, nullptr, "gc_frame");
while (isa<AllocaInst>(IP))
++IP;
AtEntry.SetInsertPoint(IP->getParent(), IP);
// Initialize the map pointer and load the current head of the shadow stack.
Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,
StackEntry, 0, 1, "gc_frame.map");
AtEntry.CreateStore(FrameMap, EntryMapPtr);
// After all the allocas...
for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
// For each root, find the corresponding slot in the aggregate...
Value *SlotPtr = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,
StackEntry, 1 + I, "gc_root");
// And use it in lieu of the alloca.
AllocaInst *OriginalAlloca = Roots[I].second;
SlotPtr->takeName(OriginalAlloca);
OriginalAlloca->replaceAllUsesWith(SlotPtr);
}
// Move past the original stores inserted by GCStrategy::InitRoots. This isn't
// really necessary (the collector would never see the intermediate state at
// runtime), but it's nicer not to push the half-initialized entry onto the
// shadow stack.
while (isa<StoreInst>(IP))
++IP;
AtEntry.SetInsertPoint(IP->getParent(), IP);
// Push the entry onto the shadow stack.
Instruction *EntryNextPtr = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,
StackEntry, 0, 0, "gc_frame.next");
Instruction *NewHeadVal = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,
StackEntry, 0, "gc_newhead");
AtEntry.CreateStore(CurrentHead, EntryNextPtr);
AtEntry.CreateStore(NewHeadVal, Head);
// For each instruction that escapes...
EscapeEnumerator EE(F, "gc_cleanup");
while (IRBuilder<> *AtExit = EE.Next()) {
// Pop the entry from the shadow stack. Don't reuse CurrentHead from
// AtEntry, since that would make the value live for the entire function.
Instruction *EntryNextPtr2 =
CreateGEP(Context, *AtExit, ConcreteStackEntryTy, StackEntry, 0, 0,
"gc_frame.next");
Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
AtExit->CreateStore(SavedHead, Head);
}
// Delete the original allocas (which are no longer used) and the intrinsic
// calls (which are no longer valid). Doing this last avoids invalidating
// iterators.
for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
Roots[I].first->eraseFromParent();
Roots[I].second->eraseFromParent();
}
Roots.clear();
return true;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/BasicTargetTransformInfo.cpp | //===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
///
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "basictti"
// This flag is used by the template base class for BasicTTIImpl, and here to
// provide a definition.
cl::opt<unsigned>
llvm::PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
cl::desc("Threshold for partial unrolling"),
cl::Hidden);
BasicTTIImpl::BasicTTIImpl(const TargetMachine *TM, Function &F)
: BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
TLI(ST->getTargetLowering()) {}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/CriticalAntiDepBreaker.cpp | //===----- CriticalAntiDepBreaker.cpp - Anti-dep breaker -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CriticalAntiDepBreaker class, which implements
// register anti-dependence breaking along a block's critical path during
// post-RA scheduling.
//
//===----------------------------------------------------------------------===//
#include "CriticalAntiDepBreaker.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "post-RA-sched"
CriticalAntiDepBreaker::CriticalAntiDepBreaker(MachineFunction &MFi,
const RegisterClassInfo &RCI)
: AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
TII(MF.getSubtarget().getInstrInfo()),
TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RCI),
Classes(TRI->getNumRegs(), nullptr), KillIndices(TRI->getNumRegs(), 0),
DefIndices(TRI->getNumRegs(), 0), KeepRegs(TRI->getNumRegs(), false) {}
CriticalAntiDepBreaker::~CriticalAntiDepBreaker() {
}
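// Liveness is tracked bottom-up with two parallel maps: KillIndices[Reg] is
// the index of the most recent kill, or ~0u if Reg is not live, and
// DefIndices[Reg] is the index of the most recent complete def, or ~0u if Reg
// is live. Exactly one of the two is ~0u for any register at any time, which
// the asserts in findSuitableFreeRegister below rely on.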
void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
const unsigned BBSize = BB->size();
for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
// Clear out the register class data.
Classes[i] = nullptr;
// Initialize the indices to indicate that no registers are live.
KillIndices[i] = ~0u;
DefIndices[i] = BBSize;
}
// Clear "do not change" set.
KeepRegs.reset();
bool IsReturnBlock = (BBSize != 0 && BB->back().isReturn());
// Examine the live-in regs of all successors.
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
unsigned Reg = *AI;
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
KillIndices[Reg] = BBSize;
DefIndices[Reg] = ~0u;
}
}
// Mark live-out callee-saved registers. In a return block this is
// all callee-saved registers. In a non-return block this is any
// callee-saved register that is not saved in the prologue.
const MachineFrameInfo *MFI = MF.getFrameInfo();
BitVector Pristine = MFI->getPristineRegs(MF);
for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
if (!IsReturnBlock && !Pristine.test(*I)) continue;
for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
unsigned Reg = *AI;
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
KillIndices[Reg] = BBSize;
DefIndices[Reg] = ~0u;
}
}
}
void CriticalAntiDepBreaker::FinishBlock() {
RegRefs.clear();
KeepRegs.reset();
}
void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
unsigned InsertPosIndex) {
// Kill instructions can define registers but are really nops, and there might
// be a real definition earlier that needs to be paired with uses dominated by
// this kill.
// FIXME: It may be possible to remove the isKill() restriction once PR18663
// has been properly fixed. There can be value in processing kills as seen in
// the AggressiveAntiDepBreaker class.
if (MI->isDebugValue() || MI->isKill())
return;
assert(Count < InsertPosIndex && "Instruction index out of expected range!");
for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
if (KillIndices[Reg] != ~0u) {
// If Reg is currently live, then mark that it can't be renamed as
// we don't know the extent of its live-range anymore (now that it
// has been scheduled).
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
KillIndices[Reg] = Count;
} else if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
// Any register which was defined within the previous scheduling region
// may have been rescheduled and its lifetime may overlap with registers
// in ways not reflected in our current liveness state. For each such
// register, adjust the liveness state to be conservatively correct.
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
// Move the def index to the end of the previous region, to reflect
// that the def could theoretically have been scheduled at the end.
DefIndices[Reg] = InsertPosIndex;
}
}
PrescanInstruction(MI);
ScanInstruction(MI, Count);
}
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static const SDep *CriticalPathStep(const SUnit *SU) {
const SDep *Next = nullptr;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
const SUnit *PredSU = P->getSUnit();
unsigned PredLatency = P->getLatency();
unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
// In the case of a latency tie, prefer an anti-dependency edge over
// other types of edges.
if (NextDepth < PredTotalLatency ||
(NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
NextDepth = PredTotalLatency;
Next = &*P;
}
}
return Next;
}
void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
// It's not safe to change register allocation for source operands of
// instructions that have special allocation requirements. Also assume all
// registers used in a call must not be changed (ABI).
// FIXME: The issue with predicated instructions is more complex. We are being
// conservative here because the kill markers cannot be trusted after
// if-conversion:
// %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
// ...
// STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
// %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
// STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
//
// The first R6 kill is not really a kill since it's killed by a predicated
// instruction which may not be executed. The second R6 def may or may not
// re-define R6 so it's not safe to change it since the last R6 use cannot be
// changed.
bool Special = MI->isCall() ||
MI->hasExtraSrcRegAllocReq() ||
TII->isPredicated(MI);
// Scan the register operands for this instruction and update
// Classes and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
const TargetRegisterClass *NewRC = nullptr;
if (i < MI->getDesc().getNumOperands())
NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
if (!Classes[Reg] && NewRC)
Classes[Reg] = NewRC;
else if (!NewRC || Classes[Reg] != NewRC)
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
// Now check for aliases.
for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
// If an alias of the reg is used during the live range, give up.
// Note that this allows us to skip checking if AntiDepReg
// overlaps with any of the aliases, among other things.
unsigned AliasReg = *AI;
if (Classes[AliasReg]) {
Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
}
}
// If we're still willing to consider this register, note the reference.
if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
RegRefs.insert(std::make_pair(Reg, &MO));
// If this reg is tied and live (Classes[Reg] is set to -1), we can't change
// it or any of its sub or super regs. We need to use KeepRegs to mark the
// reg because not all uses of the same reg within an instruction are
// necessarily tagged as tied.
// Example: an x86 "xor %eax, %eax" will have one source operand tied to the
// def register but not the second (see PR20020 for details).
// FIXME: can this check be relaxed to account for undef uses
// of a register? In the above 'xor' example, the uses of %eax are undef, so
// earlier instructions could still replace %eax even though the 'xor'
// itself can't be changed.
if (MI->isRegTiedToUseOperand(i) &&
Classes[Reg] == reinterpret_cast<TargetRegisterClass *>(-1)) {
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs) {
KeepRegs.set(*SubRegs);
}
for (MCSuperRegIterator SuperRegs(Reg, TRI);
SuperRegs.isValid(); ++SuperRegs) {
KeepRegs.set(*SuperRegs);
}
}
if (MO.isUse() && Special) {
if (!KeepRegs.test(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
KeepRegs.set(*SubRegs);
}
}
}
}
void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
unsigned Count) {
// Update liveness.
// Proceeding upwards, registers that are defined but not used in this
// instruction are now dead.
assert(!MI->isKill() && "Attempting to scan a kill instruction");
if (!TII->isPredicated(MI)) {
// Predicated defs are modeled as read + write, i.e. similar to two
// address updates.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask())
for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i)
if (MO.clobbersPhysReg(i)) {
DefIndices[i] = Count;
KillIndices[i] = ~0u;
KeepRegs.reset(i);
Classes[i] = nullptr;
RegRefs.erase(i);
}
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isDef()) continue;
// If we've already marked this reg as unchangeable, carry on.
if (KeepRegs.test(Reg)) continue;
// Ignore two-addr defs.
if (MI->isRegTiedToUseOperand(i)) continue;
// For the reg itself and all subregs: update the def to current;
// reset the kill state, any restrictions, and references.
for (MCSubRegIterator SRI(Reg, TRI, true); SRI.isValid(); ++SRI) {
unsigned SubregReg = *SRI;
DefIndices[SubregReg] = Count;
KillIndices[SubregReg] = ~0u;
KeepRegs.reset(SubregReg);
Classes[SubregReg] = nullptr;
RegRefs.erase(SubregReg);
}
// Conservatively mark super-registers as unusable.
for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
Classes[*SR] = reinterpret_cast<TargetRegisterClass *>(-1);
}
}
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isUse()) continue;
const TargetRegisterClass *NewRC = nullptr;
if (i < MI->getDesc().getNumOperands())
NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
if (!Classes[Reg] && NewRC)
Classes[Reg] = NewRC;
else if (!NewRC || Classes[Reg] != NewRC)
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
RegRefs.insert(std::make_pair(Reg, &MO));
// If the register (or an alias) wasn't previously live, this use is a kill.
// Repeat for all aliases.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
unsigned AliasReg = *AI;
if (KillIndices[AliasReg] == ~0u) {
KillIndices[AliasReg] = Count;
DefIndices[AliasReg] = ~0u;
}
}
}
}
// Check all machine operands that reference the antidependent register and must
// be replaced by NewReg. Return true if any of their parent instructions may
// clobber the new register.
//
// Note: AntiDepReg may be referenced by a two-address instruction such that
// its use operand is tied to a def operand. We guard against the case in which
// the two-address instruction also defines NewReg, as may happen with
// pre/postincrement loads. In this case, both the use and def operands are in
// RegRefs because the def is inserted by PrescanInstruction and not erased
// during ScanInstruction. So checking for an instruction with definitions of
// both NewReg and AntiDepReg covers it.
bool
CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
RegRefIter RegRefEnd,
unsigned NewReg)
{
for (RegRefIter I = RegRefBegin; I != RegRefEnd; ++I ) {
MachineOperand *RefOper = I->second;
// Don't allow the instruction defining AntiDepReg to earlyclobber its
// operands, in case they may be assigned to NewReg. In this case antidep
// breaking must fail, but it's too rare to bother optimizing.
if (RefOper->isDef() && RefOper->isEarlyClobber())
return true;
// Handle cases in which this instruction defines NewReg.
MachineInstr *MI = RefOper->getParent();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &CheckOper = MI->getOperand(i);
if (CheckOper.isRegMask() && CheckOper.clobbersPhysReg(NewReg))
return true;
if (!CheckOper.isReg() || !CheckOper.isDef() ||
CheckOper.getReg() != NewReg)
continue;
// Don't allow the instruction to define NewReg and AntiDepReg.
// When AntiDepReg is renamed it will be an illegal op.
if (RefOper->isDef())
return true;
// Don't allow an instruction using AntiDepReg to be earlyclobbered by
// NewReg.
if (CheckOper.isEarlyClobber())
return true;
// Don't allow inline asm to define NewReg at all. Who knows what it's
// doing with it.
if (MI->isInlineAsm())
return true;
}
}
return false;
}
unsigned CriticalAntiDepBreaker::
findSuitableFreeRegister(RegRefIter RegRefBegin,
RegRefIter RegRefEnd,
unsigned AntiDepReg,
unsigned LastNewReg,
const TargetRegisterClass *RC,
SmallVectorImpl<unsigned> &Forbid)
{
ArrayRef<MCPhysReg> Order = RegClassInfo.getOrder(RC);
for (unsigned i = 0; i != Order.size(); ++i) {
unsigned NewReg = Order[i];
// Don't replace a register with itself.
if (NewReg == AntiDepReg) continue;
// Don't replace a register with one that was recently used to repair
// an anti-dependence with this AntiDepReg, because that would
// re-introduce that anti-dependence.
if (NewReg == LastNewReg) continue;
// If any instructions that define AntiDepReg also define NewReg, it's not
// suitable. For example, an instruction with multiple definitions can result
// in this condition.
if (isNewRegClobberedByRefs(RegRefBegin, RegRefEnd, NewReg)) continue;
// If NewReg is dead and NewReg's most recent def is not before
// AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
assert(((KillIndices[AntiDepReg] == ~0u) != (DefIndices[AntiDepReg] == ~0u))
&& "Kill and Def maps aren't consistent for AntiDepReg!");
assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u))
&& "Kill and Def maps aren't consistent for NewReg!");
if (KillIndices[NewReg] != ~0u ||
Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
KillIndices[AntiDepReg] > DefIndices[NewReg])
continue;
// If NewReg overlaps any of the forbidden registers, we can't use it.
bool Forbidden = false;
for (SmallVectorImpl<unsigned>::iterator it = Forbid.begin(),
ite = Forbid.end(); it != ite; ++it)
if (TRI->regsOverlap(NewReg, *it)) {
Forbidden = true;
break;
}
if (Forbidden) continue;
return NewReg;
}
// No registers are free and available!
return 0;
}
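// An anti-dependence (write-after-read hazard) arises when an instruction
// must write a register only after an earlier instruction has read it, e.g.:
//
//   %R1<def> = ADD %R2, %R3   ; reads R2
//   %R2<def> = SUB %R4, %R5   ; anti-dependent on the ADD's read of R2
//
// Renaming the second def to a free register (say %R6) removes the ordering
// constraint and lets the post-RA scheduler move the SUB earlier.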
unsigned CriticalAntiDepBreaker::
BreakAntiDependencies(const std::vector<SUnit>& SUnits,
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned InsertPosIndex,
DbgValueVector &DbgValues) {
// The code below assumes that there is at least one instruction,
// so just duck out immediately if the block is empty.
if (SUnits.empty()) return 0;
// Keep a map of the MachineInstr*'s back to the SUnit representing them.
// This is used for updating debug information.
//
// FIXME: Replace this with the existing map in ScheduleDAGInstrs::MISUnitMap
DenseMap<MachineInstr*,const SUnit*> MISUnitMap;
// Find the node at the bottom of the critical path.
const SUnit *Max = nullptr;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
const SUnit *SU = &SUnits[i];
MISUnitMap[SU->getInstr()] = SU;
if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
Max = SU;
}
#ifndef NDEBUG
{
DEBUG(dbgs() << "Critical path has total latency "
<< (Max->getDepth() + Max->Latency) << "\n");
DEBUG(dbgs() << "Available regs:");
for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
if (KillIndices[Reg] == ~0u)
DEBUG(dbgs() << " " << TRI->getName(Reg));
}
DEBUG(dbgs() << '\n');
}
#endif
// Track progress along the critical path through the SUnit graph as we walk
// the instructions.
const SUnit *CriticalPathSU = Max;
MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
// Consider this pattern:
// A = ...
// ... = A
// A = ...
// ... = A
// A = ...
// ... = A
// A = ...
// ... = A
// There are three anti-dependencies here, and without special care,
// we'd break all of them using the same register:
// A = ...
// ... = A
// B = ...
// ... = B
// B = ...
// ... = B
// B = ...
// ... = B
// because at each anti-dependence, B is the first free register that
// isn't A. This re-introduces anti-dependencies at all but one of the
// original anti-dependencies that we were trying to break. To avoid this,
// keep track of the most recent register that each register was replaced
// with, and avoid using it to repair an anti-dependence on the same
// register.
// This lets us produce this:
// A = ...
// ... = A
// B = ...
// ... = B
// C = ...
// ... = C
// B = ...
// ... = B
// This still has an anti-dependence on B, but at least it isn't on the
// original critical path.
//
// TODO: If we tracked more than one register here, we could potentially
// fix that remaining critical edge too. This is a little more involved,
// because unlike the most recent register, less recent registers should
// still be considered, though only if no other registers are available.
std::vector<unsigned> LastNewReg(TRI->getNumRegs(), 0);
// Attempt to break anti-dependence edges on the critical path. Walk the
// instructions from the bottom up, tracking information about liveness
// as we go to help determine which registers are available.
unsigned Broken = 0;
unsigned Count = InsertPosIndex - 1;
for (MachineBasicBlock::iterator I = End, E = Begin; I != E; --Count) {
MachineInstr *MI = --I;
// Kill instructions can define registers but are really nops, and there
// might be a real definition earlier that needs to be paired with uses
// dominated by this kill.
// FIXME: It may be possible to remove the isKill() restriction once PR18663
// has been properly fixed. There can be value in processing kills as seen
// in the AggressiveAntiDepBreaker class.
if (MI->isDebugValue() || MI->isKill())
continue;
// Check if this instruction has a dependence on the critical path that
// is an anti-dependence that we may be able to break. If it is, set
// AntiDepReg to the non-zero register associated with the anti-dependence.
//
// We limit our attention to the critical path as a heuristic to avoid
// breaking anti-dependence edges that aren't going to significantly
// impact the overall schedule. There are a limited number of registers
// and we want to save them for the important edges.
//
// TODO: Instructions with multiple defs could have multiple
// anti-dependencies. The current code here only knows how to break one
// edge per instruction. Note that we'd have to be able to break all of
// the anti-dependencies in an instruction in order to be effective.
unsigned AntiDepReg = 0;
if (MI == CriticalPathMI) {
if (const SDep *Edge = CriticalPathStep(CriticalPathSU)) {
const SUnit *NextSU = Edge->getSUnit();
// Only consider anti-dependence edges.
if (Edge->getKind() == SDep::Anti) {
AntiDepReg = Edge->getReg();
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
if (!MRI.isAllocatable(AntiDepReg))
// Don't break anti-dependencies on non-allocatable registers.
AntiDepReg = 0;
else if (KeepRegs.test(AntiDepReg))
// Don't break anti-dependencies if a use down below requires
// this exact register.
AntiDepReg = 0;
else {
// If the SUnit has other dependencies on the SUnit that it
// anti-depends on, don't bother breaking the anti-dependency
// since those edges would prevent such units from being
// scheduled past each other regardless.
//
// Also, if there are dependencies on other SUnits with the
// same register as the anti-dependency, don't attempt to
// break it.
for (SUnit::const_pred_iterator P = CriticalPathSU->Preds.begin(),
PE = CriticalPathSU->Preds.end(); P != PE; ++P)
if (P->getSUnit() == NextSU ?
(P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
(P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
AntiDepReg = 0;
break;
}
}
}
CriticalPathSU = NextSU;
CriticalPathMI = CriticalPathSU->getInstr();
} else {
// We've reached the end of the critical path.
CriticalPathSU = nullptr;
CriticalPathMI = nullptr;
}
}
PrescanInstruction(MI);
SmallVector<unsigned, 2> ForbidRegs;
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
// defined in a call must not be changed (ABI).
if (MI->isCall() || MI->hasExtraDefRegAllocReq() || TII->isPredicated(MI))
// If this instruction's defs have special allocation requirement, don't
// break this anti-dependency.
AntiDepReg = 0;
else if (AntiDepReg) {
// If this instruction has a use of AntiDepReg, breaking it
// is invalid. If the instruction defines other registers,
// save a list of them so that we don't pick a new register
// that overlaps any of them.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (MO.isUse() && TRI->regsOverlap(AntiDepReg, Reg)) {
AntiDepReg = 0;
break;
}
if (MO.isDef() && Reg != AntiDepReg)
ForbidRegs.push_back(Reg);
}
}
// Determine AntiDepReg's register class, if it is live and is
// consistently used within a single class.
const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg]
: nullptr;
assert((AntiDepReg == 0 || RC != nullptr) &&
"Register should be live if it's causing an anti-dependence!");
if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
AntiDepReg = 0;
// Look for a suitable register to use to break the anti-dependence.
//
// TODO: Instead of picking the first free register, consider which might
// be the best.
if (AntiDepReg != 0) {
std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
std::multimap<unsigned, MachineOperand *>::iterator>
Range = RegRefs.equal_range(AntiDepReg);
if (unsigned NewReg = findSuitableFreeRegister(Range.first, Range.second,
AntiDepReg,
LastNewReg[AntiDepReg],
RC, ForbidRegs)) {
DEBUG(dbgs() << "Breaking anti-dependence edge on "
<< TRI->getName(AntiDepReg)
<< " with " << RegRefs.count(AntiDepReg) << " references"
<< " using " << TRI->getName(NewReg) << "!\n");
// Update the references to the old register to refer to the new
// register.
for (std::multimap<unsigned, MachineOperand *>::iterator
Q = Range.first, QE = Range.second; Q != QE; ++Q) {
Q->second->setReg(NewReg);
// If the SU for the instruction being updated has debug information
// related to the anti-dependency register, make sure to update that
// as well.
const SUnit *SU = MISUnitMap[Q->second->getParent()];
if (!SU) continue;
for (DbgValueVector::iterator DVI = DbgValues.begin(),
DVE = DbgValues.end(); DVI != DVE; ++DVI)
if (DVI->second == Q->second->getParent())
UpdateDbgValue(DVI->first, AntiDepReg, NewReg);
}
// We just went back in time and modified history; the
// liveness information for the anti-dependence reg is now
// inconsistent. Set the state as if it were dead.
Classes[NewReg] = Classes[AntiDepReg];
DefIndices[NewReg] = DefIndices[AntiDepReg];
KillIndices[NewReg] = KillIndices[AntiDepReg];
assert(((KillIndices[NewReg] == ~0u) !=
(DefIndices[NewReg] == ~0u)) &&
"Kill and Def maps aren't consistent for NewReg!");
Classes[AntiDepReg] = nullptr;
DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
KillIndices[AntiDepReg] = ~0u;
assert(((KillIndices[AntiDepReg] == ~0u) !=
(DefIndices[AntiDepReg] == ~0u)) &&
"Kill and Def maps aren't consistent for AntiDepReg!");
RegRefs.erase(AntiDepReg);
LastNewReg[AntiDepReg] = NewReg;
++Broken;
}
}
ScanInstruction(MI, Count);
}
return Broken;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveRangeEdit.cpp | //===-- LiveRangeEdit.cpp - Basic tools for editing a register live range -===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The LiveRangeEdit class represents changes done to a virtual register when it
// is spilled or split.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumDCEDeleted, "Number of instructions deleted by DCE");
STATISTIC(NumDCEFoldedLoads, "Number of single use loads folded after DCE");
STATISTIC(NumFracRanges, "Number of live ranges fractured by DCE");
void LiveRangeEdit::Delegate::anchor() { }
LiveInterval &LiveRangeEdit::createEmptyIntervalFrom(unsigned OldReg) {
unsigned VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
if (VRM) {
VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg));
}
LiveInterval &LI = LIS.createEmptyInterval(VReg);
return LI;
}
unsigned LiveRangeEdit::createFrom(unsigned OldReg) {
unsigned VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
if (VRM) {
VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg));
}
return VReg;
}
bool LiveRangeEdit::checkRematerializable(VNInfo *VNI,
const MachineInstr *DefMI,
AliasAnalysis *aa) {
assert(DefMI && "Missing instruction");
ScannedRemattable = true;
if (!TII.isTriviallyReMaterializable(DefMI, aa))
return false;
Remattable.insert(VNI);
return true;
}
void LiveRangeEdit::scanRemattable(AliasAnalysis *aa) {
for (VNInfo *VNI : getParent().valnos) {
if (VNI->isUnused())
continue;
MachineInstr *DefMI = LIS.getInstructionFromIndex(VNI->def);
if (!DefMI)
continue;
checkRematerializable(VNI, DefMI, aa);
}
ScannedRemattable = true;
}
bool LiveRangeEdit::anyRematerializable(AliasAnalysis *aa) {
if (!ScannedRemattable)
scanRemattable(aa);
return !Remattable.empty();
}
/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
SlotIndex OrigIdx,
SlotIndex UseIdx) const {
OrigIdx = OrigIdx.getRegSlot(true);
UseIdx = UseIdx.getRegSlot(true);
for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = OrigMI->getOperand(i);
if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
continue;
// We can't remat physreg uses, unless it is a constant.
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
if (MRI.isConstantPhysReg(MO.getReg(), *OrigMI->getParent()->getParent()))
continue;
return false;
}
LiveInterval &li = LIS.getInterval(MO.getReg());
const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
if (!OVNI)
continue;
// Don't allow rematerialization immediately after the original def.
// It would be incorrect if OrigMI redefines the register.
// See PR14098.
if (SlotIndex::isSameInstr(OrigIdx, UseIdx))
return false;
if (OVNI != li.getVNInfoAt(UseIdx))
return false;
}
return true;
}
bool LiveRangeEdit::canRematerializeAt(Remat &RM,
SlotIndex UseIdx,
bool cheapAsAMove) {
assert(ScannedRemattable && "Call anyRematerializable first");
// Use scanRemattable info.
if (!Remattable.count(RM.ParentVNI))
return false;
// Determine the defining instruction: use RM.OrigMI if provided, otherwise
// look it up from the parent value's def slot.
SlotIndex DefIdx;
if (RM.OrigMI)
DefIdx = LIS.getInstructionIndex(RM.OrigMI);
else {
DefIdx = RM.ParentVNI->def;
RM.OrigMI = LIS.getInstructionFromIndex(DefIdx);
assert(RM.OrigMI && "No defining instruction for remattable value");
}
// If only cheap remats were requested, bail out early.
if (cheapAsAMove && !TII.isAsCheapAsAMove(RM.OrigMI))
return false;
// Verify that all used registers are available with the same values.
if (!allUsesAvailableAt(RM.OrigMI, DefIdx, UseIdx))
return false;
return true;
}
SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg,
const Remat &RM,
const TargetRegisterInfo &tri,
bool Late) {
assert(RM.OrigMI && "Invalid remat");
TII.reMaterialize(MBB, MI, DestReg, 0, RM.OrigMI, tri);
Rematted.insert(RM.ParentVNI);
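// reMaterialize inserted the new def immediately before MI, so --MI below
// refers to the freshly created instruction when it is added to the slot
// index maps.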
return LIS.getSlotIndexes()->insertMachineInstrInMaps(--MI, Late)
.getRegSlot();
}
void LiveRangeEdit::eraseVirtReg(unsigned Reg) {
if (TheDelegate && TheDelegate->LRE_CanEraseVirtReg(Reg))
LIS.removeInterval(Reg);
}
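// foldAsLoad - If LI has a single def that is a foldable load with a single
// use, fold the load into the user, e.g. (an illustrative, x86-flavored
// sketch):
//
//   %v = MOV32rm <fi#0>        ; single def: a load that can be folded
//   %x = ADD32rr %x, %v        ; single use
// =>
//   %x = ADD32rm %x, <fi#0>
//
// On success the load is marked dead and queued for deletion by the caller.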
bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
SmallVectorImpl<MachineInstr*> &Dead) {
MachineInstr *DefMI = nullptr, *UseMI = nullptr;
// Check that there is a single def and a single use.
for (MachineOperand &MO : MRI.reg_nodbg_operands(LI->reg)) {
MachineInstr *MI = MO.getParent();
if (MO.isDef()) {
if (DefMI && DefMI != MI)
return false;
if (!MI->canFoldAsLoad())
return false;
DefMI = MI;
} else if (!MO.isUndef()) {
if (UseMI && UseMI != MI)
return false;
// FIXME: Targets don't know how to fold subreg uses.
if (MO.getSubReg())
return false;
UseMI = MI;
}
}
if (!DefMI || !UseMI)
return false;
// Since we're moving the DefMI load, make sure we're not extending any live
// ranges.
if (!allUsesAvailableAt(DefMI,
LIS.getInstructionIndex(DefMI),
LIS.getInstructionIndex(UseMI)))
return false;
// We also need to make sure it is safe to move the load.
// Assume there are stores between DefMI and UseMI.
bool SawStore = true;
if (!DefMI->isSafeToMove(nullptr, SawStore))
return false;
DEBUG(dbgs() << "Try to fold single def: " << *DefMI
<< " into single use: " << *UseMI);
SmallVector<unsigned, 8> Ops;
if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
return false;
MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI);
if (!FoldMI)
return false;
DEBUG(dbgs() << " folded: " << *FoldMI);
LIS.ReplaceMachineInstrInMaps(UseMI, FoldMI);
UseMI->eraseFromParent();
DefMI->addRegisterDead(LI->reg, nullptr);
Dead.push_back(DefMI);
++NumDCEFoldedLoads;
return true;
}
bool LiveRangeEdit::useIsKill(const LiveInterval &LI,
const MachineOperand &MO) const {
const MachineInstr *MI = MO.getParent();
SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
if (LI.Query(Idx).isKill())
return true;
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
unsigned SubReg = MO.getSubReg();
unsigned LaneMask = TRI.getSubRegIndexLaneMask(SubReg);
for (const LiveInterval::SubRange &S : LI.subranges()) {
if ((S.LaneMask & LaneMask) != 0 && S.Query(Idx).isKill())
return true;
}
return false;
}
/// Find all live intervals that need to shrink, then remove the instruction.
void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) {
assert(MI->allDefsAreDead() && "Def isn't really dead");
SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
// Never delete a bundled instruction.
if (MI->isBundled()) {
return;
}
// Never delete inline asm.
if (MI->isInlineAsm()) {
DEBUG(dbgs() << "Won't delete: " << Idx << '\t' << *MI);
return;
}
// Use the same criteria as DeadMachineInstructionElim.
bool SawStore = false;
if (!MI->isSafeToMove(nullptr, SawStore)) {
DEBUG(dbgs() << "Can't delete: " << Idx << '\t' << *MI);
return;
}
DEBUG(dbgs() << "Deleting dead def " << Idx << '\t' << *MI);
// Collect virtual registers to be erased after MI is gone.
SmallVector<unsigned, 8> RegsToErase;
bool ReadsPhysRegs = false;
// Check for live intervals that may shrink
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
if (!MOI->isReg())
continue;
unsigned Reg = MOI->getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
// Check if MI reads any unreserved physregs.
if (Reg && MOI->readsReg() && !MRI.isReserved(Reg))
ReadsPhysRegs = true;
else if (MOI->isDef())
LIS.removePhysRegDefAt(Reg, Idx);
continue;
}
LiveInterval &LI = LIS.getInterval(Reg);
// Shrink read registers, unless it is likely to be expensive and
// unlikely to change anything. We typically don't want to shrink the
// PIC base register that has lots of uses everywhere.
// Always shrink COPY uses that probably come from live range splitting.
if ((MI->readsVirtualRegister(Reg) && (MI->isCopy() || MOI->isDef())) ||
(MOI->readsReg() && (MRI.hasOneNonDBGUse(Reg) || useIsKill(LI, *MOI))))
ToShrink.insert(&LI);
// Remove defined value.
if (MOI->isDef()) {
if (TheDelegate && LI.getVNInfoAt(Idx) != nullptr)
TheDelegate->LRE_WillShrinkVirtReg(LI.reg);
LIS.removeVRegDefAt(LI, Idx);
if (LI.empty())
RegsToErase.push_back(Reg);
}
}
// Currently, we don't support DCE of physreg live ranges. If MI reads
// any unreserved physregs, don't erase the instruction, but turn it into
// a KILL instead. This way, the physreg live ranges don't end up
// dangling.
// FIXME: It would be better to have something like shrinkToUses() for
// physregs. That could potentially enable more DCE and it would free up
// the physreg. It would not happen often, though.
if (ReadsPhysRegs) {
MI->setDesc(TII.get(TargetOpcode::KILL));
// Remove all operands that aren't physregs.
for (unsigned i = MI->getNumOperands(); i; --i) {
const MachineOperand &MO = MI->getOperand(i-1);
if (MO.isReg() && TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
continue;
MI->RemoveOperand(i-1);
}
DEBUG(dbgs() << "Converted physregs to:\t" << *MI);
} else {
if (TheDelegate)
TheDelegate->LRE_WillEraseInstruction(MI);
LIS.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
++NumDCEDeleted;
}
// Erase any virtregs that are now empty and unused. There may be <undef>
// uses around. Keep the empty live range in that case.
for (unsigned i = 0, e = RegsToErase.size(); i != e; ++i) {
unsigned Reg = RegsToErase[i];
if (LIS.hasInterval(Reg) && MRI.reg_nodbg_empty(Reg)) {
ToShrink.remove(&LIS.getInterval(Reg));
eraseVirtReg(Reg);
}
}
}
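// eliminateDeadDefs - Drain the Dead worklist to a fixed point: erasing a
// dead def may shrink other live intervals, and shrinking an interval may in
// turn expose new dead defs, or split the interval into connected components
// that each get their own virtual register.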
void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
ArrayRef<unsigned> RegsBeingSpilled) {
ToShrinkSet ToShrink;
for (;;) {
// Erase all dead defs.
while (!Dead.empty())
eliminateDeadDef(Dead.pop_back_val(), ToShrink);
if (ToShrink.empty())
break;
// Shrink just one live interval. Then delete new dead defs.
LiveInterval *LI = ToShrink.back();
ToShrink.pop_back();
if (foldAsLoad(LI, Dead))
continue;
if (TheDelegate)
TheDelegate->LRE_WillShrinkVirtReg(LI->reg);
if (!LIS.shrinkToUses(LI, &Dead))
continue;
// Don't create new intervals for a register being spilled.
// The new intervals would have to be spilled anyway, so it's not worth it.
// Also, they currently aren't spilled, so creating them and not spilling
// them results in incorrect code.
bool BeingSpilled = false;
for (unsigned i = 0, e = RegsBeingSpilled.size(); i != e; ++i) {
if (LI->reg == RegsBeingSpilled[i]) {
BeingSpilled = true;
break;
}
}
if (BeingSpilled) continue;
// LI may have been separated, create new intervals.
LI->RenumberValues();
ConnectedVNInfoEqClasses ConEQ(LIS);
unsigned NumComp = ConEQ.Classify(LI);
if (NumComp <= 1)
continue;
++NumFracRanges;
bool IsOriginal = VRM && VRM->getOriginal(LI->reg) == LI->reg;
DEBUG(dbgs() << NumComp << " components: " << *LI << '\n');
SmallVector<LiveInterval*, 8> Dups(1, LI);
for (unsigned i = 1; i != NumComp; ++i) {
Dups.push_back(&createEmptyIntervalFrom(LI->reg));
// If LI is an original interval that hasn't been split yet, make the new
// intervals their own originals instead of referring to LI. The original
// interval must contain all the split products, and LI doesn't.
if (IsOriginal)
VRM->setIsSplitFromReg(Dups.back()->reg, 0);
if (TheDelegate)
TheDelegate->LRE_DidCloneVirtReg(Dups.back()->reg, LI->reg);
}
ConEQ.Distribute(&Dups[0], MRI);
DEBUG({
for (unsigned i = 0; i != NumComp; ++i)
dbgs() << '\t' << *Dups[i] << '\n';
});
}
}
// Keep track of new virtual registers created via
// MachineRegisterInfo::createVirtualRegister.
void
LiveRangeEdit::MRI_NoteNewVirtualRegister(unsigned VReg)
{
if (VRM)
VRM->grow();
NewRegs.push_back(VReg);
}
void
LiveRangeEdit::calculateRegClassAndHint(MachineFunction &MF,
const MachineLoopInfo &Loops,
const MachineBlockFrequencyInfo &MBFI) {
VirtRegAuxInfo VRAI(MF, LIS, Loops, MBFI);
for (unsigned I = 0, Size = size(); I < Size; ++I) {
LiveInterval &LI = LIS.getInterval(get(I));
if (MRI.recomputeRegClass(LI.reg))
DEBUG({
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
dbgs() << "Inflated " << PrintReg(LI.reg) << " to "
<< TRI->getRegClassName(MRI.getRegClass(LI.reg)) << '\n';
});
VRAI.calculateSpillWeightAndHint(LI);
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/SplitKit.cpp | //===---------- SplitKit.cpp - Toolkit for splitting live ranges ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SplitAnalysis class as well as mutator functions for
// live range splitting.
//
//===----------------------------------------------------------------------===//
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumFinished, "Number of splits finished");
STATISTIC(NumSimple, "Number of splits that were simple");
STATISTIC(NumCopies, "Number of copies inserted for splitting");
STATISTIC(NumRemats, "Number of rematerialized defs for splitting");
STATISTIC(NumRepairs, "Number of invalid live ranges repaired");
//===----------------------------------------------------------------------===//
// Split Analysis
//===----------------------------------------------------------------------===//
SplitAnalysis::SplitAnalysis(const VirtRegMap &vrm, const LiveIntervals &lis,
const MachineLoopInfo &mli)
: MF(vrm.getMachineFunction()), VRM(vrm), LIS(lis), Loops(mli),
TII(*MF.getSubtarget().getInstrInfo()), CurLI(nullptr),
LastSplitPoint(MF.getNumBlockIDs()) {}
void SplitAnalysis::clear() {
UseSlots.clear();
UseBlocks.clear();
ThroughBlocks.clear();
CurLI = nullptr;
DidRepairRange = false;
}
SlotIndex SplitAnalysis::computeLastSplitPoint(unsigned Num) {
const MachineBasicBlock *MBB = MF.getBlockNumbered(Num);
const MachineBasicBlock *LPad = MBB->getLandingPadSuccessor();
std::pair<SlotIndex, SlotIndex> &LSP = LastSplitPoint[Num];
SlotIndex MBBEnd = LIS.getMBBEndIdx(MBB);
// Compute split points on the first call. The pair is independent of the
// current live interval.
if (!LSP.first.isValid()) {
MachineBasicBlock::const_iterator FirstTerm = MBB->getFirstTerminator();
if (FirstTerm == MBB->end())
LSP.first = MBBEnd;
else
LSP.first = LIS.getInstructionIndex(FirstTerm);
// If there is a landing pad successor, also find the call instruction.
if (!LPad)
return LSP.first;
// There may not be a call instruction in the block, in which case we ignore LPad.
LSP.second = LSP.first;
for (MachineBasicBlock::const_iterator I = MBB->end(), E = MBB->begin();
I != E;) {
--I;
if (I->isCall()) {
LSP.second = LIS.getInstructionIndex(I);
break;
}
}
}
// If CurLI is live into a landing pad successor, move the last split point
// back to the call that may throw.
if (!LPad || !LSP.second || !LIS.isLiveInToMBB(*CurLI, LPad))
return LSP.first;
// Find the value leaving MBB.
const VNInfo *VNI = CurLI->getVNInfoBefore(MBBEnd);
if (!VNI)
return LSP.first;
// If the value leaving MBB was defined after the call in MBB, it can't
// really be live-in to the landing pad. This can happen if the landing pad
// has a PHI, and this register is undef on the exceptional edge.
// <rdar://problem/10664933>
if (!SlotIndex::isEarlierInstr(VNI->def, LSP.second) && VNI->def < MBBEnd)
return LSP.first;
// Value is properly live-in to the landing pad.
// Only allow splits before the call.
return LSP.second;
}
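// Illustrative sketch for computeLastSplitPoint (hypothetical indices): in a
// block whose first terminator sits at index 40 and whose last call, with an
// EH edge to a landing pad, sits at index 36, LSP.first is 40 and LSP.second
// is 36. A value that is live into the landing pad may only be split at or
// before index 36; otherwise index 40 is the limit.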
MachineBasicBlock::iterator
SplitAnalysis::getLastSplitPointIter(MachineBasicBlock *MBB) {
SlotIndex LSP = getLastSplitPoint(MBB->getNumber());
if (LSP == LIS.getMBBEndIdx(MBB))
return MBB->end();
return LIS.getInstructionFromIndex(LSP);
}
/// analyzeUses - Count instructions, basic blocks, and loops using CurLI.
void SplitAnalysis::analyzeUses() {
assert(UseSlots.empty() && "Call clear first");
// First get all the defs from the interval values. This provides the correct
// slots for early clobbers.
for (const VNInfo *VNI : CurLI->valnos)
if (!VNI->isPHIDef() && !VNI->isUnused())
UseSlots.push_back(VNI->def);
// Get use slots from the use-def chain.
const MachineRegisterInfo &MRI = MF.getRegInfo();
for (MachineOperand &MO : MRI.use_nodbg_operands(CurLI->reg))
if (!MO.isUndef())
UseSlots.push_back(LIS.getInstructionIndex(MO.getParent()).getRegSlot());
array_pod_sort(UseSlots.begin(), UseSlots.end());
// Remove duplicates, keeping the smaller slot for each instruction.
// That is what we want for early clobbers.
UseSlots.erase(std::unique(UseSlots.begin(), UseSlots.end(),
SlotIndex::isSameInstr),
UseSlots.end());
// Compute per-live block info.
if (!calcLiveBlockInfo()) {
// FIXME: calcLiveBlockInfo found inconsistencies in the live range.
// I am looking at you, RegisterCoalescer!
DidRepairRange = true;
++NumRepairs;
DEBUG(dbgs() << "*** Fixing inconsistent live interval! ***\n");
const_cast<LiveIntervals&>(LIS)
.shrinkToUses(const_cast<LiveInterval*>(CurLI));
UseBlocks.clear();
ThroughBlocks.clear();
bool fixed = calcLiveBlockInfo();
(void)fixed;
assert(fixed && "Couldn't fix broken live interval");
}
DEBUG(dbgs() << "Analyze counted "
<< UseSlots.size() << " instrs in "
<< UseBlocks.size() << " blocks, through "
<< NumThroughBlocks << " blocks.\n");
}
/// calcLiveBlockInfo - Fill the LiveBlocks array with information about blocks
/// where CurLI is live.
bool SplitAnalysis::calcLiveBlockInfo() {
ThroughBlocks.resize(MF.getNumBlockIDs());
NumThroughBlocks = NumGapBlocks = 0;
if (CurLI->empty())
return true;
LiveInterval::const_iterator LVI = CurLI->begin();
LiveInterval::const_iterator LVE = CurLI->end();
SmallVectorImpl<SlotIndex>::const_iterator UseI, UseE;
UseI = UseSlots.begin();
UseE = UseSlots.end();
// Loop over basic blocks where CurLI is live.
MachineFunction::iterator MFI = LIS.getMBBFromIndex(LVI->start);
for (;;) {
BlockInfo BI;
BI.MBB = MFI;
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
// If the block contains no uses, the range must be live through. At one
// point, RegisterCoalescer could create dangling ranges that ended
// mid-block.
if (UseI == UseE || *UseI >= Stop) {
++NumThroughBlocks;
ThroughBlocks.set(BI.MBB->getNumber());
// The range shouldn't end mid-block when there are no uses; if it does, the
// live range is inconsistent and calcLiveBlockInfo reports failure.
if (LVI->end < Stop)
return false;
} else {
// This block has uses. Find the first and last uses in the block.
BI.FirstInstr = *UseI;
assert(BI.FirstInstr >= Start);
do ++UseI;
while (UseI != UseE && *UseI < Stop);
BI.LastInstr = UseI[-1];
assert(BI.LastInstr < Stop);
// LVI is the first live segment overlapping MBB.
BI.LiveIn = LVI->start <= Start;
// When not live in, the first use should be a def.
if (!BI.LiveIn) {
assert(LVI->start == LVI->valno->def && "Dangling Segment start");
assert(LVI->start == BI.FirstInstr && "First instr should be a def");
BI.FirstDef = BI.FirstInstr;
}
// Look for gaps in the live range.
BI.LiveOut = true;
while (LVI->end < Stop) {
SlotIndex LastStop = LVI->end;
if (++LVI == LVE || LVI->start >= Stop) {
BI.LiveOut = false;
BI.LastInstr = LastStop;
break;
}
if (LastStop < LVI->start) {
// There is a gap in the live range. Create duplicate entries for the
// live-in snippet and the live-out snippet.
++NumGapBlocks;
// Push the Live-in part.
BI.LiveOut = false;
UseBlocks.push_back(BI);
UseBlocks.back().LastInstr = LastStop;
// Set up BI for the live-out part.
BI.LiveIn = false;
BI.LiveOut = true;
BI.FirstInstr = BI.FirstDef = LVI->start;
}
// A Segment that starts in the middle of the block must be a def.
assert(LVI->start == LVI->valno->def && "Dangling Segment start");
if (!BI.FirstDef)
BI.FirstDef = LVI->start;
}
UseBlocks.push_back(BI);
// LVI is now at LVE or LVI->end >= Stop.
if (LVI == LVE)
break;
}
// Live segment ends exactly at Stop. Move to the next segment.
if (LVI->end == Stop && ++LVI == LVE)
break;
// Pick the next basic block.
if (LVI->start < Stop)
++MFI;
else
MFI = LIS.getMBBFromIndex(LVI->start);
}
assert(getNumLiveBlocks() == countLiveBlocks(CurLI) && "Bad block count");
return true;
}
unsigned SplitAnalysis::countLiveBlocks(const LiveInterval *cli) const {
if (cli->empty())
return 0;
LiveInterval *li = const_cast<LiveInterval*>(cli);
LiveInterval::iterator LVI = li->begin();
LiveInterval::iterator LVE = li->end();
unsigned Count = 0;
// Loop over basic blocks where li is live.
MachineFunction::const_iterator MFI = LIS.getMBBFromIndex(LVI->start);
SlotIndex Stop = LIS.getMBBEndIdx(MFI);
for (;;) {
++Count;
LVI = li->advanceTo(LVI, Stop);
if (LVI == LVE)
return Count;
do {
++MFI;
Stop = LIS.getMBBEndIdx(MFI);
} while (Stop <= LVI->start);
}
}
bool SplitAnalysis::isOriginalEndpoint(SlotIndex Idx) const {
unsigned OrigReg = VRM.getOriginal(CurLI->reg);
const LiveInterval &Orig = LIS.getInterval(OrigReg);
assert(!Orig.empty() && "Splitting empty interval?");
LiveInterval::const_iterator I = Orig.find(Idx);
// Range containing Idx should begin at Idx.
if (I != Orig.end() && I->start <= Idx)
return I->start == Idx;
// Range does not contain Idx, previous must end at Idx.
return I != Orig.begin() && (--I)->end == Idx;
}
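// Illustrative example for isOriginalEndpoint (made-up indices): if the
// original interval contains the segment [32r;80r), then Idx = 32r and
// Idx = 80r return true (an original def or kill), while Idx = 48r, strictly
// inside the segment, returns false. This keeps repeated splitting from
// shrinking ranges around end points that earlier splits created.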
void SplitAnalysis::analyze(const LiveInterval *li) {
clear();
CurLI = li;
analyzeUses();
}
//===----------------------------------------------------------------------===//
// Split Editor
//===----------------------------------------------------------------------===//
/// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
SplitEditor::SplitEditor(SplitAnalysis &sa, LiveIntervals &lis, VirtRegMap &vrm,
MachineDominatorTree &mdt,
MachineBlockFrequencyInfo &mbfi)
: SA(sa), LIS(lis), VRM(vrm), MRI(vrm.getMachineFunction().getRegInfo()),
MDT(mdt), TII(*vrm.getMachineFunction().getSubtarget().getInstrInfo()),
TRI(*vrm.getMachineFunction().getSubtarget().getRegisterInfo()),
MBFI(mbfi), Edit(nullptr), OpenIdx(0), SpillMode(SM_Partition),
RegAssign(Allocator) {}
void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) {
Edit = &LRE;
SpillMode = SM;
OpenIdx = 0;
RegAssign.clear();
Values.clear();
// Reset the LiveRangeCalc instances needed for this spill mode.
LRCalc[0].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
&LIS.getVNInfoAllocator());
if (SpillMode)
LRCalc[1].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
&LIS.getVNInfoAllocator());
// We don't need an AliasAnalysis since we will only be performing
// cheap-as-a-copy remats anyway.
Edit->anyRematerializable(nullptr);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SplitEditor::dump() const {
if (RegAssign.empty()) {
dbgs() << " empty\n";
return;
}
for (RegAssignMap::const_iterator I = RegAssign.begin(); I.valid(); ++I)
dbgs() << " [" << I.start() << ';' << I.stop() << "):" << I.value();
dbgs() << '\n';
}
#endif
VNInfo *SplitEditor::defValue(unsigned RegIdx,
const VNInfo *ParentVNI,
SlotIndex Idx) {
assert(ParentVNI && "Mapping NULL value");
assert(Idx.isValid() && "Invalid SlotIndex");
assert(Edit->getParent().getVNInfoAt(Idx) == ParentVNI && "Bad Parent VNI");
LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));
// Create a new value.
VNInfo *VNI = LI->getNextValue(Idx, LIS.getVNInfoAllocator());
// Use insert for lookup, so we can add missing values with a second lookup.
std::pair<ValueMap::iterator, bool> InsP =
Values.insert(std::make_pair(std::make_pair(RegIdx, ParentVNI->id),
ValueForcePair(VNI, false)));
// This was the first time (RegIdx, ParentVNI) was mapped.
// Keep it as a simple def without any liveness.
if (InsP.second)
return VNI;
// If the previous value was a simple mapping, add liveness for it now.
if (VNInfo *OldVNI = InsP.first->second.getPointer()) {
SlotIndex Def = OldVNI->def;
LI->addSegment(LiveInterval::Segment(Def, Def.getDeadSlot(), OldVNI));
// No longer a simple mapping. Switch to a complex, non-forced mapping.
InsP.first->second = ValueForcePair();
}
// This is a complex mapping, add liveness for VNI
SlotIndex Def = VNI->def;
LI->addSegment(LiveInterval::Segment(Def, Def.getDeadSlot(), VNI));
return VNI;
}
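// Illustrative walk-through for defValue (hypothetical ids): the first call
// defValue(1, VNI5, 36r) records (RegIdx 1, parent value 5) as a simple
// mapping and adds no liveness. A second call for the same pair upgrades the
// entry to a complex mapping and gives each def a tiny
// [def;def.getDeadSlot()) segment, deferring the real extents to
// LiveRangeCalc.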
void SplitEditor::forceRecompute(unsigned RegIdx, const VNInfo *ParentVNI) {
assert(ParentVNI && "Mapping NULL value");
ValueForcePair &VFP = Values[std::make_pair(RegIdx, ParentVNI->id)];
VNInfo *VNI = VFP.getPointer();
// ParentVNI was either unmapped or already complex mapped. Either way, just
// set the force bit.
if (!VNI) {
VFP.setInt(true);
return;
}
// This was previously a single mapping. Make sure the old def is represented
// by a trivial live range.
SlotIndex Def = VNI->def;
LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));
LI->addSegment(LiveInterval::Segment(Def, Def.getDeadSlot(), VNI));
// Mark as complex mapped, forced.
VFP = ValueForcePair(nullptr, true);
}
VNInfo *SplitEditor::defFromParent(unsigned RegIdx,
VNInfo *ParentVNI,
SlotIndex UseIdx,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) {
MachineInstr *CopyMI = nullptr;
SlotIndex Def;
LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));
// We may be trying to avoid interference that ends at a deleted instruction,
// so always begin RegIdx 0 early and all others late.
bool Late = RegIdx != 0;
// Attempt cheap-as-a-copy rematerialization.
LiveRangeEdit::Remat RM(ParentVNI);
if (Edit->canRematerializeAt(RM, UseIdx, true)) {
Def = Edit->rematerializeAt(MBB, I, LI->reg, RM, TRI, Late);
++NumRemats;
} else {
// Can't remat, just insert a copy from parent.
CopyMI = BuildMI(MBB, I, DebugLoc(), TII.get(TargetOpcode::COPY), LI->reg)
.addReg(Edit->getReg());
Def = LIS.getSlotIndexes()->insertMachineInstrInMaps(CopyMI, Late)
.getRegSlot();
++NumCopies;
}
// Define the value in Reg.
return defValue(RegIdx, ParentVNI, Def);
}
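// Illustrative outcome for defFromParent (hypothetical slot): the new value
// is either rematerialized cheaply or defined by "%new = COPY %parent"
// inserted at I; either way the returned VNInfo's def is the new
// instruction's register slot, say 44r, and RegIdx 0 defs are inserted
// early while all others are inserted late.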
/// Create a new virtual register and live interval.
unsigned SplitEditor::openIntv() {
// Create the complement as index 0.
if (Edit->empty())
Edit->createEmptyInterval();
// Create the open interval.
OpenIdx = Edit->size();
Edit->createEmptyInterval();
return OpenIdx;
}
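// Illustrative convention (hypothetical names): the first openIntv() call
// creates the complement at index 0 and returns 1 for the newly opened
// interval, so a typical client looks like:
//
//   unsigned Intv = SE.openIntv();          // Intv == 1 on the first call
//   SE.useIntv(Start, End);                 // map [Start;End) to interval 1
//
// Only indices >= 1 can later be re-selected with selectIntv().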
void SplitEditor::selectIntv(unsigned Idx) {
assert(Idx != 0 && "Cannot select the complement interval");
assert(Idx < Edit->size() && "Can only select previously opened interval");
DEBUG(dbgs() << " selectIntv " << OpenIdx << " -> " << Idx << '\n');
OpenIdx = Idx;
}
SlotIndex SplitEditor::enterIntvBefore(SlotIndex Idx) {
assert(OpenIdx && "openIntv not called before enterIntvBefore");
DEBUG(dbgs() << " enterIntvBefore " << Idx);
Idx = Idx.getBaseIndex();
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
if (!ParentVNI) {
DEBUG(dbgs() << ": not live\n");
return Idx;
}
DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
assert(MI && "enterIntvBefore called with invalid index");
VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(), MI);
return VNI->def;
}
SlotIndex SplitEditor::enterIntvAfter(SlotIndex Idx) {
assert(OpenIdx && "openIntv not called before enterIntvAfter");
DEBUG(dbgs() << " enterIntvAfter " << Idx);
Idx = Idx.getBoundaryIndex();
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
if (!ParentVNI) {
DEBUG(dbgs() << ": not live\n");
return Idx;
}
DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
assert(MI && "enterIntvAfter called with invalid index");
VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(),
std::next(MachineBasicBlock::iterator(MI)));
return VNI->def;
}
SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) {
assert(OpenIdx && "openIntv not called before enterIntvAtEnd");
SlotIndex End = LIS.getMBBEndIdx(&MBB);
SlotIndex Last = End.getPrevSlot();
DEBUG(dbgs() << " enterIntvAtEnd BB#" << MBB.getNumber() << ", " << Last);
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Last);
if (!ParentVNI) {
DEBUG(dbgs() << ": not live\n");
return End;
}
DEBUG(dbgs() << ": valno " << ParentVNI->id);
VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Last, MBB,
SA.getLastSplitPointIter(&MBB));
RegAssign.insert(VNI->def, End, OpenIdx);
DEBUG(dump());
return VNI->def;
}
/// useIntv - indicate that all instructions in MBB should use OpenLI.
void SplitEditor::useIntv(const MachineBasicBlock &MBB) {
useIntv(LIS.getMBBStartIdx(&MBB), LIS.getMBBEndIdx(&MBB));
}
void SplitEditor::useIntv(SlotIndex Start, SlotIndex End) {
assert(OpenIdx && "openIntv not called before useIntv");
DEBUG(dbgs() << " useIntv [" << Start << ';' << End << "):");
RegAssign.insert(Start, End, OpenIdx);
DEBUG(dump());
}
SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
assert(OpenIdx && "openIntv not called before leaveIntvAfter");
DEBUG(dbgs() << " leaveIntvAfter " << Idx);
// The interval must be live beyond the instruction at Idx.
SlotIndex Boundary = Idx.getBoundaryIndex();
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Boundary);
if (!ParentVNI) {
DEBUG(dbgs() << ": not live\n");
return Boundary.getNextSlot();
}
DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
MachineInstr *MI = LIS.getInstructionFromIndex(Boundary);
assert(MI && "No instruction at index");
// In spill mode, make live ranges as short as possible by inserting the copy
// before MI. This is only possible if that instruction doesn't redefine the
// value. The inserted COPY is not a kill, and we don't need to recompute
// the source live range. The spiller also won't try to hoist this copy.
if (SpillMode && !SlotIndex::isSameInstr(ParentVNI->def, Idx) &&
MI->readsVirtualRegister(Edit->getReg())) {
forceRecompute(0, ParentVNI);
defFromParent(0, ParentVNI, Idx, *MI->getParent(), MI);
return Idx;
}
VNInfo *VNI = defFromParent(0, ParentVNI, Boundary, *MI->getParent(),
std::next(MachineBasicBlock::iterator(MI)));
return VNI->def;
}
SlotIndex SplitEditor::leaveIntvBefore(SlotIndex Idx) {
assert(OpenIdx && "openIntv not called before leaveIntvBefore");
DEBUG(dbgs() << " leaveIntvBefore " << Idx);
// The interval must be live into the instruction at Idx.
Idx = Idx.getBaseIndex();
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
if (!ParentVNI) {
DEBUG(dbgs() << ": not live\n");
return Idx.getNextSlot();
}
DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
assert(MI && "No instruction at index");
VNInfo *VNI = defFromParent(0, ParentVNI, Idx, *MI->getParent(), MI);
return VNI->def;
}
SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
assert(OpenIdx && "openIntv not called before leaveIntvAtTop");
SlotIndex Start = LIS.getMBBStartIdx(&MBB);
DEBUG(dbgs() << " leaveIntvAtTop BB#" << MBB.getNumber() << ", " << Start);
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start);
if (!ParentVNI) {
DEBUG(dbgs() << ": not live\n");
return Start;
}
VNInfo *VNI = defFromParent(0, ParentVNI, Start, MBB,
MBB.SkipPHIsAndLabels(MBB.begin()));
RegAssign.insert(Start, VNI->def, OpenIdx);
DEBUG(dump());
return VNI->def;
}
void SplitEditor::overlapIntv(SlotIndex Start, SlotIndex End) {
assert(OpenIdx && "openIntv not called before overlapIntv");
const VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start);
assert(ParentVNI == Edit->getParent().getVNInfoBefore(End) &&
"Parent changes value in extended range");
assert(LIS.getMBBFromIndex(Start) == LIS.getMBBFromIndex(End) &&
"Range cannot span basic blocks");
// The complement interval will be extended as needed by LRCalc.extend().
if (ParentVNI)
forceRecompute(0, ParentVNI);
DEBUG(dbgs() << " overlapIntv [" << Start << ';' << End << "):");
RegAssign.insert(Start, End, OpenIdx);
DEBUG(dump());
}
//===----------------------------------------------------------------------===//
// Spill modes
//===----------------------------------------------------------------------===//
void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
LiveInterval *LI = &LIS.getInterval(Edit->get(0));
DEBUG(dbgs() << "Removing " << Copies.size() << " back-copies.\n");
RegAssignMap::iterator AssignI;
AssignI.setMap(RegAssign);
for (unsigned i = 0, e = Copies.size(); i != e; ++i) {
SlotIndex Def = Copies[i]->def;
MachineInstr *MI = LIS.getInstructionFromIndex(Def);
assert(MI && "No instruction for back-copy");
MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock::iterator MBBI(MI);
bool AtBegin;
do AtBegin = MBBI == MBB->begin();
while (!AtBegin && (--MBBI)->isDebugValue());
DEBUG(dbgs() << "Removing " << Def << '\t' << *MI);
LIS.removeVRegDefAt(*LI, Def);
LIS.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
// Adjust RegAssign if a register assignment is killed at Def. We want to
// avoid calculating the live range of the source register if possible.
AssignI.find(Def.getPrevSlot());
if (!AssignI.valid() || AssignI.start() >= Def)
continue;
// If MI doesn't kill the assigned register, just leave it.
if (AssignI.stop() != Def)
continue;
unsigned RegIdx = AssignI.value();
if (AtBegin || !MBBI->readsVirtualRegister(Edit->getReg())) {
DEBUG(dbgs() << " cannot find simple kill of RegIdx " << RegIdx << '\n');
forceRecompute(RegIdx, Edit->getParent().getVNInfoAt(Def));
} else {
SlotIndex Kill = LIS.getInstructionIndex(MBBI).getRegSlot();
DEBUG(dbgs() << " move kill to " << Kill << '\t' << *MBBI);
AssignI.setStop(Kill);
}
}
}
MachineBasicBlock*
SplitEditor::findShallowDominator(MachineBasicBlock *MBB,
MachineBasicBlock *DefMBB) {
if (MBB == DefMBB)
return MBB;
assert(MDT.dominates(DefMBB, MBB) && "MBB must be dominated by the def.");
const MachineLoopInfo &Loops = SA.Loops;
const MachineLoop *DefLoop = Loops.getLoopFor(DefMBB);
MachineDomTreeNode *DefDomNode = MDT[DefMBB];
// Best candidate so far.
MachineBasicBlock *BestMBB = MBB;
unsigned BestDepth = UINT_MAX;
for (;;) {
const MachineLoop *Loop = Loops.getLoopFor(MBB);
// MBB isn't in a loop, it doesn't get any better. All dominators have a
// higher frequency by definition.
if (!Loop) {
DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#"
<< MBB->getNumber() << " at depth 0\n");
return MBB;
}
// We'll never be able to exit the DefLoop.
if (Loop == DefLoop) {
DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#"
<< MBB->getNumber() << " in the same loop\n");
return MBB;
}
// Least busy dominator seen so far.
unsigned Depth = Loop->getLoopDepth();
if (Depth < BestDepth) {
BestMBB = MBB;
BestDepth = Depth;
DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#"
<< MBB->getNumber() << " at depth " << Depth << '\n');
}
// Leave loop by going to the immediate dominator of the loop header.
// This is a bigger stride than simply walking up the dominator tree.
MachineDomTreeNode *IDom = MDT[Loop->getHeader()]->getIDom();
// Too far up the dominator tree?
if (!IDom || !MDT.dominates(DefDomNode, IDom))
return BestMBB;
MBB = IDom->getBlock();
}
}
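// Illustrative walk for findShallowDominator (hypothetical CFG): with DefMBB
// at loop depth 0 and MBB inside a depth-2 inner loop, the loop first
// records MBB (depth 2), then hops to the immediate dominator of the inner
// loop's header (say depth 1), and returns as soon as it reaches a
// dominating block outside any loop. Hoisting a back-copy there keeps it
// out of the loops entirely.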
void SplitEditor::hoistCopiesForSize() {
// Get the complement interval, always RegIdx 0.
LiveInterval *LI = &LIS.getInterval(Edit->get(0));
LiveInterval *Parent = &Edit->getParent();
// Track the nearest common dominator for all back-copies for each ParentVNI,
// indexed by ParentVNI->id.
typedef std::pair<MachineBasicBlock*, SlotIndex> DomPair;
SmallVector<DomPair, 8> NearestDom(Parent->getNumValNums());
// Find the nearest common dominator for parent values with multiple
// back-copies. If a single back-copy dominates, put it in DomPair.second.
for (VNInfo *VNI : LI->valnos) {
if (VNI->isUnused())
continue;
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);
assert(ParentVNI && "Parent not live at complement def");
// Don't hoist remats. The complement is probably going to disappear
// completely anyway.
if (Edit->didRematerialize(ParentVNI))
continue;
MachineBasicBlock *ValMBB = LIS.getMBBFromIndex(VNI->def);
DomPair &Dom = NearestDom[ParentVNI->id];
// Keep directly defined parent values. This is either a PHI or an
// instruction in the complement range. All other copies of ParentVNI
// should be eliminated.
if (VNI->def == ParentVNI->def) {
DEBUG(dbgs() << "Direct complement def at " << VNI->def << '\n');
Dom = DomPair(ValMBB, VNI->def);
continue;
}
// Skip the singly mapped values. There is nothing to gain from hoisting a
// single back-copy.
if (Values.lookup(std::make_pair(0, ParentVNI->id)).getPointer()) {
DEBUG(dbgs() << "Single complement def at " << VNI->def << '\n');
continue;
}
if (!Dom.first) {
// First time we see ParentVNI. VNI dominates itself.
Dom = DomPair(ValMBB, VNI->def);
} else if (Dom.first == ValMBB) {
// Two defs in the same block. Pick the earlier def.
if (!Dom.second.isValid() || VNI->def < Dom.second)
Dom.second = VNI->def;
} else {
// Different basic blocks. Check if one dominates.
MachineBasicBlock *Near =
MDT.findNearestCommonDominator(Dom.first, ValMBB);
if (Near == ValMBB)
// Def ValMBB dominates.
Dom = DomPair(ValMBB, VNI->def);
else if (Near != Dom.first)
// None dominate. Hoist to common dominator, need new def.
Dom = DomPair(Near, SlotIndex());
}
DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@' << VNI->def
<< " for parent " << ParentVNI->id << '@' << ParentVNI->def
<< " hoist to BB#" << Dom.first->getNumber() << ' '
<< Dom.second << '\n');
}
// Insert the hoisted copies.
for (unsigned i = 0, e = Parent->getNumValNums(); i != e; ++i) {
DomPair &Dom = NearestDom[i];
if (!Dom.first || Dom.second.isValid())
continue;
// This value needs a hoisted copy inserted at the end of Dom.first.
VNInfo *ParentVNI = Parent->getValNumInfo(i);
MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(ParentVNI->def);
// Get a less loopy dominator than Dom.first.
Dom.first = findShallowDominator(Dom.first, DefMBB);
SlotIndex Last = LIS.getMBBEndIdx(Dom.first).getPrevSlot();
Dom.second =
defFromParent(0, ParentVNI, Last, *Dom.first,
SA.getLastSplitPointIter(Dom.first))->def;
}
// Remove redundant back-copies that are now known to be dominated by another
// def with the same value.
SmallVector<VNInfo*, 8> BackCopies;
for (VNInfo *VNI : LI->valnos) {
if (VNI->isUnused())
continue;
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);
const DomPair &Dom = NearestDom[ParentVNI->id];
if (!Dom.first || Dom.second == VNI->def)
continue;
BackCopies.push_back(VNI);
forceRecompute(0, ParentVNI);
}
removeBackCopies(BackCopies);
}
/// transferValues - Transfer all possible values to the new live ranges.
/// Values that were rematerialized are left alone, they need LRCalc.extend().
bool SplitEditor::transferValues() {
bool Skipped = false;
RegAssignMap::const_iterator AssignI = RegAssign.begin();
for (const LiveRange::Segment &S : Edit->getParent()) {
DEBUG(dbgs() << " blit " << S << ':');
VNInfo *ParentVNI = S.valno;
// RegAssign has holes where RegIdx 0 should be used.
SlotIndex Start = S.start;
AssignI.advanceTo(Start);
do {
unsigned RegIdx;
SlotIndex End = S.end;
if (!AssignI.valid()) {
RegIdx = 0;
} else if (AssignI.start() <= Start) {
RegIdx = AssignI.value();
if (AssignI.stop() < End) {
End = AssignI.stop();
++AssignI;
}
} else {
RegIdx = 0;
End = std::min(End, AssignI.start());
}
// The interval [Start;End) is continuously mapped to RegIdx, ParentVNI.
DEBUG(dbgs() << " [" << Start << ';' << End << ")=" << RegIdx);
LiveRange &LR = LIS.getInterval(Edit->get(RegIdx));
// Check for a simply defined value that can be blitted directly.
ValueForcePair VFP = Values.lookup(std::make_pair(RegIdx, ParentVNI->id));
if (VNInfo *VNI = VFP.getPointer()) {
DEBUG(dbgs() << ':' << VNI->id);
LR.addSegment(LiveInterval::Segment(Start, End, VNI));
Start = End;
continue;
}
// Skip values with forced recomputation.
if (VFP.getInt()) {
DEBUG(dbgs() << "(recalc)");
Skipped = true;
Start = End;
continue;
}
LiveRangeCalc &LRC = getLRCalc(RegIdx);
// This value has multiple defs in RegIdx, but it wasn't rematerialized,
// so the live range is accurate. Add live-in blocks in [Start;End) to the
// LiveInBlocks.
MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start);
SlotIndex BlockStart, BlockEnd;
std::tie(BlockStart, BlockEnd) = LIS.getSlotIndexes()->getMBBRange(MBB);
// The first block may be live-in, or it may have its own def.
if (Start != BlockStart) {
VNInfo *VNI = LR.extendInBlock(BlockStart, std::min(BlockEnd, End));
assert(VNI && "Missing def for complex mapped value");
DEBUG(dbgs() << ':' << VNI->id << "*BB#" << MBB->getNumber());
// MBB has its own def. Is it also live-out?
if (BlockEnd <= End)
LRC.setLiveOutValue(MBB, VNI);
// Skip to the next block for live-in.
++MBB;
BlockStart = BlockEnd;
}
// Handle the live-in blocks covered by [Start;End).
assert(Start <= BlockStart && "Expected live-in block");
while (BlockStart < End) {
DEBUG(dbgs() << ">BB#" << MBB->getNumber());
BlockEnd = LIS.getMBBEndIdx(MBB);
if (BlockStart == ParentVNI->def) {
// This block has the def of a parent PHI, so it isn't live-in.
assert(ParentVNI->isPHIDef() && "Non-phi defined at block start?");
VNInfo *VNI = LR.extendInBlock(BlockStart, std::min(BlockEnd, End));
assert(VNI && "Missing def for complex mapped parent PHI");
if (End >= BlockEnd)
LRC.setLiveOutValue(MBB, VNI); // Live-out as well.
} else {
// This block needs a live-in value. The last block covered may not
// be live-out.
if (End < BlockEnd)
LRC.addLiveInBlock(LR, MDT[MBB], End);
else {
// Live-through, and we don't know the value.
LRC.addLiveInBlock(LR, MDT[MBB]);
LRC.setLiveOutValue(MBB, nullptr);
}
}
BlockStart = BlockEnd;
++MBB;
}
Start = End;
} while (Start != S.end);
DEBUG(dbgs() << '\n');
}
LRCalc[0].calculateValues();
if (SpillMode)
LRCalc[1].calculateValues();
return Skipped;
}
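// Illustrative blit for transferValues (made-up numbers): a parent segment
// [32r;96r) with a single RegAssign entry [48r;64r) -> 1 is emitted as three
// pieces: [32r;48r) -> RegIdx 0, [48r;64r) -> RegIdx 1, and [64r;96r) ->
// RegIdx 0, since holes in RegAssign always map to the complement. Simply
// mapped values are copied directly; forced or complex ones are skipped for
// LiveRangeCalc.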
void SplitEditor::extendPHIKillRanges() {
// Extend live ranges to be live-out for successor PHI values.
for (const VNInfo *PHIVNI : Edit->getParent().valnos) {
if (PHIVNI->isUnused() || !PHIVNI->isPHIDef())
continue;
unsigned RegIdx = RegAssign.lookup(PHIVNI->def);
LiveRange &LR = LIS.getInterval(Edit->get(RegIdx));
LiveRangeCalc &LRC = getLRCalc(RegIdx);
MachineBasicBlock *MBB = LIS.getMBBFromIndex(PHIVNI->def);
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
SlotIndex End = LIS.getMBBEndIdx(*PI);
SlotIndex LastUse = End.getPrevSlot();
// The predecessor may not have a live-out value. That is OK, like an
// undef PHI operand.
if (Edit->getParent().liveAt(LastUse)) {
assert(RegAssign.lookup(LastUse) == RegIdx &&
"Different register assignment in phi predecessor");
LRC.extend(LR, End);
}
}
}
}
/// rewriteAssigned - Rewrite all uses of Edit->getReg().
void SplitEditor::rewriteAssigned(bool ExtendRanges) {
for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Edit->getReg()),
RE = MRI.reg_end(); RI != RE;) {
MachineOperand &MO = *RI;
MachineInstr *MI = MO.getParent();
++RI;
// LiveDebugVariables should have handled all DBG_VALUE instructions.
if (MI->isDebugValue()) {
DEBUG(dbgs() << "Zapping " << *MI);
MO.setReg(0);
continue;
}
// <undef> operands don't really read the register, so it doesn't matter
// which register we choose. When the use operand is tied to a def, we must
// use the same register as the def, so just do that always.
SlotIndex Idx = LIS.getInstructionIndex(MI);
if (MO.isDef() || MO.isUndef())
Idx = Idx.getRegSlot(MO.isEarlyClobber());
// Rewrite to the mapped register at Idx.
unsigned RegIdx = RegAssign.lookup(Idx);
LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));
MO.setReg(LI->reg);
DEBUG(dbgs() << " rewr BB#" << MI->getParent()->getNumber() << '\t'
<< Idx << ':' << RegIdx << '\t' << *MI);
// Extend liveness to Idx if the instruction reads reg.
if (!ExtendRanges || MO.isUndef())
continue;
// Skip instructions that don't read Reg.
if (MO.isDef()) {
if (!MO.getSubReg() && !MO.isEarlyClobber())
continue;
// We may want to extend a live range for a partial redef, or for a use
// tied to an early clobber.
Idx = Idx.getPrevSlot();
if (!Edit->getParent().liveAt(Idx))
continue;
} else
Idx = Idx.getRegSlot(true);
getLRCalc(RegIdx).extend(*LI, Idx.getNextSlot());
}
}
void SplitEditor::deleteRematVictims() {
SmallVector<MachineInstr*, 8> Dead;
for (LiveRangeEdit::iterator I = Edit->begin(), E = Edit->end(); I != E; ++I) {
LiveInterval *LI = &LIS.getInterval(*I);
for (const LiveRange::Segment &S : LI->segments) {
// Dead defs end at the dead slot.
if (S.end != S.valno->def.getDeadSlot())
continue;
MachineInstr *MI = LIS.getInstructionFromIndex(S.valno->def);
assert(MI && "Missing instruction for dead def");
MI->addRegisterDead(LI->reg, &TRI);
if (!MI->allDefsAreDead())
continue;
DEBUG(dbgs() << "All defs dead: " << *MI);
Dead.push_back(MI);
}
}
if (Dead.empty())
return;
Edit->eliminateDeadDefs(Dead);
}
void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
++NumFinished;
// At this point, the live intervals in Edit contain VNInfos corresponding to
// the inserted copies.
// Add the original defs from the parent interval.
for (const VNInfo *ParentVNI : Edit->getParent().valnos) {
if (ParentVNI->isUnused())
continue;
unsigned RegIdx = RegAssign.lookup(ParentVNI->def);
defValue(RegIdx, ParentVNI, ParentVNI->def);
// Force rematted values to be recomputed everywhere.
// The new live ranges may be truncated.
if (Edit->didRematerialize(ParentVNI))
for (unsigned i = 0, e = Edit->size(); i != e; ++i)
forceRecompute(i, ParentVNI);
}
// Hoist back-copies to the complement interval when in spill mode.
switch (SpillMode) {
case SM_Partition:
// Leave all back-copies as is.
break;
case SM_Size:
hoistCopiesForSize();
break;
case SM_Speed:
llvm_unreachable("Spill mode 'speed' not implemented yet");
}
// Transfer the simply mapped values, check if any are skipped.
bool Skipped = transferValues();
if (Skipped)
extendPHIKillRanges();
else
++NumSimple;
// Rewrite virtual registers, possibly extending ranges.
rewriteAssigned(Skipped);
// Delete defs that were rematted everywhere.
if (Skipped)
deleteRematVictims();
// Get rid of unused values and set phi-kill flags.
for (LiveRangeEdit::iterator I = Edit->begin(), E = Edit->end(); I != E; ++I) {
LiveInterval &LI = LIS.getInterval(*I);
LI.RenumberValues();
}
// Provide a reverse mapping from original indices to Edit ranges.
if (LRMap) {
LRMap->clear();
for (unsigned i = 0, e = Edit->size(); i != e; ++i)
LRMap->push_back(i);
}
// Now check if any registers were separated into multiple components.
ConnectedVNInfoEqClasses ConEQ(LIS);
for (unsigned i = 0, e = Edit->size(); i != e; ++i) {
// Don't use iterators, they are invalidated by create() below.
LiveInterval *li = &LIS.getInterval(Edit->get(i));
unsigned NumComp = ConEQ.Classify(li);
if (NumComp <= 1)
continue;
DEBUG(dbgs() << " " << NumComp << " components: " << *li << '\n');
SmallVector<LiveInterval*, 8> dups;
dups.push_back(li);
for (unsigned j = 1; j != NumComp; ++j)
dups.push_back(&Edit->createEmptyInterval());
ConEQ.Distribute(&dups[0], MRI);
// The new intervals all map back to i.
if (LRMap)
LRMap->resize(Edit->size(), i);
}
// Calculate spill weight and allocation hints for new intervals.
Edit->calculateRegClassAndHint(VRM.getMachineFunction(), SA.Loops, MBFI);
assert(!LRMap || LRMap->size() == Edit->size());
}
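// Illustrative LRMap result for finish (hypothetical sizes): with three edit
// ranges where range 1 later separates into two connected components, LRMap
// ends up as {0, 1, 2, 1}; the appended entry maps the freshly created
// interval back to original index 1, letting callers correlate new virtual
// registers with the ranges they asked for.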
//===----------------------------------------------------------------------===//
// Single Block Splitting
//===----------------------------------------------------------------------===//
bool SplitAnalysis::shouldSplitSingleBlock(const BlockInfo &BI,
bool SingleInstrs) const {
// Always split for multiple instructions.
if (!BI.isOneInstr())
return true;
// Don't split for single instructions unless explicitly requested.
if (!SingleInstrs)
return false;
// Splitting a live-through range always makes progress.
if (BI.LiveIn && BI.LiveOut)
return true;
// No point in isolating a copy. It has no register class constraints.
if (LIS.getInstructionFromIndex(BI.FirstInstr)->isCopyLike())
return false;
// Finally, don't isolate an end point that was created by earlier splits.
return isOriginalEndpoint(BI.FirstInstr);
}
void SplitEditor::splitSingleBlock(const SplitAnalysis::BlockInfo &BI) {
openIntv();
SlotIndex LastSplitPoint = SA.getLastSplitPoint(BI.MBB->getNumber());
SlotIndex SegStart = enterIntvBefore(std::min(BI.FirstInstr,
LastSplitPoint));
if (!BI.LiveOut || BI.LastInstr < LastSplitPoint) {
useIntv(SegStart, leaveIntvAfter(BI.LastInstr));
} else {
// The last use is after the last valid split point.
SlotIndex SegStop = leaveIntvBefore(LastSplitPoint);
useIntv(SegStart, SegStop);
overlapIntv(SegStop, BI.LastInstr);
}
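// Illustrative use of splitSingleBlock (hypothetical indices): for a block
// with uses at 36r and 52r and a last split point past 52r, the editor opens
// a new interval, enters it before 36r, and leaves it after 52r, isolating a
// local interval over roughly [36r;52r) that the allocator can assign a
// register independently of the parent.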
}
//===----------------------------------------------------------------------===//
// Global Live Range Splitting Support
//===----------------------------------------------------------------------===//
// These methods support a method of global live range splitting that uses a
// global algorithm to decide intervals for CFG edges. They will insert split
// points and color intervals in basic blocks while avoiding interference.
//
// Note that splitSingleBlock is also useful for blocks where both CFG edges
// are on the stack.
void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
unsigned IntvIn, SlotIndex LeaveBefore,
unsigned IntvOut, SlotIndex EnterAfter){
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(MBBNum);
DEBUG(dbgs() << "BB#" << MBBNum << " [" << Start << ';' << Stop
<< ") intf " << LeaveBefore << '-' << EnterAfter
<< ", live-through " << IntvIn << " -> " << IntvOut);
assert((IntvIn || IntvOut) && "Use splitSingleBlock for isolated blocks");
assert((!LeaveBefore || LeaveBefore < Stop) && "Interference after block");
assert((!IntvIn || !LeaveBefore || LeaveBefore > Start) && "Impossible intf");
assert((!EnterAfter || EnterAfter >= Start) && "Interference before block");
MachineBasicBlock *MBB = VRM.getMachineFunction().getBlockNumbered(MBBNum);
if (!IntvOut) {
DEBUG(dbgs() << ", spill on entry.\n");
//
// <<<<<<<<< Possible LeaveBefore interference.
// |-----------| Live through.
// -____________ Spill on entry.
//
selectIntv(IntvIn);
SlotIndex Idx = leaveIntvAtTop(*MBB);
assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
(void)Idx;
return;
}
if (!IntvIn) {
DEBUG(dbgs() << ", reload on exit.\n");
//
// >>>>>>> Possible EnterAfter interference.
// |-----------| Live through.
// ___________-- Reload on exit.
//
selectIntv(IntvOut);
SlotIndex Idx = enterIntvAtEnd(*MBB);
assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
(void)Idx;
return;
}
if (IntvIn == IntvOut && !LeaveBefore && !EnterAfter) {
DEBUG(dbgs() << ", straight through.\n");
//
// |-----------| Live through.
// ------------- Straight through, same intv, no interference.
//
selectIntv(IntvOut);
useIntv(Start, Stop);
return;
}
// We cannot legally insert splits after LSP.
SlotIndex LSP = SA.getLastSplitPoint(MBBNum);
assert((!IntvOut || !EnterAfter || EnterAfter < LSP) && "Impossible intf");
if (IntvIn != IntvOut && (!LeaveBefore || !EnterAfter ||
LeaveBefore.getBaseIndex() > EnterAfter.getBoundaryIndex())) {
DEBUG(dbgs() << ", switch avoiding interference.\n");
//
// >>>> <<<< Non-overlapping EnterAfter/LeaveBefore interference.
// |-----------| Live through.
// ------======= Switch intervals between interference.
//
selectIntv(IntvOut);
SlotIndex Idx;
if (LeaveBefore && LeaveBefore < LSP) {
Idx = enterIntvBefore(LeaveBefore);
useIntv(Idx, Stop);
} else {
Idx = enterIntvAtEnd(*MBB);
}
selectIntv(IntvIn);
useIntv(Start, Idx);
assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
return;
}
DEBUG(dbgs() << ", create local intv for interference.\n");
//
// >>><><><><<<< Overlapping EnterAfter/LeaveBefore interference.
// |-----------| Live through.
// ==---------== Switch intervals before/after interference.
//
assert(LeaveBefore <= EnterAfter && "Missed case");
selectIntv(IntvOut);
SlotIndex Idx = enterIntvAfter(EnterAfter);
useIntv(Idx, Stop);
assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
selectIntv(IntvIn);
Idx = leaveIntvBefore(LeaveBefore);
useIntv(Start, Idx);
assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
}
void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
unsigned IntvIn, SlotIndex LeaveBefore) {
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop
<< "), uses " << BI.FirstInstr << '-' << BI.LastInstr
<< ", reg-in " << IntvIn << ", leave before " << LeaveBefore
<< (BI.LiveOut ? ", stack-out" : ", killed in block"));
assert(IntvIn && "Must have register in");
assert(BI.LiveIn && "Must be live-in");
assert((!LeaveBefore || LeaveBefore > Start) && "Bad interference");
if (!BI.LiveOut && (!LeaveBefore || LeaveBefore >= BI.LastInstr)) {
DEBUG(dbgs() << " before interference.\n");
//
// <<< Interference after kill.
// |---o---x | Killed in block.
// ========= Use IntvIn everywhere.
//
selectIntv(IntvIn);
useIntv(Start, BI.LastInstr);
return;
}
SlotIndex LSP = SA.getLastSplitPoint(BI.MBB->getNumber());
if (!LeaveBefore || LeaveBefore > BI.LastInstr.getBoundaryIndex()) {
//
// <<< Possible interference after last use.
// |---o---o---| Live-out on stack.
// =========____ Leave IntvIn after last use.
//
// < Interference after last use.
// |---o---o--o| Live-out on stack, late last use.
// ============ Copy to stack after LSP, overlap IntvIn.
// \_____ Stack interval is live-out.
//
if (BI.LastInstr < LSP) {
DEBUG(dbgs() << ", spill after last use before interference.\n");
selectIntv(IntvIn);
SlotIndex Idx = leaveIntvAfter(BI.LastInstr);
useIntv(Start, Idx);
assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
} else {
DEBUG(dbgs() << ", spill before last split point.\n");
selectIntv(IntvIn);
SlotIndex Idx = leaveIntvBefore(LSP);
overlapIntv(Idx, BI.LastInstr);
useIntv(Start, Idx);
assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
}
return;
}
// The interference is overlapping somewhere we wanted to use IntvIn. That
// means we need to create a local interval that can be allocated a
// different register.
unsigned LocalIntv = openIntv();
(void)LocalIntv;
DEBUG(dbgs() << ", creating local interval " << LocalIntv << ".\n");
if (!BI.LiveOut || BI.LastInstr < LSP) {
//
// <<<<<<< Interference overlapping uses.
// |---o---o---| Live-out on stack.
// =====----____ Leave IntvIn before interference, then spill.
//
SlotIndex To = leaveIntvAfter(BI.LastInstr);
SlotIndex From = enterIntvBefore(LeaveBefore);
useIntv(From, To);
selectIntv(IntvIn);
useIntv(Start, From);
assert((!LeaveBefore || From <= LeaveBefore) && "Interference");
return;
}
// <<<<<<< Interference overlapping uses.
// |---o---o--o| Live-out on stack, late last use.
// =====------- Copy to stack before LSP, overlap LocalIntv.
// \_____ Stack interval is live-out.
//
SlotIndex To = leaveIntvBefore(LSP);
overlapIntv(To, BI.LastInstr);
SlotIndex From = enterIntvBefore(std::min(To, LeaveBefore));
useIntv(From, To);
selectIntv(IntvIn);
useIntv(Start, From);
assert((!LeaveBefore || From <= LeaveBefore) && "Interference");
}
void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
unsigned IntvOut, SlotIndex EnterAfter) {
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop
<< "), uses " << BI.FirstInstr << '-' << BI.LastInstr
<< ", reg-out " << IntvOut << ", enter after " << EnterAfter
<< (BI.LiveIn ? ", stack-in" : ", defined in block"));
SlotIndex LSP = SA.getLastSplitPoint(BI.MBB->getNumber());
assert(IntvOut && "Must have register out");
assert(BI.LiveOut && "Must be live-out");
assert((!EnterAfter || EnterAfter < LSP) && "Bad interference");
if (!BI.LiveIn && (!EnterAfter || EnterAfter <= BI.FirstInstr)) {
DEBUG(dbgs() << " after interference.\n");
//
// >>>> Interference before def.
// | o---o---| Defined in block.
// ========= Use IntvOut everywhere.
//
selectIntv(IntvOut);
useIntv(BI.FirstInstr, Stop);
return;
}
if (!EnterAfter || EnterAfter < BI.FirstInstr.getBaseIndex()) {
DEBUG(dbgs() << ", reload after interference.\n");
//
// >>>> Interference before def.
// |---o---o---| Live-through, stack-in.
// ____========= Enter IntvOut before first use.
//
selectIntv(IntvOut);
SlotIndex Idx = enterIntvBefore(std::min(LSP, BI.FirstInstr));
useIntv(Idx, Stop);
assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
return;
}
// The interference is overlapping somewhere we wanted to use IntvOut. That
// means we need to create a local interval that can be allocated a
// different register.
DEBUG(dbgs() << ", interference overlaps uses.\n");
//
// >>>>>>> Interference overlapping uses.
// |---o---o---| Live-through, stack-in.
// ____---====== Create local interval for interference range.
//
selectIntv(IntvOut);
SlotIndex Idx = enterIntvAfter(EnterAfter);
useIntv(Idx, Stop);
assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
openIntv();
SlotIndex From = enterIntvBefore(std::min(Idx, BI.FirstInstr));
useIntv(From, Idx);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/CallingConvLower.cpp | //===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
: CallingConv(CC), IsVarArg(isVarArg), MF(mf),
TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C),
CallOrPrologue(Unknown) {
// No stack is used.
StackOffset = 0;
clearByValRegsInfo();
UsedRegs.resize((TRI.getNumRegs()+31)/32);
}
/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
int MinSize, int MinAlign,
ISD::ArgFlagsTy ArgFlags) {
unsigned Align = ArgFlags.getByValAlign();
unsigned Size = ArgFlags.getByValSize();
if (MinSize > (int)Size)
Size = MinSize;
if (MinAlign > (int)Align)
Align = MinAlign;
MF.getFrameInfo()->ensureMaxAlignment(Align);
MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
Size = unsigned(RoundUpToAlignment(Size, MinAlign));
unsigned Offset = AllocateStack(Size, Align);
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
UsedRegs[*AI/32] |= 1 << (*AI&31);
}
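// Illustrative bit math for MarkAllocated (hypothetical register number):
// for *AI == 37 the register lands in word 37/32 == 1, bit 37&31 == 5, so
// UsedRegs[1] |= 1 << 5. The constructor's (getNumRegs()+31)/32 sizing
// guarantees one bit per physical register.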
/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
unsigned NumArgs = Ins.size();
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = Ins[i].VT;
ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
dbgs() << "Formal argument #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
llvm_unreachable(nullptr);
}
}
}
/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
return false;
}
return true;
}
/// Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
dbgs() << "Return operand #" << i << " has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
llvm_unreachable(nullptr);
}
}
}
/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
unsigned NumOps = Outs.size();
for (unsigned i = 0; i != NumOps; ++i) {
MVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
dbgs() << "Call operand #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
llvm_unreachable(nullptr);
}
}
}
/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn) {
unsigned NumOps = ArgVTs.size();
for (unsigned i = 0; i != NumOps; ++i) {
MVT ArgVT = ArgVTs[i];
ISD::ArgFlagsTy ArgFlags = Flags[i];
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
dbgs() << "Call operand #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
llvm_unreachable(nullptr);
}
}
}
/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
MVT VT = Ins[i].VT;
ISD::ArgFlagsTy Flags = Ins[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
dbgs() << "Call result #" << i << " has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
llvm_unreachable(nullptr);
}
}
}
/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
dbgs() << "Call result has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
llvm_unreachable(nullptr);
}
}
static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
if (VT.isVector())
return true; // Assume -msse-regparm might be in effect.
if (!VT.isInteger())
return false;
if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
return true;
return false;
}
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
MVT VT, CCAssignFn Fn) {
unsigned SavedStackOffset = StackOffset;
unsigned NumLocs = Locs.size();
// Set the 'inreg' flag if it is used for this calling convention.
ISD::ArgFlagsTy Flags;
if (isValueTypeInRegForCC(CallingConv, VT))
Flags.setInReg();
// Allocate something of this value type repeatedly until we get assigned a
// location in memory.
bool HaveRegParm = true;
while (HaveRegParm) {
if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
<< " while computing remaining regparms\n";
#endif
llvm_unreachable(nullptr);
}
HaveRegParm = Locs.back().isRegLoc();
}
// Copy all the registers from the value locations we added.
assert(NumLocs < Locs.size() && "CC assignment failed to add location");
for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
if (Locs[I].isRegLoc())
Regs.push_back(MCPhysReg(Locs[I].getLocReg()));
// Clear the assigned values and stack memory. We leave the registers marked
// as allocated so that future queries don't return the same registers, i.e.
// when i64 and f64 are both passed in GPRs.
StackOffset = SavedStackOffset;
Locs.resize(NumLocs);
}
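// Illustrative probe for getRemainingRegParmsForType (hypothetical
// convention): if a calling convention assigns i32 values to ECX then EDX
// before spilling to the stack, calling this with VT = MVT::i32 appends
// {ECX, EDX} to Regs, then rewinds StackOffset and truncates Locs so that
// only the "registers marked allocated" side effect survives.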
void CCState::analyzeMustTailForwardedRegisters(
SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
CCAssignFn Fn) {
// Oftentimes calling conventions will not use register parameters for
// variadic functions, so we assume the function is not variadic in order to
// get all the registers that might be used in a non-variadic call.
SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
for (MVT RegVT : RegParmTypes) {
SmallVector<MCPhysReg, 8> RemainingRegs;
getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
for (MCPhysReg PReg : RemainingRegs) {
unsigned VReg = MF.addLiveIn(PReg, RC);
Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
}
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/InlineSpiller.cpp | //===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//
#include "Spiller.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
STATISTIC(NumSnippets, "Number of spilled snippets");
STATISTIC(NumSpills, "Number of spills inserted");
STATISTIC(NumSpillsRemoved, "Number of spills removed");
STATISTIC(NumReloads, "Number of reloads inserted");
STATISTIC(NumReloadsRemoved, "Number of reloads removed");
STATISTIC(NumFolded, "Number of folded stack accesses");
STATISTIC(NumFoldedLoads, "Number of folded loads");
STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
STATISTIC(NumOmitReloadSpill, "Number of omitted spills of reloads");
STATISTIC(NumHoists, "Number of hoisted spills");
static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
cl::desc("Disable inline spill hoisting"));
namespace {
class InlineSpiller : public Spiller {
MachineFunction &MF;
LiveIntervals &LIS;
LiveStacks &LSS;
AliasAnalysis *AA;
MachineDominatorTree &MDT;
MachineLoopInfo &Loops;
VirtRegMap &VRM;
MachineFrameInfo &MFI;
MachineRegisterInfo &MRI;
const TargetInstrInfo &TII;
const TargetRegisterInfo &TRI;
const MachineBlockFrequencyInfo &MBFI;
// Variables that are valid during spill(), but used by multiple methods.
LiveRangeEdit *Edit;
LiveInterval *StackInt;
int StackSlot;
unsigned Original;
// All registers to spill to StackSlot, including the main register.
SmallVector<unsigned, 8> RegsToSpill;
// All COPY instructions to/from snippets.
// They are ignored since both operands refer to the same stack slot.
SmallPtrSet<MachineInstr*, 8> SnippetCopies;
// Values that failed to remat at some point.
SmallPtrSet<VNInfo*, 8> UsedValues;
public:
// Information about a value that was defined by a copy from a sibling
// register.
struct SibValueInfo {
// True when all reaching defs were reloads: No spill is necessary.
bool AllDefsAreReloads;
// True when value is defined by an original PHI not from splitting.
bool DefByOrigPHI;
// True when the COPY defining this value killed its source.
bool KillsSource;
// The preferred register to spill.
unsigned SpillReg;
// The value of SpillReg that should be spilled.
VNInfo *SpillVNI;
// The block where SpillVNI should be spilled. Currently, this must be the
// block containing SpillVNI->def.
MachineBasicBlock *SpillMBB;
// A defining instruction that is not a sibling copy or a reload, or NULL.
// This can be used as a template for rematerialization.
MachineInstr *DefMI;
// List of values that depend on this one. These values are actually the
// same, but live range splitting has placed them in different registers,
// or SSA update needed to insert PHI-defs to preserve SSA form. These are
// copies of the current value and phi-kills. Usually only phi-kills cause
// more than one dependent value.
TinyPtrVector<VNInfo*> Deps;
SibValueInfo(unsigned Reg, VNInfo *VNI)
: AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),
SpillReg(Reg), SpillVNI(VNI), SpillMBB(nullptr), DefMI(nullptr) {}
// Returns true when a def has been found.
bool hasDef() const { return DefByOrigPHI || DefMI; }
};
private:
// Values in RegsToSpill defined by sibling copies.
typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
SibValueMap SibValues;
// Dead defs generated during spilling.
SmallVector<MachineInstr*, 8> DeadDefs;
~InlineSpiller() override {}
public:
InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
: MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
LSS(pass.getAnalysis<LiveStacks>()),
AA(&pass.getAnalysis<AliasAnalysis>()),
MDT(pass.getAnalysis<MachineDominatorTree>()),
Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
MFI(*mf.getFrameInfo()), MRI(mf.getRegInfo()),
TII(*mf.getSubtarget().getInstrInfo()),
TRI(*mf.getSubtarget().getRegisterInfo()),
MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}
void spill(LiveRangeEdit &) override;
private:
bool isSnippet(const LiveInterval &SnipLI);
void collectRegsToSpill();
bool isRegToSpill(unsigned Reg) {
return std::find(RegsToSpill.begin(),
RegsToSpill.end(), Reg) != RegsToSpill.end();
}
bool isSibling(unsigned Reg);
MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = nullptr);
void analyzeSiblingValues();
bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
void markValueUsed(LiveInterval*, VNInfo*);
bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
void reMaterializeAll();
bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
MachineInstr *LoadMI = nullptr);
void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
void spillAroundUses(unsigned Reg);
void spillAll();
};
}
namespace llvm {
Spiller::~Spiller() { }
void Spiller::anchor() { }
Spiller *createInlineSpiller(MachineFunctionPass &pass,
MachineFunction &mf,
VirtRegMap &vrm) {
return new InlineSpiller(pass, mf, vrm);
}
}
//===----------------------------------------------------------------------===//
// Snippets
//===----------------------------------------------------------------------===//
// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.
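//
// For example (illustrative), with %Reg assigned to stack slot fi# and %snip
// a snippet:
//
// %snip = FILL fi#
// USE %snip
// SPILL %snip, fi#
//
// Spilling %snip as well keeps the value in fi# except around the single use,
// where the access may fold into a memory operand on targets that allow it.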
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
if (!MI->isFullCopy())
return 0;
if (MI->getOperand(0).getReg() == Reg)
return MI->getOperand(1).getReg();
if (MI->getOperand(1).getReg() == Reg)
return MI->getOperand(0).getReg();
return 0;
}
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
unsigned Reg = Edit->getReg();
// A snippet is a tiny live range with only a single instruction using it
// besides copies to/from Reg or spills/fills. We accept:
//
// %snip = COPY %Reg / FILL fi#
// %snip = USE %snip
// %Reg = COPY %snip / SPILL %snip, fi#
//
if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
return false;
MachineInstr *UseMI = nullptr;
// Check that all uses satisfy our criteria.
for (MachineRegisterInfo::reg_instr_nodbg_iterator
RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
E = MRI.reg_instr_nodbg_end(); RI != E; ) {
MachineInstr *MI = &*(RI++);
// Allow copies to/from Reg.
if (isFullCopyOf(MI, Reg))
continue;
// Allow stack slot loads.
int FI;
if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
continue;
// Allow stack slot stores.
if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
continue;
// Allow a single additional instruction.
if (UseMI && MI != UseMI)
return false;
UseMI = MI;
}
return true;
}
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
unsigned Reg = Edit->getReg();
// Main register always spills.
RegsToSpill.assign(1, Reg);
SnippetCopies.clear();
// Snippets all have the same original, so there can't be any for an original
// register.
if (Original == Reg)
return;
for (MachineRegisterInfo::reg_instr_iterator
RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
MachineInstr *MI = &*(RI++);
unsigned SnipReg = isFullCopyOf(MI, Reg);
if (!isSibling(SnipReg))
continue;
LiveInterval &SnipLI = LIS.getInterval(SnipReg);
if (!isSnippet(SnipLI))
continue;
SnippetCopies.insert(MI);
if (isRegToSpill(SnipReg))
continue;
RegsToSpill.push_back(SnipReg);
DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
++NumSnippets;
}
}
//===----------------------------------------------------------------------===//
// Sibling Values
//===----------------------------------------------------------------------===//
// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.
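//
// For example (illustrative), after splitting the original register into
// siblings %1 and %2:
//
// %1 = FILL fi# ; a reload, or some other def
// %2 = COPY %1 ; sibling copy
// ... ; %2 is chosen for spilling
//
// Tracing %2's value back through the COPY reaches %1's value. If that def is
// rematerializable we can remat instead of reloading, and if all reaching defs
// were reloads from fi#, the value is already on the stack and no new spill is
// needed.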
bool InlineSpiller::isSibling(unsigned Reg) {
return TargetRegisterInfo::isVirtualRegister(Reg) &&
VRM.getOriginal(Reg) == Original;
}
#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS,
const InlineSpiller::SibValueInfo &SVI) {
OS << "spill " << PrintReg(SVI.SpillReg) << ':'
<< SVI.SpillVNI->id << '@' << SVI.SpillVNI->def;
if (SVI.SpillMBB)
OS << " in BB#" << SVI.SpillMBB->getNumber();
if (SVI.AllDefsAreReloads)
OS << " all-reloads";
if (SVI.DefByOrigPHI)
OS << " orig-phi";
if (SVI.KillsSource)
OS << " kill";
OS << " deps[";
for (unsigned i = 0, e = SVI.Deps.size(); i != e; ++i)
OS << ' ' << SVI.Deps[i]->id << '@' << SVI.Deps[i]->def;
OS << " ]";
if (SVI.DefMI)
OS << " def: " << *SVI.DefMI;
else
OS << '\n';
return OS;
}
#endif
/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVIIter SibValues entry to propagate.
/// @param VNI Dependent value, or NULL to propagate to all saved dependents.
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
VNInfo *VNI) {
SibValueMap::value_type *SVI = &*SVIIter;
// When VNI is non-NULL, add it to SVI's deps, and only propagate to that.
TinyPtrVector<VNInfo*> FirstDeps;
if (VNI) {
FirstDeps.push_back(VNI);
SVI->second.Deps.push_back(VNI);
}
// Has the value been completely determined yet? If not, defer propagation.
if (!SVI->second.hasDef())
return;
// Work list of values to propagate.
SmallSetVector<SibValueMap::value_type *, 8> WorkList;
WorkList.insert(SVI);
do {
SVI = WorkList.pop_back_val();
TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
VNI = nullptr;
SibValueInfo &SV = SVI->second;
if (!SV.SpillMBB)
SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);
DEBUG(dbgs() << " prop to " << Deps->size() << ": "
<< SVI->first->id << '@' << SVI->first->def << ":\t" << SV);
assert(SV.hasDef() && "Propagating undefined value");
// Should this value be propagated as a preferred spill candidate? We don't
// propagate values of registers that are about to spill.
bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);
unsigned SpillDepth = ~0u;
for (TinyPtrVector<VNInfo*>::iterator DepI = Deps->begin(),
DepE = Deps->end(); DepI != DepE; ++DepI) {
SibValueMap::iterator DepSVI = SibValues.find(*DepI);
assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
SibValueInfo &DepSV = DepSVI->second;
if (!DepSV.SpillMBB)
DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);
bool Changed = false;
// Propagate defining instruction.
if (!DepSV.hasDef()) {
Changed = true;
DepSV.DefMI = SV.DefMI;
DepSV.DefByOrigPHI = SV.DefByOrigPHI;
}
// Propagate AllDefsAreReloads. For PHI values, this computes an AND of
// all predecessors.
if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
Changed = true;
DepSV.AllDefsAreReloads = false;
}
// Propagate best spill value.
if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
if (SV.SpillMBB == DepSV.SpillMBB) {
// DepSV is in the same block. Hoist when dominated.
if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {
// This is an alternative def earlier in the same MBB.
// Hoist the spill as far as possible in SpillMBB. This can ease
// register pressure:
//
// x = def
// y = use x
// s = copy x
//
// Hoisting the spill of s to immediately after the def removes the
// interference between x and y:
//
// x = def
// spill x
// y = use x<kill>
//
// This hoist only helps when the DepSV copy kills its source.
Changed = true;
DepSV.SpillReg = SV.SpillReg;
DepSV.SpillVNI = SV.SpillVNI;
DepSV.SpillMBB = SV.SpillMBB;
}
} else {
// DepSV is in a different block.
if (SpillDepth == ~0u)
SpillDepth = Loops.getLoopDepth(SV.SpillMBB);
// Also hoist spills to blocks with smaller loop depth, but make sure
// that the new value dominates. Non-phi dependents are always
// dominated, phis need checking.
const BranchProbability MarginProb(4, 5); // 80%
// Hoist a spill to an outer loop if there are multiple dependents (it can
// be beneficial when more than one dependent is hoisted) or if DepSV (the
// hoisting source) is hotter than SV (the hoisting destination); we add an
// 80% margin to bias a little towards loop depth.
bool HoistCondition =
(MBFI.getBlockFreq(DepSV.SpillMBB) >=
(MBFI.getBlockFreq(SV.SpillMBB) * MarginProb)) ||
Deps->size() > 1;
if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
HoistCondition &&
(!DepSVI->first->isPHIDef() ||
MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
Changed = true;
DepSV.SpillReg = SV.SpillReg;
DepSV.SpillVNI = SV.SpillVNI;
DepSV.SpillMBB = SV.SpillMBB;
}
}
}
if (!Changed)
continue;
// Something changed in DepSVI. Propagate to dependents.
WorkList.insert(&*DepSVI);
DEBUG(dbgs() << " update " << DepSVI->first->id << '@'
<< DepSVI->first->def << " to:\t" << DepSV);
}
} while (!WorkList.empty());
}
/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
VNInfo *OrigVNI) {
// Check if a cached value already exists.
SibValueMap::iterator SVI;
bool Inserted;
std::tie(SVI, Inserted) =
SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
if (!Inserted) {
DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
<< UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);
return SVI->second.DefMI;
}
DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
<< UseVNI->id << '@' << UseVNI->def << '\n');
// List of (Reg, VNI) that have been inserted into SibValues, but need to be
// processed.
SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
WorkList.push_back(std::make_pair(UseReg, UseVNI));
LiveInterval &OrigLI = LIS.getInterval(Original);
do {
unsigned Reg;
VNInfo *VNI;
std::tie(Reg, VNI) = WorkList.pop_back_val();
DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
<< ":\t");
// First check if this value has already been computed.
SVI = SibValues.find(VNI);
assert(SVI != SibValues.end() && "Missing SibValues entry");
// Trace through PHI-defs created by live range splitting.
if (VNI->isPHIDef()) {
// Stop at original PHIs. We don't know the value at the
// predecessors. Look up the VNInfo for the current definition
// in OrigLI, to properly determine whether or not this phi was
// added by splitting.
if (VNI->def == OrigLI.getVNInfoAt(VNI->def)->def) {
DEBUG(dbgs() << "orig phi value\n");
SVI->second.DefByOrigPHI = true;
SVI->second.AllDefsAreReloads = false;
propagateSiblingValue(SVI);
continue;
}
// This is a PHI inserted by live range splitting. We could trace the
// live-out value from predecessor blocks, but that search can be very
// expensive if there are many predecessors and many more PHIs as
// generated by tail-dup when it sees an indirectbr. Instead, look at
// all the non-PHI defs that have the same value as OrigVNI. They must
// jointly dominate VNI->def. This is not optimal since VNI may actually
// be jointly dominated by a smaller subset of defs, so there is a chance
// we will miss an AllDefsAreReloads optimization.
// Separate all values dominated by OrigVNI into PHIs and non-PHIs.
SmallVector<VNInfo*, 8> PHIs, NonPHIs;
LiveInterval &LI = LIS.getInterval(Reg);
for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
VI != VE; ++VI) {
VNInfo *VNI2 = *VI;
if (VNI2->isUnused())
continue;
if (!OrigLI.containsOneValue() &&
OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)
continue;
if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)
PHIs.push_back(VNI2);
else
NonPHIs.push_back(VNI2);
}
DEBUG(dbgs() << "split phi value, checking " << PHIs.size()
<< " phi-defs, and " << NonPHIs.size()
<< " non-phi/orig defs\n");
// Create entries for all the PHIs. Don't add them to the worklist, we
// are processing all of them in one go here.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
SibValues.insert(std::make_pair(PHIs[i], SibValueInfo(Reg, PHIs[i])));
// Add every PHI as a dependent of all the non-PHIs.
for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) {
VNInfo *NonPHI = NonPHIs[i];
// Known value? Try an insertion.
std::tie(SVI, Inserted) =
SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
// Add all the PHIs as dependents of NonPHI.
SVI->second.Deps.insert(SVI->second.Deps.end(), PHIs.begin(),
PHIs.end());
// This is the first time we see NonPHI, add it to the worklist.
if (Inserted)
WorkList.push_back(std::make_pair(Reg, NonPHI));
else
// Propagate to all inserted PHIs, not just VNI.
propagateSiblingValue(SVI);
}
// Next work list item.
continue;
}
MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
assert(MI && "Missing def");
// Trace through sibling copies.
if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
if (isSibling(SrcReg)) {
LiveInterval &SrcLI = LIS.getInterval(SrcReg);
LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
assert(SrcQ.valueIn() && "Copy from non-existing value");
// Check if this COPY kills its source.
SVI->second.KillsSource = SrcQ.isKill();
VNInfo *SrcVNI = SrcQ.valueIn();
DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
<< SrcVNI->id << '@' << SrcVNI->def
<< " kill=" << unsigned(SVI->second.KillsSource) << '\n');
// Known sibling source value? Try an insertion.
std::tie(SVI, Inserted) = SibValues.insert(
std::make_pair(SrcVNI, SibValueInfo(SrcReg, SrcVNI)));
// This is the first time we see Src, add it to the worklist.
if (Inserted)
WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
propagateSiblingValue(SVI, VNI);
// Next work list item.
continue;
}
}
// Track reachable reloads.
SVI->second.DefMI = MI;
SVI->second.SpillMBB = MI->getParent();
int FI;
if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
DEBUG(dbgs() << "reload\n");
propagateSiblingValue(SVI);
// Next work list item.
continue;
}
// Potential remat candidate.
DEBUG(dbgs() << "def " << *MI);
SVI->second.AllDefsAreReloads = false;
propagateSiblingValue(SVI);
} while (!WorkList.empty());
// Look up the value we were looking for. We already did this lookup at the
// top of the function, but SibValues may have been invalidated.
SVI = SibValues.find(UseVNI);
assert(SVI != SibValues.end() && "Didn't compute requested info");
DEBUG(dbgs() << " traced to:\t" << SVI->second);
return SVI->second.DefMI;
}
/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
SibValues.clear();
// No siblings at all?
if (Edit->getReg() == Original)
return;
LiveInterval &OrigLI = LIS.getInterval(Original);
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
unsigned Reg = RegsToSpill[i];
LiveInterval &LI = LIS.getInterval(Reg);
for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
VE = LI.vni_end(); VI != VE; ++VI) {
VNInfo *VNI = *VI;
if (VNI->isUnused())
continue;
MachineInstr *DefMI = nullptr;
if (!VNI->isPHIDef()) {
DefMI = LIS.getInstructionFromIndex(VNI->def);
assert(DefMI && "No defining instruction");
}
// Check possible sibling copies.
if (VNI->isPHIDef() || DefMI->isCopy()) {
VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
assert(OrigVNI && "Def outside original live range");
if (OrigVNI->def != VNI->def)
DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
}
if (DefMI && Edit->checkRematerializable(VNI, DefMI, AA)) {
DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
<< VNI->def << " may remat from " << *DefMI);
}
}
}
}
/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
SibValueMap::iterator I = SibValues.find(VNI);
if (I == SibValues.end())
return false;
const SibValueInfo &SVI = I->second;
// Let the normal folding code deal with the boring case.
if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
return false;
// SpillReg may have been deleted by remat and DCE.
if (!LIS.hasInterval(SVI.SpillReg)) {
DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');
SibValues.erase(I);
return false;
}
LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);
if (!SibLI.containsValue(SVI.SpillVNI)) {
DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');
SibValues.erase(I);
return false;
}
// Conservatively extend the stack slot range to the range of the original
// value. We may be able to do better with stack slot coloring by being more
// careful here.
assert(StackInt && "No stack slot assigned yet.");
LiveInterval &OrigLI = LIS.getInterval(Original);
VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
<< *StackInt << '\n');
// Already spilled everywhere.
if (SVI.AllDefsAreReloads) {
DEBUG(dbgs() << "\tno spill needed: " << SVI);
++NumOmitReloadSpill;
return true;
}
// We are going to spill SVI.SpillVNI immediately after its def, so clear out
// any later spills of the same value.
eliminateRedundantSpills(SibLI, SVI.SpillVNI);
MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
MachineBasicBlock::iterator MII;
if (SVI.SpillVNI->isPHIDef())
MII = MBB->SkipPHIsAndLabels(MBB->begin());
else {
MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
assert(DefMI && "Defining instruction disappeared");
MII = DefMI;
++MII;
}
// Insert spill without kill flag immediately after def.
TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
MRI.getRegClass(SVI.SpillReg), &TRI);
--MII; // Point to store instruction.
LIS.InsertMachineInstrInMaps(MII);
DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);
++NumSpills;
++NumHoists;
return true;
}
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
assert(VNI && "Missing value");
SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
WorkList.push_back(std::make_pair(&SLI, VNI));
assert(StackInt && "No stack slot assigned yet.");
do {
LiveInterval *LI;
std::tie(LI, VNI) = WorkList.pop_back_val();
unsigned Reg = LI->reg;
DEBUG(dbgs() << "Checking redundant spills for "
<< VNI->id << '@' << VNI->def << " in " << *LI << '\n');
// Regs to spill are taken care of.
if (isRegToSpill(Reg))
continue;
// Add all of VNI's live range to StackInt.
StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
// Find all spills and copies of VNI.
for (MachineRegisterInfo::use_instr_nodbg_iterator
UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
UI != E; ) {
MachineInstr *MI = &*(UI++);
if (!MI->isCopy() && !MI->mayStore())
continue;
SlotIndex Idx = LIS.getInstructionIndex(MI);
if (LI->getVNInfoAt(Idx) != VNI)
continue;
// Follow sibling copies down the dominator tree.
if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
if (isSibling(DstReg)) {
LiveInterval &DstLI = LIS.getInterval(DstReg);
VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
assert(DstVNI && "Missing defined value");
assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
WorkList.push_back(std::make_pair(&DstLI, DstVNI));
}
continue;
}
// Erase spills.
int FI;
if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
// eliminateDeadDefs won't normally remove stores, so switch opcode.
MI->setDesc(TII.get(TargetOpcode::KILL));
DeadDefs.push_back(MI);
++NumSpillsRemoved;
--NumSpills;
}
}
} while (!WorkList.empty());
}
//===----------------------------------------------------------------------===//
// Rematerialization
//===----------------------------------------------------------------------===//
/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. Sees through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
WorkList.push_back(std::make_pair(LI, VNI));
do {
std::tie(LI, VNI) = WorkList.pop_back_val();
if (!UsedValues.insert(VNI).second)
continue;
if (VNI->isPHIDef()) {
MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(*PI));
if (PVNI)
WorkList.push_back(std::make_pair(LI, PVNI));
}
continue;
}
// Follow snippet copies.
MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
if (!SnippetCopies.count(MI))
continue;
LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
assert(SnipVNI && "Snippet undefined before copy");
WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
} while (!WorkList.empty());
}
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
MachineBasicBlock::iterator MI) {
// Analyze instruction
SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
if (!RI.Reads)
return false;
SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
if (!ParentVNI) {
DEBUG(dbgs() << "\tadding <undef> flags: ");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
MO.setIsUndef();
}
DEBUG(dbgs() << UseIdx << '\t' << *MI);
return true;
}
if (SnippetCopies.count(MI))
return false;
// Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
LiveRangeEdit::Remat RM(ParentVNI);
SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
if (SibI != SibValues.end())
RM.OrigMI = SibI->second.DefMI;
if (!Edit->canRematerializeAt(RM, UseIdx, false)) {
markValueUsed(&VirtReg, ParentVNI);
DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
return false;
}
// If the instruction also writes VirtReg.reg, it had better not require the
// same register for uses and defs.
if (RI.Tied) {
markValueUsed(&VirtReg, ParentVNI);
DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
return false;
}
// Before rematerializing into a register for a single instruction, try to
// fold a load into the instruction. That avoids allocating a new register.
if (RM.OrigMI->canFoldAsLoad() &&
foldMemoryOperand(Ops, RM.OrigMI)) {
Edit->markRematerialized(RM.ParentVNI);
++NumFoldedLoads;
return true;
}
// Allocate a new register for the remat.
unsigned NewVReg = Edit->createFrom(Original);
// Finally we can rematerialize OrigMI before MI.
SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewVReg, RM,
TRI);
(void)DefIdx;
DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
<< *LIS.getInstructionFromIndex(DefIdx));
// Replace operands
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
MO.setReg(NewVReg);
MO.setIsKill();
}
}
DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI << '\n');
++NumRemats;
return true;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
// analyzeSiblingValues has already tested all relevant defining instructions.
if (!Edit->anyRematerializable(AA))
return;
UsedValues.clear();
// Try to remat before all uses of snippets.
bool anyRemat = false;
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
unsigned Reg = RegsToSpill[i];
LiveInterval &LI = LIS.getInterval(Reg);
for (MachineRegisterInfo::reg_bundle_iterator
RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
RegI != E; ) {
MachineInstr *MI = &*(RegI++);
// Debug values are not allowed to affect codegen.
if (MI->isDebugValue())
continue;
anyRemat |= reMaterializeFor(LI, MI);
}
}
if (!anyRemat)
return;
// Remove any values that were completely rematted.
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
unsigned Reg = RegsToSpill[i];
LiveInterval &LI = LIS.getInterval(Reg);
for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
I != E; ++I) {
VNInfo *VNI = *I;
if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
continue;
MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
MI->addRegisterDead(Reg, &TRI);
if (!MI->allDefsAreDead())
continue;
DEBUG(dbgs() << "All defs dead: " << *MI);
DeadDefs.push_back(MI);
}
}
// Eliminate dead code after remat. Note that some snippet copies may be
// deleted here.
if (DeadDefs.empty())
return;
DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
// Get rid of deleted and empty intervals.
unsigned ResultPos = 0;
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
unsigned Reg = RegsToSpill[i];
if (!LIS.hasInterval(Reg))
continue;
LiveInterval &LI = LIS.getInterval(Reg);
if (LI.empty()) {
Edit->eraseVirtReg(Reg);
continue;
}
RegsToSpill[ResultPos++] = Reg;
}
RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
// Spilling
//===----------------------------------------------------------------------===//
/// If MI is a load or store of StackSlot, it can be removed.
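/// For example (illustrative), once Reg is being spilled to StackSlot, a fill
/// 'Reg = FILL StackSlot' or a spill 'SPILL Reg, StackSlot' is redundant and
/// is simply deleted.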
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
int FI = 0;
unsigned InstrReg = TII.isLoadFromStackSlot(MI, FI);
bool IsLoad = InstrReg;
if (!IsLoad)
InstrReg = TII.isStoreToStackSlot(MI, FI);
// We have a stack access. Is it the right register and slot?
if (InstrReg != Reg || FI != StackSlot)
return false;
DEBUG(dbgs() << "Coalescing stack access: " << *MI);
LIS.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
if (IsLoad) {
++NumReloadsRemoved;
--NumReloads;
} else {
++NumSpillsRemoved;
--NumSpills;
}
return true;
}
#if !defined(NDEBUG)
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
MachineBasicBlock::iterator E,
LiveIntervals const &LIS,
const char *const header,
unsigned VReg = 0) {
char NextLine = '\n';
char SlotIndent = '\t';
if (std::next(B) == E) {
NextLine = ' ';
SlotIndent = ' ';
}
dbgs() << '\t' << header << ": " << NextLine;
for (MachineBasicBlock::iterator I = B; I != E; ++I) {
SlotIndex Idx = LIS.getInstructionIndex(I).getRegSlot();
// If a register was passed in and this instruction has it as a
// destination that is marked as an early clobber, print the
// early-clobber slot index.
if (VReg) {
MachineOperand *MO = I->findRegisterDefOperand(VReg);
if (MO && MO->isEarlyClobber())
Idx = Idx.getRegSlot(true);
}
dbgs() << SlotIndent << Idx << '\t' << *I;
}
}
#endif
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
MachineInstr *LoadMI) {
if (Ops.empty())
return false;
// Don't attempt folding in bundles.
MachineInstr *MI = Ops.front().first;
if (Ops.back().first != MI || MI->isBundled())
return false;
bool WasCopy = MI->isCopy();
unsigned ImpReg = 0;
bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::STATEPOINT ||
MI->getOpcode() == TargetOpcode::PATCHPOINT ||
MI->getOpcode() == TargetOpcode::STACKMAP);
// TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
// operands.
SmallVector<unsigned, 8> FoldOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
unsigned Idx = Ops[i].second;
assert(MI == Ops[i].first && "Instruction conflict during operand folding");
MachineOperand &MO = MI->getOperand(Idx);
if (MO.isImplicit()) {
ImpReg = MO.getReg();
continue;
}
// FIXME: Teach targets to deal with subregs.
if (!SpillSubRegs && MO.getSubReg())
return false;
// We cannot fold a load instruction into a def.
if (LoadMI && MO.isDef())
return false;
// Tied use operands should not be passed to foldMemoryOperand.
if (!MI->isRegTiedToDefOperand(Idx))
FoldOps.push_back(Idx);
}
MachineInstrSpan MIS(MI);
MachineInstr *FoldMI =
LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
: TII.foldMemoryOperand(MI, FoldOps, StackSlot);
if (!FoldMI)
return false;
// Remove LIS for any dead defs in the original MI not in FoldMI.
for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
if (!MO->isReg())
continue;
unsigned Reg = MO->getReg();
if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
MRI.isReserved(Reg)) {
continue;
}
// Skip non-Defs, including undef uses and internal reads.
if (MO->isUse())
continue;
MIBundleOperands::PhysRegInfo RI =
MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
if (RI.Defines)
continue;
// FoldMI does not define this physreg. Remove the LI segment.
assert(MO->isDead() && "Cannot fold physreg def");
SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
LIS.removePhysRegDefAt(Reg, Idx);
}
LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
MI->eraseFromParent();
// Insert any new instructions other than FoldMI into the LIS maps.
assert(!MIS.empty() && "Unexpected empty span of instructions!");
for (MachineBasicBlock::iterator MII = MIS.begin(), End = MIS.end();
MII != End; ++MII)
if (&*MII != FoldMI)
LIS.InsertMachineInstrInMaps(&*MII);
// TII.foldMemoryOperand may have left some implicit operands on the
// instruction. Strip them.
if (ImpReg)
for (unsigned i = FoldMI->getNumOperands(); i; --i) {
MachineOperand &MO = FoldMI->getOperand(i - 1);
if (!MO.isReg() || !MO.isImplicit())
break;
if (MO.getReg() == ImpReg)
FoldMI->RemoveOperand(i - 1);
}
DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
"folded"));
if (!WasCopy)
++NumFolded;
else if (Ops.front().second == 0)
++NumSpills;
else
++NumReloads;
return true;
}
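// Example for foldMemoryOperand (illustrative): a reload feeding a single use
//
// %r = FILL fi#
// %d = ADD %r, %s
//
// can fold into one memory-operand instruction on targets that support it:
//
// %d = ADD [fi#], %s
//
// avoiding the reload into a fresh register entirely.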
void InlineSpiller::insertReload(unsigned NewVReg,
SlotIndex Idx,
MachineBasicBlock::iterator MI) {
MachineBasicBlock &MBB = *MI->getParent();
MachineInstrSpan MIS(MI);
TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
MRI.getRegClass(NewVReg), &TRI);
LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
NewVReg));
++NumReloads;
}
/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
MachineBasicBlock::iterator MI) {
MachineBasicBlock &MBB = *MI->getParent();
MachineInstrSpan MIS(MI);
TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
MRI.getRegClass(NewVReg), &TRI);
LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
"spill"));
++NumSpills;
}
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
LiveInterval &OldLI = LIS.getInterval(Reg);
// Iterate over instructions using Reg.
for (MachineRegisterInfo::reg_bundle_iterator
RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
RegI != E; ) {
MachineInstr *MI = &*(RegI++);
// Debug values are not allowed to affect codegen.
if (MI->isDebugValue()) {
// Modify DBG_VALUE now that the value is in a spill slot.
bool IsIndirect = MI->isIndirectDebugValue();
uint64_t Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
const MDNode *Var = MI->getDebugVariable();
const MDNode *Expr = MI->getDebugExpression();
DebugLoc DL = MI->getDebugLoc();
DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
MachineBasicBlock *MBB = MI->getParent();
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
.addFrameIndex(StackSlot)
.addImm(Offset)
.addMetadata(Var)
.addMetadata(Expr);
continue;
}
// Ignore copies to/from snippets. We'll delete them.
if (SnippetCopies.count(MI))
continue;
// Stack slot accesses may coalesce away.
if (coalesceStackAccess(MI, Reg))
continue;
// Analyze instruction.
SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
// Find the slot index where this instruction reads and writes OldLI.
// This is usually the def slot, except for tied early clobbers.
SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
if (SlotIndex::isSameInstr(Idx, VNI->def))
Idx = VNI->def;
// Check for a sibling copy.
unsigned SibReg = isFullCopyOf(MI, Reg);
if (SibReg && isSibling(SibReg)) {
// This may actually be a copy between snippets.
if (isRegToSpill(SibReg)) {
DEBUG(dbgs() << "Found new snippet copy: " << *MI);
SnippetCopies.insert(MI);
continue;
}
if (RI.Writes) {
// Hoist the spill of a sib-reg copy.
if (hoistSpill(OldLI, MI)) {
// This COPY is now dead, the value is already in the stack slot.
MI->getOperand(0).setIsDead();
DeadDefs.push_back(MI);
continue;
}
} else {
// This is a reload for a sib-reg copy. Drop spills downstream.
LiveInterval &SibLI = LIS.getInterval(SibReg);
eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
// The COPY will fold to a reload below.
}
}
// Attempt to fold memory ops.
if (foldMemoryOperand(Ops))
continue;
// Create a new virtual register for spill/fill.
// FIXME: Infer regclass from instruction alone.
unsigned NewVReg = Edit->createFrom(Reg);
if (RI.Reads)
insertReload(NewVReg, Idx, MI);
// Rewrite instruction operands.
bool hasLiveDef = false;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
MO.setReg(NewVReg);
if (MO.isUse()) {
if (!Ops[i].first->isRegTiedToDefOperand(Ops[i].second))
MO.setIsKill();
} else {
if (!MO.isDead())
hasLiveDef = true;
}
}
DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
// FIXME: Use a second vreg if instruction has no tied ops.
if (RI.Writes)
if (hasLiveDef)
insertSpill(NewVReg, true, MI);
}
}
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
// Update LiveStacks now that we are committed to spilling.
if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
StackSlot = VRM.assignVirt2StackSlot(Original);
StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
} else
StackInt = &LSS.getInterval(StackSlot);
if (Original != Edit->getReg())
VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
StackInt->MergeSegmentsInAsValue(LIS.getInterval(RegsToSpill[i]),
StackInt->getValNumInfo(0));
DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
// Spill around uses of all RegsToSpill.
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
spillAroundUses(RegsToSpill[i]);
// Hoisted spills may cause dead code.
if (!DeadDefs.empty()) {
DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
}
// Finally delete the SnippetCopies.
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
for (MachineRegisterInfo::reg_instr_iterator
RI = MRI.reg_instr_begin(RegsToSpill[i]), E = MRI.reg_instr_end();
RI != E; ) {
MachineInstr *MI = &*(RI++);
assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
// FIXME: Do this with a LiveRangeEdit callback.
LIS.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
}
}
// Delete all spilled registers.
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
Edit->eraseVirtReg(RegsToSpill[i]);
}
void InlineSpiller::spill(LiveRangeEdit &edit) {
++NumSpilledRanges;
Edit = &edit;
assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
&& "Trying to spill a stack slot.");
// Share a stack slot among all descendants of Original.
Original = VRM.getOriginal(edit.getReg());
StackSlot = VRM.getStackSlot(Original);
StackInt = nullptr;
DEBUG(dbgs() << "Inline spilling "
<< TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
<< ':' << edit.getParent()
<< "\nFrom original " << PrintReg(Original) << '\n');
assert(edit.getParent().isSpillable() &&
"Attempting to spill already spilled value.");
assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
collectRegsToSpill();
analyzeSiblingValues();
reMaterializeAll();
// Remat may handle everything.
if (!RegsToSpill.empty())
spillAll();
Edit->calculateRegClassAndHint(MF, Loops, MBFI);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/AtomicExpandPass.cpp | //===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or AtomicCmpXchg.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "atomic-expand"
namespace {
class AtomicExpand: public FunctionPass {
const TargetMachine *TM;
const TargetLowering *TLI;
public:
static char ID; // Pass identification, replacement for typeid
explicit AtomicExpand(const TargetMachine *TM = nullptr)
: FunctionPass(ID), TM(TM), TLI(nullptr) {
initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
private:
bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
bool IsStore, bool IsLoad);
bool expandAtomicLoad(LoadInst *LI);
bool expandAtomicLoadToLL(LoadInst *LI);
bool expandAtomicLoadToCmpXchg(LoadInst *LI);
bool expandAtomicStore(StoreInst *SI);
bool tryExpandAtomicRMW(AtomicRMWInst *AI);
bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI);
bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
bool isIdempotentRMW(AtomicRMWInst *AI);
bool simplifyIdempotentRMW(AtomicRMWInst *AI);
};
}
char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
"Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
false, false)
FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
return new AtomicExpand(TM);
}
bool AtomicExpand::runOnFunction(Function &F) {
if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
return false;
TLI = TM->getSubtargetImpl(F)->getTargetLowering();
SmallVector<Instruction *, 1> AtomicInsts;
// Changing control-flow while iterating through it is a bad idea, so gather a
// list of all atomic instructions before we start.
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
if (I->isAtomic())
AtomicInsts.push_back(&*I);
}
bool MadeChange = false;
for (auto I : AtomicInsts) {
auto LI = dyn_cast<LoadInst>(I);
auto SI = dyn_cast<StoreInst>(I);
auto RMWI = dyn_cast<AtomicRMWInst>(I);
auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
"Unknown atomic instruction");
auto FenceOrdering = Monotonic;
bool IsStore, IsLoad;
if (TLI->getInsertFencesForAtomic()) {
if (LI && isAtLeastAcquire(LI->getOrdering())) {
FenceOrdering = LI->getOrdering();
LI->setOrdering(Monotonic);
IsStore = false;
IsLoad = true;
} else if (SI && isAtLeastRelease(SI->getOrdering())) {
FenceOrdering = SI->getOrdering();
SI->setOrdering(Monotonic);
IsStore = true;
IsLoad = false;
} else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
isAtLeastAcquire(RMWI->getOrdering()))) {
FenceOrdering = RMWI->getOrdering();
RMWI->setOrdering(Monotonic);
IsStore = IsLoad = true;
} else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
(isAtLeastRelease(CASI->getSuccessOrdering()) ||
isAtLeastAcquire(CASI->getSuccessOrdering()))) {
// If a compare and swap is lowered to LL/SC, we can do smarter fence
// insertion, with a stronger one on the success path than on the
// failure path. As a result, fence insertion is directly done by
// expandAtomicCmpXchg in that case.
FenceOrdering = CASI->getSuccessOrdering();
CASI->setSuccessOrdering(Monotonic);
CASI->setFailureOrdering(Monotonic);
IsStore = IsLoad = true;
}
if (FenceOrdering != Monotonic) {
MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
}
}
if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
MadeChange |= expandAtomicLoad(LI);
} else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
MadeChange |= expandAtomicStore(SI);
} else if (RMWI) {
// There are two different ways of expanding RMW instructions:
// - into a load if it is idempotent
// - into a Cmpxchg/LL-SC loop otherwise
// we try them in that order.
if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
MadeChange = true;
} else {
MadeChange |= tryExpandAtomicRMW(RMWI);
}
} else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
MadeChange |= expandAtomicCmpXchg(CASI);
}
}
return MadeChange;
}
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
bool IsStore, bool IsLoad) {
IRBuilder<> Builder(I);
auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);
auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
// The trailing fence is emitted before the instruction instead of after
// because there is no easy way of setting Builder insertion point after
// an instruction. So we must erase it from the BB, and insert it back
// in the right place.
// We have a guard here because not every atomic operation generates a
// trailing fence.
if (TrailingFence) {
TrailingFence->removeFromParent();
TrailingFence->insertAfter(I);
}
return (LeadingFence || TrailingFence);
}
bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
if (TLI->hasLoadLinkedStoreConditional())
return expandAtomicLoadToLL(LI);
else
return expandAtomicLoadToCmpXchg(LI);
}
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
IRBuilder<> Builder(LI);
// On some architectures, load-linked instructions are atomic for larger
// sizes than normal loads. For example, the only 64-bit load guaranteed
// to be single-copy atomic by ARM is an ldrexd (A3.5.3).
Value *Val =
TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
LI->replaceAllUsesWith(Val);
LI->eraseFromParent();
return true;
}
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
IRBuilder<> Builder(LI);
AtomicOrdering Order = LI->getOrdering();
Value *Addr = LI->getPointerOperand();
Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
Constant *DummyVal = Constant::getNullValue(Ty);
Value *Pair = Builder.CreateAtomicCmpXchg(
Addr, DummyVal, DummyVal, Order,
AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");
LI->replaceAllUsesWith(Loaded);
LI->eraseFromParent();
return true;
}
bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
// This function is only called on atomic stores that are too large to be
// atomic if implemented as a native store, so we replace them with an atomic
// swap, which can be implemented for example as ldrex/strex on ARM or
// lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
// It is the responsibility of the target to only signal expansion via
// shouldExpandAtomicRMW in cases where this is required and possible.
IRBuilder<> Builder(SI);
AtomicRMWInst *AI =
Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
SI->getValueOperand(), SI->getOrdering());
SI->eraseFromParent();
// Now we have an appropriate swap instruction, lower it as usual.
return tryExpandAtomicRMW(AI);
}
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
case TargetLoweringBase::AtomicRMWExpansionKind::None:
return false;
case TargetLoweringBase::AtomicRMWExpansionKind::LLSC: {
assert(TLI->hasLoadLinkedStoreConditional() &&
"TargetLowering requested we expand AtomicRMW instruction into "
"load-linked/store-conditional combos, but such instructions aren't "
"supported");
return expandAtomicRMWToLLSC(AI);
}
case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: {
return expandAtomicRMWToCmpXchg(AI);
}
}
llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}
/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
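/// For example (illustrative), Op == AtomicRMWInst::Max emits:
/// %cmp = icmp sgt iN %loaded, %inc
/// %new = select i1 %cmp, iN %loaded, iN %inc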
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
Value *Loaded, Value *Inc) {
Value *NewVal;
switch (Op) {
case AtomicRMWInst::Xchg:
return Inc;
case AtomicRMWInst::Add:
return Builder.CreateAdd(Loaded, Inc, "new");
case AtomicRMWInst::Sub:
return Builder.CreateSub(Loaded, Inc, "new");
case AtomicRMWInst::And:
return Builder.CreateAnd(Loaded, Inc, "new");
case AtomicRMWInst::Nand:
return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
case AtomicRMWInst::Or:
return Builder.CreateOr(Loaded, Inc, "new");
case AtomicRMWInst::Xor:
return Builder.CreateXor(Loaded, Inc, "new");
case AtomicRMWInst::Max:
NewVal = Builder.CreateICmpSGT(Loaded, Inc);
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
case AtomicRMWInst::Min:
NewVal = Builder.CreateICmpSLE(Loaded, Inc);
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
case AtomicRMWInst::UMax:
NewVal = Builder.CreateICmpUGT(Loaded, Inc);
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
case AtomicRMWInst::UMin:
NewVal = Builder.CreateICmpULE(Loaded, Inc);
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
default:
llvm_unreachable("Unknown atomic op");
}
}
bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
AtomicOrdering MemOpOrder = AI->getOrdering();
Value *Addr = AI->getPointerOperand();
BasicBlock *BB = AI->getParent();
Function *F = BB->getParent();
LLVMContext &Ctx = F->getContext();
// Given: atomicrmw some_op iN* %addr, iN %incr ordering
//
// The standard expansion we produce is:
// [...]
// fence?
// atomicrmw.start:
// %loaded = @load.linked(%addr)
// %new = some_op iN %loaded, %incr
// %stored = @store_conditional(%new, %addr)
// %try_again = icmp i32 ne %stored, 0
// br i1 %try_again, label %loop, label %atomicrmw.end
// atomicrmw.end:
// fence?
// [...]
BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
// This grabs the DebugLoc from AI.
IRBuilder<> Builder(AI);
// The split call above "helpfully" added a branch at the end of BB (to the
// wrong place), but we might want a fence too. It's easiest to just remove
// the branch entirely.
std::prev(BB->end())->eraseFromParent();
Builder.SetInsertPoint(BB);
Builder.CreateBr(LoopBB);
// Start the main loop block now that we've taken care of the preliminaries.
Builder.SetInsertPoint(LoopBB);
Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
Value *NewVal =
performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());
Value *StoreSuccess =
TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
Value *TryAgain = Builder.CreateICmpNE(
StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
AI->replaceAllUsesWith(Loaded);
AI->eraseFromParent();
return true;
}
bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
AtomicOrdering MemOpOrder =
AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
Value *Addr = AI->getPointerOperand();
BasicBlock *BB = AI->getParent();
Function *F = BB->getParent();
LLVMContext &Ctx = F->getContext();
// Given: atomicrmw some_op iN* %addr, iN %incr ordering
//
// The standard expansion we produce is:
// [...]
// %init_loaded = load atomic iN* %addr
// br label %loop
// loop:
// %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
// %new = some_op iN %loaded, %incr
// %pair = cmpxchg iN* %addr, iN %loaded, iN %new
// %new_loaded = extractvalue { iN, i1 } %pair, 0
// %success = extractvalue { iN, i1 } %pair, 1
// br i1 %success, label %atomicrmw.end, label %loop
// atomicrmw.end:
// [...]
BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
// This grabs the DebugLoc from AI.
IRBuilder<> Builder(AI);
// The split call above "helpfully" added a branch at the end of BB (to the
// wrong place), but we want a load. It's easiest to just remove
// the branch entirely.
std::prev(BB->end())->eraseFromParent();
Builder.SetInsertPoint(BB);
LoadInst *InitLoaded = Builder.CreateLoad(Addr);
// Atomics require at least natural alignment. Note that setAlignment takes a
// byte count, so convert from the type size in bits.
InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
Builder.CreateBr(LoopBB);
// Start the main loop block now that we've taken care of the preliminaries.
Builder.SetInsertPoint(LoopBB);
PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
Loaded->addIncoming(InitLoaded, BB);
Value *NewVal =
performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());
Value *Pair = Builder.CreateAtomicCmpXchg(
Addr, Loaded, NewVal, MemOpOrder,
AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
Value *NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
Loaded->addIncoming(NewLoaded, LoopBB);
Value *Success = Builder.CreateExtractValue(Pair, 1, "success");
Builder.CreateCondBr(Success, ExitBB, LoopBB);
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
AI->replaceAllUsesWith(NewLoaded);
AI->eraseFromParent();
return true;
}
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
AtomicOrdering FailureOrder = CI->getFailureOrdering();
Value *Addr = CI->getPointerOperand();
BasicBlock *BB = CI->getParent();
Function *F = BB->getParent();
LLVMContext &Ctx = F->getContext();
// If getInsertFencesForAtomic() returns true, then the target does not want
// to deal with memory orders, and emitLeading/TrailingFence should take care
// of everything. Otherwise, emitLeading/TrailingFence are no-op and we
// should preserve the ordering.
AtomicOrdering MemOpOrder =
TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;
// Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
//
// The full expansion we produce is:
// [...]
// fence?
// cmpxchg.start:
// %loaded = @load.linked(%addr)
// %should_store = icmp eq %loaded, %desired
// br i1 %should_store, label %cmpxchg.trystore,
// label %cmpxchg.failure
// cmpxchg.trystore:
// %stored = @store_conditional(%new, %addr)
// %success = icmp eq i32 %stored, 0
// br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
// cmpxchg.success:
// fence?
// br label %cmpxchg.end
// cmpxchg.failure:
// fence?
// br label %cmpxchg.end
// cmpxchg.end:
// %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
// %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
// %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
// [...]
BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);
// This grabs the DebugLoc from CI
IRBuilder<> Builder(CI);
// The split call above "helpfully" added a branch at the end of BB (to the
// wrong place), but we might want a fence too. It's easiest to just remove
// the branch entirely.
std::prev(BB->end())->eraseFromParent();
Builder.SetInsertPoint(BB);
TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
/*IsLoad=*/true);
Builder.CreateBr(LoopBB);
// Start the main loop block now that we've taken care of the preliminaries.
Builder.SetInsertPoint(LoopBB);
Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
Value *ShouldStore =
Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
// If the cmpxchg doesn't actually need any ordering when it fails, we can
// jump straight past that fence instruction (if it exists).
Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
Builder.SetInsertPoint(TryStoreBB);
Value *StoreSuccess = TLI->emitStoreConditional(
Builder, CI->getNewValOperand(), Addr, MemOpOrder);
StoreSuccess = Builder.CreateICmpEQ(
StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
Builder.CreateCondBr(StoreSuccess, SuccessBB,
CI->isWeak() ? FailureBB : LoopBB);
// Make sure later instructions don't get reordered with a fence if necessary.
Builder.SetInsertPoint(SuccessBB);
TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
/*IsLoad=*/true);
Builder.CreateBr(ExitBB);
Builder.SetInsertPoint(FailureBB);
TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
/*IsLoad=*/true);
Builder.CreateBr(ExitBB);
// Finally, we have control-flow based knowledge of whether the cmpxchg
// succeeded or not. We expose this to later passes by converting any
// subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.
// Setup the builder so we can create any PHIs we need.
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);
// Look for any users of the cmpxchg that are just comparing the loaded value
// against the desired one, and replace them with the CFG-derived version.
SmallVector<ExtractValueInst *, 2> PrunedInsts;
for (auto User : CI->users()) {
ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
if (!EV)
continue;
assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
"weird extraction from { iN, i1 }");
if (EV->getIndices()[0] == 0)
EV->replaceAllUsesWith(Loaded);
else
EV->replaceAllUsesWith(Success);
PrunedInsts.push_back(EV);
}
// We can remove the instructions now that we're no longer iterating through
// them.
for (auto EV : PrunedInsts)
EV->eraseFromParent();
if (!CI->use_empty()) {
// Some use of the full struct return that we don't understand has happened,
// so we've got to reconstruct it properly.
Value *Res;
Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
Res = Builder.CreateInsertValue(Res, Success, 1);
CI->replaceAllUsesWith(Res);
}
CI->eraseFromParent();
return true;
}
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
if(!C)
return false;
AtomicRMWInst::BinOp Op = RMWI->getOperation();
switch(Op) {
case AtomicRMWInst::Add:
case AtomicRMWInst::Sub:
case AtomicRMWInst::Or:
case AtomicRMWInst::Xor:
return C->isZero();
case AtomicRMWInst::And:
return C->isMinusOne();
  // FIXME: we could also handle Min/Max/UMin/UMax via the INT_MIN/INT_MAX/...
default:
return false;
}
}
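// For example, each of the following (illustrative IR) leaves the memory
// value unchanged, so isIdempotentRMW returns true for it:
//   atomicrmw add i32* %p, i32 0 seq_cst
//   atomicrmw xor i32* %p, i32 0 seq_cst
//   atomicrmw and i32* %p, i32 -1 seq_cst
// Such operations are candidates for lowering into a fenced load below.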
bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
expandAtomicLoad(ResultingLoad);
return true;
}
return false;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegAllocFast.cpp | //===-- RegAllocFast.cpp - A fast register allocator for debug code -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This register allocator allocates registers to a basic block at a time,
// attempting to keep values in registers and reusing registers as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
STATISTIC(NumCopies, "Number of copies coalesced");
static RegisterRegAlloc
fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator);
namespace {
class RAFast : public MachineFunctionPass {
public:
static char ID;
RAFast() : MachineFunctionPass(ID), StackSlotForVirtReg(-1),
isBulkSpilling(false) {}
private:
MachineFunction *MF;
MachineRegisterInfo *MRI;
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
RegisterClassInfo RegClassInfo;
// Basic block currently being allocated.
MachineBasicBlock *MBB;
// StackSlotForVirtReg - Maps virtual regs to the frame index where these
// values are spilled.
IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
// Everything we know about a live virtual register.
struct LiveReg {
MachineInstr *LastUse; // Last instr to use reg.
unsigned VirtReg; // Virtual register number.
unsigned PhysReg; // Currently held here.
unsigned short LastOpNum; // OpNum on LastUse.
bool Dirty; // Register needs spill.
explicit LiveReg(unsigned v)
: LastUse(nullptr), VirtReg(v), PhysReg(0), LastOpNum(0), Dirty(false){}
unsigned getSparseSetIndex() const {
return TargetRegisterInfo::virtReg2Index(VirtReg);
}
};
typedef SparseSet<LiveReg> LiveRegMap;
// LiveVirtRegs - This map contains entries for each virtual register
// that is currently available in a physical register.
LiveRegMap LiveVirtRegs;
DenseMap<unsigned, SmallVector<MachineInstr *, 4> > LiveDbgValueMap;
// RegState - Track the state of a physical register.
enum RegState {
// A disabled register is not available for allocation, but an alias may
// be in use. A register can only be moved out of the disabled state if
// all aliases are disabled.
regDisabled,
// A free register is not currently in use and can be allocated
// immediately without checking aliases.
regFree,
// A reserved register has been assigned explicitly (e.g., setting up a
// call parameter), and it remains reserved until it is used.
regReserved
    // A register state may also be a virtual register number, indicating that
// the physical register is currently allocated to a virtual register. In
// that case, LiveVirtRegs contains the inverse mapping.
};
// PhysRegState - One of the RegState enums, or a virtreg.
std::vector<unsigned> PhysRegState;
// Set of register units.
typedef SparseSet<unsigned> UsedInInstrSet;
// Set of register units that are used in the current instruction, and so
// cannot be allocated.
UsedInInstrSet UsedInInstr;
// Mark a physreg as used in this instruction.
void markRegUsedInInstr(unsigned PhysReg) {
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
UsedInInstr.insert(*Units);
}
// Check if a physreg or any of its aliases are used in this instruction.
bool isRegUsedInInstr(unsigned PhysReg) const {
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
if (UsedInInstr.count(*Units))
return true;
return false;
}
// SkippedInstrs - Descriptors of instructions whose clobber list was
// ignored because all registers were spilled. It is still necessary to
// mark all the clobbered registers as used by the function.
SmallPtrSet<const MCInstrDesc*, 4> SkippedInstrs;
// isBulkSpilling - This flag is set when LiveRegMap will be cleared
// completely after spilling all live registers. LiveRegMap entries should
// not be erased.
bool isBulkSpilling;
enum : unsigned {
spillClean = 1,
spillDirty = 100,
spillImpossible = ~0u
};
public:
StringRef getPassName() const override {
return "Fast Register Allocator";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
private:
bool runOnMachineFunction(MachineFunction &Fn) override;
void AllocateBasicBlock();
void handleThroughOperands(MachineInstr *MI,
SmallVectorImpl<unsigned> &VirtDead);
int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC);
bool isLastUseOfLocalReg(MachineOperand&);
void addKillFlag(const LiveReg&);
void killVirtReg(LiveRegMap::iterator);
void killVirtReg(unsigned VirtReg);
void spillVirtReg(MachineBasicBlock::iterator MI, LiveRegMap::iterator);
void spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg);
void usePhysReg(MachineOperand&);
void definePhysReg(MachineInstr *MI, unsigned PhysReg, RegState NewState);
unsigned calcSpillCost(unsigned PhysReg) const;
void assignVirtToPhysReg(LiveReg&, unsigned PhysReg);
LiveRegMap::iterator findLiveVirtReg(unsigned VirtReg) {
return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
}
LiveRegMap::const_iterator findLiveVirtReg(unsigned VirtReg) const {
return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
}
LiveRegMap::iterator assignVirtToPhysReg(unsigned VReg, unsigned PhysReg);
LiveRegMap::iterator allocVirtReg(MachineInstr *MI, LiveRegMap::iterator,
unsigned Hint);
LiveRegMap::iterator defineVirtReg(MachineInstr *MI, unsigned OpNum,
unsigned VirtReg, unsigned Hint);
LiveRegMap::iterator reloadVirtReg(MachineInstr *MI, unsigned OpNum,
unsigned VirtReg, unsigned Hint);
void spillAll(MachineBasicBlock::iterator MI);
bool setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg);
};
char RAFast::ID = 0;
}
/// getStackSpaceFor - This allocates space for the specified virtual register
/// to be held on the stack.
int RAFast::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) {
  // Find the location where Reg would belong...
int SS = StackSlotForVirtReg[VirtReg];
if (SS != -1)
return SS; // Already has space allocated?
// Allocate a new stack object for this spill location...
int FrameIdx = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
RC->getAlignment());
// Assign the slot.
StackSlotForVirtReg[VirtReg] = FrameIdx;
return FrameIdx;
}
/// isLastUseOfLocalReg - Return true if MO is the only remaining reference to
/// its virtual register, and it is guaranteed to be a block-local register.
///
bool RAFast::isLastUseOfLocalReg(MachineOperand &MO) {
// If the register has ever been spilled or reloaded, we conservatively assume
// it is a global register used in multiple blocks.
if (StackSlotForVirtReg[MO.getReg()] != -1)
return false;
// Check that the use/def chain has exactly one operand - MO.
MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(MO.getReg());
if (&*I != &MO)
return false;
return ++I == MRI->reg_nodbg_end();
}
/// addKillFlag - Set kill flags on last use of a virtual register.
void RAFast::addKillFlag(const LiveReg &LR) {
if (!LR.LastUse) return;
MachineOperand &MO = LR.LastUse->getOperand(LR.LastOpNum);
if (MO.isUse() && !LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) {
if (MO.getReg() == LR.PhysReg)
MO.setIsKill();
else
LR.LastUse->addRegisterKilled(LR.PhysReg, TRI, true);
}
}
/// killVirtReg - Mark virtreg as no longer available.
void RAFast::killVirtReg(LiveRegMap::iterator LRI) {
addKillFlag(*LRI);
assert(PhysRegState[LRI->PhysReg] == LRI->VirtReg &&
"Broken RegState mapping");
PhysRegState[LRI->PhysReg] = regFree;
// Erase from LiveVirtRegs unless we're spilling in bulk.
if (!isBulkSpilling)
LiveVirtRegs.erase(LRI);
}
/// killVirtReg - Mark virtreg as no longer available.
void RAFast::killVirtReg(unsigned VirtReg) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"killVirtReg needs a virtual register");
LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
if (LRI != LiveVirtRegs.end())
killVirtReg(LRI);
}
/// spillVirtReg - This method spills the value specified by VirtReg into the
/// corresponding stack slot if needed.
void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Spilling a physical register is illegal!");
LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
assert(LRI != LiveVirtRegs.end() && "Spilling unmapped virtual register");
spillVirtReg(MI, LRI);
}
/// spillVirtReg - Do the actual work of spilling.
void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
LiveRegMap::iterator LRI) {
LiveReg &LR = *LRI;
assert(PhysRegState[LR.PhysReg] == LRI->VirtReg && "Broken RegState mapping");
if (LR.Dirty) {
// If this physreg is used by the instruction, we want to kill it on the
// instruction, not on the spill.
bool SpillKill = LR.LastUse != MI;
LR.Dirty = false;
DEBUG(dbgs() << "Spilling " << PrintReg(LRI->VirtReg, TRI)
<< " in " << PrintReg(LR.PhysReg, TRI));
const TargetRegisterClass *RC = MRI->getRegClass(LRI->VirtReg);
int FI = getStackSpaceFor(LRI->VirtReg, RC);
DEBUG(dbgs() << " to stack slot #" << FI << "\n");
TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, RC, TRI);
++NumStores; // Update statistics
// If this register is used by DBG_VALUE then insert new DBG_VALUE to
// identify spilled location as the place to find corresponding variable's
// value.
SmallVectorImpl<MachineInstr *> &LRIDbgValues =
LiveDbgValueMap[LRI->VirtReg];
for (unsigned li = 0, le = LRIDbgValues.size(); li != le; ++li) {
MachineInstr *DBG_instr = LRIDbgValues[li];
const MDNode *Var = DBG_instr->getDebugVariable();
const MDNode *Expr = DBG_instr->getDebugExpression();
bool IsIndirect = DBG_instr->isIndirectDebugValue();
uint64_t Offset = IsIndirect ? DBG_instr->getOperand(1).getImm() : 0;
DebugLoc DL = DBG_instr->getDebugLoc();
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
MachineInstr *NewDV =
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::DBG_VALUE))
.addFrameIndex(FI)
.addImm(Offset)
.addMetadata(Var)
.addMetadata(Expr);
assert(NewDV->getParent() == MBB && "dangling parent pointer");
(void)NewDV;
DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV);
}
    // Now that this register is spilled, there should not be any DBG_VALUE
    // instructions pointing to this register, because they all point to the
    // spilled value now.
LRIDbgValues.clear();
if (SpillKill)
LR.LastUse = nullptr; // Don't kill register again
}
killVirtReg(LRI);
}
/// spillAll - Spill all dirty virtregs without killing them.
void RAFast::spillAll(MachineBasicBlock::iterator MI) {
if (LiveVirtRegs.empty()) return;
isBulkSpilling = true;
// The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
// of spilling here is deterministic, if arbitrary.
for (LiveRegMap::iterator i = LiveVirtRegs.begin(), e = LiveVirtRegs.end();
i != e; ++i)
spillVirtReg(MI, i);
LiveVirtRegs.clear();
isBulkSpilling = false;
}
/// usePhysReg - Handle the direct use of a physical register.
/// Check that the register is not used by a virtreg.
/// Kill the physreg, marking it free.
/// This may add implicit kills to MO->getParent() and invalidate MO.
void RAFast::usePhysReg(MachineOperand &MO) {
unsigned PhysReg = MO.getReg();
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
"Bad usePhysReg operand");
markRegUsedInInstr(PhysReg);
switch (PhysRegState[PhysReg]) {
case regDisabled:
break;
case regReserved:
PhysRegState[PhysReg] = regFree;
// Fall through
case regFree:
MO.setIsKill();
return;
default:
// The physreg was allocated to a virtual register. That means the value we
// wanted has been clobbered.
llvm_unreachable("Instruction uses an allocated register");
}
// Maybe a superregister is reserved?
for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
unsigned Alias = *AI;
switch (PhysRegState[Alias]) {
case regDisabled:
break;
case regReserved:
// Either PhysReg is a subregister of Alias and we mark the
// whole register as free, or PhysReg is the superregister of
// Alias and we mark all the aliases as disabled before freeing
// PhysReg.
// In the latter case, since PhysReg was disabled, this means that
// its value is defined only by physical sub-registers. This check
// is performed by the assert of the default case in this loop.
      // Note: The value of the superregister may only be partially
      // defined, which is why regDisabled is a valid state for aliases.
assert((TRI->isSuperRegister(PhysReg, Alias) ||
TRI->isSuperRegister(Alias, PhysReg)) &&
"Instruction is not using a subregister of a reserved register");
// Fall through.
case regFree:
if (TRI->isSuperRegister(PhysReg, Alias)) {
// Leave the superregister in the working set.
PhysRegState[Alias] = regFree;
MO.getParent()->addRegisterKilled(Alias, TRI, true);
return;
}
// Some other alias was in the working set - clear it.
PhysRegState[Alias] = regDisabled;
break;
default:
llvm_unreachable("Instruction uses an alias of an allocated register");
}
}
// All aliases are disabled, bring register into working set.
PhysRegState[PhysReg] = regFree;
MO.setIsKill();
}
/// definePhysReg - Mark PhysReg as reserved or free after spilling any
/// virtregs. This is very similar to defineVirtReg except the physreg is
/// reserved instead of allocated.
void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg,
RegState NewState) {
markRegUsedInInstr(PhysReg);
switch (unsigned VirtReg = PhysRegState[PhysReg]) {
case regDisabled:
break;
default:
spillVirtReg(MI, VirtReg);
// Fall through.
case regFree:
case regReserved:
PhysRegState[PhysReg] = NewState;
return;
}
// This is a disabled register, disable all aliases.
PhysRegState[PhysReg] = NewState;
for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
unsigned Alias = *AI;
switch (unsigned VirtReg = PhysRegState[Alias]) {
case regDisabled:
break;
default:
spillVirtReg(MI, VirtReg);
// Fall through.
case regFree:
case regReserved:
PhysRegState[Alias] = regDisabled;
if (TRI->isSuperRegister(PhysReg, Alias))
return;
break;
}
}
}
// calcSpillCost - Return the cost of spilling/clearing out PhysReg and its
// aliases so it is free for allocation.
// Returns 0 when PhysReg is free or disabled with all aliases disabled - it
// can be allocated directly.
// Returns spillImpossible when PhysReg or an alias can't be spilled.
unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
if (isRegUsedInInstr(PhysReg)) {
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is already used in instr.\n");
return spillImpossible;
}
switch (unsigned VirtReg = PhysRegState[PhysReg]) {
case regDisabled:
break;
case regFree:
return 0;
case regReserved:
DEBUG(dbgs() << PrintReg(VirtReg, TRI) << " corresponding "
<< PrintReg(PhysReg, TRI) << " is reserved already.\n");
return spillImpossible;
default: {
LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
return I->Dirty ? spillDirty : spillClean;
}
}
// This is a disabled register, add up cost of aliases.
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is disabled.\n");
unsigned Cost = 0;
for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
unsigned Alias = *AI;
switch (unsigned VirtReg = PhysRegState[Alias]) {
case regDisabled:
break;
case regFree:
++Cost;
break;
case regReserved:
return spillImpossible;
default: {
LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
Cost += I->Dirty ? spillDirty : spillClean;
break;
}
}
}
return Cost;
}
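// Worked example for the cost computation above (illustrative): if PhysReg
// is disabled and its aliases consist of one free register and one register
// holding a clean virtreg, the total cost is 1 + spillClean = 2, which beats
// any candidate holding a dirty value (spillDirty = 100).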
/// assignVirtToPhysReg - This method updates local state so that we know
/// that PhysReg is the proper container for VirtReg now. The physical
/// register must not be used for anything else when this is called.
///
void RAFast::assignVirtToPhysReg(LiveReg &LR, unsigned PhysReg) {
DEBUG(dbgs() << "Assigning " << PrintReg(LR.VirtReg, TRI) << " to "
<< PrintReg(PhysReg, TRI) << "\n");
PhysRegState[PhysReg] = LR.VirtReg;
assert(!LR.PhysReg && "Already assigned a physreg");
LR.PhysReg = PhysReg;
}
RAFast::LiveRegMap::iterator
RAFast::assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg) {
LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
assert(LRI != LiveVirtRegs.end() && "VirtReg disappeared");
assignVirtToPhysReg(*LRI, PhysReg);
return LRI;
}
/// allocVirtReg - Allocate a physical register for VirtReg.
RAFast::LiveRegMap::iterator RAFast::allocVirtReg(MachineInstr *MI,
LiveRegMap::iterator LRI,
unsigned Hint) {
const unsigned VirtReg = LRI->VirtReg;
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Can only allocate virtual registers");
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
// Ignore invalid hints.
if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) ||
!RC->contains(Hint) || !MRI->isAllocatable(Hint)))
Hint = 0;
// Take hint when possible.
if (Hint) {
// Ignore the hint if we would have to spill a dirty register.
unsigned Cost = calcSpillCost(Hint);
if (Cost < spillDirty) {
if (Cost)
definePhysReg(MI, Hint, regFree);
// definePhysReg may kill virtual registers and modify LiveVirtRegs.
// That invalidates LRI, so run a new lookup for VirtReg.
return assignVirtToPhysReg(VirtReg, Hint);
}
}
ArrayRef<MCPhysReg> AO = RegClassInfo.getOrder(RC);
// First try to find a completely free register.
for (ArrayRef<MCPhysReg>::iterator I = AO.begin(), E = AO.end(); I != E; ++I){
unsigned PhysReg = *I;
if (PhysRegState[PhysReg] == regFree && !isRegUsedInInstr(PhysReg)) {
assignVirtToPhysReg(*LRI, PhysReg);
return LRI;
}
}
DEBUG(dbgs() << "Allocating " << PrintReg(VirtReg) << " from "
<< TRI->getRegClassName(RC) << "\n");
unsigned BestReg = 0, BestCost = spillImpossible;
for (ArrayRef<MCPhysReg>::iterator I = AO.begin(), E = AO.end(); I != E; ++I){
unsigned Cost = calcSpillCost(*I);
DEBUG(dbgs() << "\tRegister: " << PrintReg(*I, TRI) << "\n");
DEBUG(dbgs() << "\tCost: " << Cost << "\n");
DEBUG(dbgs() << "\tBestCost: " << BestCost << "\n");
// Cost is 0 when all aliases are already disabled.
if (Cost == 0) {
assignVirtToPhysReg(*LRI, *I);
return LRI;
}
if (Cost < BestCost)
BestReg = *I, BestCost = Cost;
}
if (BestReg) {
definePhysReg(MI, BestReg, regFree);
// definePhysReg may kill virtual registers and modify LiveVirtRegs.
// That invalidates LRI, so run a new lookup for VirtReg.
return assignVirtToPhysReg(VirtReg, BestReg);
}
// Nothing we can do. Report an error and keep going with a bad allocation.
if (MI->isInlineAsm())
MI->emitError("inline assembly requires more registers than available");
else
MI->emitError("ran out of registers during register allocation");
definePhysReg(MI, *AO.begin(), regFree);
return assignVirtToPhysReg(VirtReg, *AO.begin());
}
/// defineVirtReg - Allocate a register for VirtReg and mark it as dirty.
RAFast::LiveRegMap::iterator
RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum,
unsigned VirtReg, unsigned Hint) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
if (New) {
// If there is no hint, peek at the only use of this register.
if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
MRI->hasOneNonDBGUse(VirtReg)) {
const MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(VirtReg);
// It's a copy, use the destination register as a hint.
if (UseMI.isCopyLike())
Hint = UseMI.getOperand(0).getReg();
}
LRI = allocVirtReg(MI, LRI, Hint);
} else if (LRI->LastUse) {
// Redefining a live register - kill at the last use, unless it is this
// instruction defining VirtReg multiple times.
if (LRI->LastUse != MI || LRI->LastUse->getOperand(LRI->LastOpNum).isUse())
addKillFlag(*LRI);
}
assert(LRI->PhysReg && "Register not assigned");
LRI->LastUse = MI;
LRI->LastOpNum = OpNum;
LRI->Dirty = true;
markRegUsedInInstr(LRI->PhysReg);
return LRI;
}
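// Example for the hint-peeking above (hypothetical registers): if the only
// use of %vreg1 is "%EAX = COPY %vreg1", the copy destination EAX becomes
// the allocation hint, so the copy can later be coalesced away.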
/// reloadVirtReg - Make sure VirtReg is available in a physreg and return it.
RAFast::LiveRegMap::iterator
RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
unsigned VirtReg, unsigned Hint) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
MachineOperand &MO = MI->getOperand(OpNum);
if (New) {
LRI = allocVirtReg(MI, LRI, Hint);
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
int FrameIndex = getStackSpaceFor(VirtReg, RC);
DEBUG(dbgs() << "Reloading " << PrintReg(VirtReg, TRI) << " into "
<< PrintReg(LRI->PhysReg, TRI) << "\n");
TII->loadRegFromStackSlot(*MBB, MI, LRI->PhysReg, FrameIndex, RC, TRI);
++NumLoads;
} else if (LRI->Dirty) {
if (isLastUseOfLocalReg(MO)) {
DEBUG(dbgs() << "Killing last use: " << MO << "\n");
if (MO.isUse())
MO.setIsKill();
else
MO.setIsDead();
} else if (MO.isKill()) {
DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n");
MO.setIsKill(false);
} else if (MO.isDead()) {
DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n");
MO.setIsDead(false);
}
} else if (MO.isKill()) {
// We must remove kill flags from uses of reloaded registers because the
// register would be killed immediately, and there might be a second use:
// %foo = OR %x<kill>, %x
// This would cause a second reload of %x into a different register.
DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
MO.setIsKill(false);
} else if (MO.isDead()) {
DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
MO.setIsDead(false);
}
assert(LRI->PhysReg && "Register not assigned");
LRI->LastUse = MI;
LRI->LastOpNum = OpNum;
markRegUsedInInstr(LRI->PhysReg);
return LRI;
}
// setPhysReg - Change operand OpNum in MI to refer to PhysReg, taking
// subregisters into account. This may invalidate any operand pointers.
// Return true if the operand kills its register.
bool RAFast::setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg) {
MachineOperand &MO = MI->getOperand(OpNum);
bool Dead = MO.isDead();
if (!MO.getSubReg()) {
MO.setReg(PhysReg);
return MO.isKill() || Dead;
}
// Handle subregister index.
MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : 0);
MO.setSubReg(0);
// A kill flag implies killing the full register. Add corresponding super
// register kill.
if (MO.isKill()) {
MI->addRegisterKilled(PhysReg, TRI, true);
return true;
}
// A <def,read-undef> of a sub-register requires an implicit def of the full
// register.
if (MO.isDef() && MO.isUndef())
MI->addRegisterDefined(PhysReg, TRI);
return Dead;
}
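// Example of the subregister handling above (hypothetical registers): a use
// of %vreg0:sub32 allocated to the 64-bit register X0 is rewritten to W0
// (the sub32 part of X0), and the operand's sub-register index is cleared.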
// Handle special instruction operands like early clobbers and tied ops when
// there are additional physreg defines.
void RAFast::handleThroughOperands(MachineInstr *MI,
SmallVectorImpl<unsigned> &VirtDead) {
DEBUG(dbgs() << "Scanning for through registers:");
SmallSet<unsigned, 8> ThroughRegs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (MO.isEarlyClobber() || MI->isRegTiedToDefOperand(i) ||
(MO.getSubReg() && MI->readsVirtualRegister(Reg))) {
if (ThroughRegs.insert(Reg).second)
DEBUG(dbgs() << ' ' << PrintReg(Reg));
}
}
// If any physreg defines collide with preallocated through registers,
// we must spill and reallocate.
DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
markRegUsedInInstr(Reg);
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
if (ThroughRegs.count(PhysRegState[*AI]))
definePhysReg(MI, *AI, regFree);
}
}
SmallVector<unsigned, 8> PartialDefs;
DEBUG(dbgs() << "Allocating tied uses.\n");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (MO.isUse()) {
unsigned DefIdx = 0;
if (!MI->isRegTiedToDefOperand(i, &DefIdx)) continue;
DEBUG(dbgs() << "Operand " << i << "("<< MO << ") is tied to operand "
<< DefIdx << ".\n");
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
unsigned PhysReg = LRI->PhysReg;
setPhysReg(MI, i, PhysReg);
// Note: we don't update the def operand yet. That would cause the normal
// def-scan to attempt spilling.
} else if (MO.getSubReg() && MI->readsVirtualRegister(Reg)) {
DEBUG(dbgs() << "Partial redefine: " << MO << "\n");
// Reload the register, but don't assign to the operand just yet.
// That would confuse the later phys-def processing pass.
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
PartialDefs.push_back(LRI->PhysReg);
}
}
DEBUG(dbgs() << "Allocating early clobbers.\n");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (!MO.isEarlyClobber())
continue;
// Note: defineVirtReg may invalidate MO.
LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
unsigned PhysReg = LRI->PhysReg;
if (setPhysReg(MI, i, PhysReg))
VirtDead.push_back(Reg);
}
// Restore UsedInInstr to a state usable for allocating normal virtual uses.
UsedInInstr.clear();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
DEBUG(dbgs() << "\tSetting " << PrintReg(Reg, TRI)
<< " as used in instr\n");
markRegUsedInInstr(Reg);
}
// Also mark PartialDefs as used to avoid reallocation.
for (unsigned i = 0, e = PartialDefs.size(); i != e; ++i)
markRegUsedInInstr(PartialDefs[i]);
}
void RAFast::AllocateBasicBlock() {
DEBUG(dbgs() << "\nAllocating " << *MBB);
PhysRegState.assign(TRI->getNumRegs(), regDisabled);
assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
MachineBasicBlock::iterator MII = MBB->begin();
// Add live-in registers as live.
for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
E = MBB->livein_end(); I != E; ++I)
if (MRI->isAllocatable(*I))
definePhysReg(MII, *I, regReserved);
SmallVector<unsigned, 8> VirtDead;
SmallVector<MachineInstr*, 32> Coalesced;
  // Now sequentially allocate each instruction in the MBB.
while (MII != MBB->end()) {
MachineInstr *MI = MII++;
const MCInstrDesc &MCID = MI->getDesc();
DEBUG({
dbgs() << "\n>> " << *MI << "Regs:";
for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) {
if (PhysRegState[Reg] == regDisabled) continue;
dbgs() << " " << TRI->getName(Reg);
switch(PhysRegState[Reg]) {
case regFree:
break;
case regReserved:
dbgs() << "*";
break;
default: {
dbgs() << '=' << PrintReg(PhysRegState[Reg]);
LiveRegMap::iterator I = findLiveVirtReg(PhysRegState[Reg]);
assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
if (I->Dirty)
dbgs() << "*";
assert(I->PhysReg == Reg && "Bad inverse map");
break;
}
}
}
dbgs() << '\n';
// Check that LiveVirtRegs is the inverse.
for (LiveRegMap::iterator i = LiveVirtRegs.begin(),
e = LiveVirtRegs.end(); i != e; ++i) {
assert(TargetRegisterInfo::isVirtualRegister(i->VirtReg) &&
"Bad map key");
assert(TargetRegisterInfo::isPhysicalRegister(i->PhysReg) &&
"Bad map value");
assert(PhysRegState[i->PhysReg] == i->VirtReg && "Bad inverse map");
}
});
// Debug values are not allowed to change codegen in any way.
if (MI->isDebugValue()) {
bool ScanDbgValue = true;
while (ScanDbgValue) {
ScanDbgValue = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
if (LRI != LiveVirtRegs.end())
setPhysReg(MI, i, LRI->PhysReg);
else {
int SS = StackSlotForVirtReg[Reg];
if (SS == -1) {
// We can't allocate a physreg for a DebugValue, sorry!
DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
MO.setReg(0);
}
else {
// Modify DBG_VALUE now that the value is in a spill slot.
bool IsIndirect = MI->isIndirectDebugValue();
uint64_t Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
const MDNode *Var = MI->getDebugVariable();
const MDNode *Expr = MI->getDebugExpression();
DebugLoc DL = MI->getDebugLoc();
MachineBasicBlock *MBB = MI->getParent();
assert(
cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
MachineInstr *NewDV = BuildMI(*MBB, MBB->erase(MI), DL,
TII->get(TargetOpcode::DBG_VALUE))
.addFrameIndex(SS)
.addImm(Offset)
.addMetadata(Var)
.addMetadata(Expr);
DEBUG(dbgs() << "Modifying debug info due to spill:"
<< "\t" << *NewDV);
// Scan NewDV operands from the beginning.
MI = NewDV;
ScanDbgValue = true;
break;
}
}
LiveDbgValueMap[Reg].push_back(MI);
}
}
// Next instruction.
continue;
}
// If this is a copy, we may be able to coalesce.
unsigned CopySrc = 0, CopyDst = 0, CopySrcSub = 0, CopyDstSub = 0;
if (MI->isCopy()) {
CopyDst = MI->getOperand(0).getReg();
CopySrc = MI->getOperand(1).getReg();
CopyDstSub = MI->getOperand(0).getSubReg();
CopySrcSub = MI->getOperand(1).getSubReg();
}
// Track registers used by instruction.
UsedInInstr.clear();
// First scan.
// Mark physreg uses and early clobbers as used.
// Find the end of the virtreg operands
unsigned VirtOpEnd = 0;
bool hasTiedOps = false;
bool hasEarlyClobbers = false;
bool hasPartialRedefs = false;
bool hasPhysDefs = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
// Make sure MRI knows about registers clobbered by regmasks.
if (MO.isRegMask()) {
MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
continue;
}
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!Reg) continue;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
VirtOpEnd = i+1;
if (MO.isUse()) {
hasTiedOps = hasTiedOps ||
MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1;
} else {
if (MO.isEarlyClobber())
hasEarlyClobbers = true;
if (MO.getSubReg() && MI->readsVirtualRegister(Reg))
hasPartialRedefs = true;
}
continue;
}
if (!MRI->isAllocatable(Reg)) continue;
if (MO.isUse()) {
usePhysReg(MO);
} else if (MO.isEarlyClobber()) {
definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ?
regFree : regReserved);
hasEarlyClobbers = true;
} else
hasPhysDefs = true;
}
// The instruction may have virtual register operands that must be allocated
// the same register at use-time and def-time: early clobbers and tied
// operands. If there are also physical defs, these registers must avoid
// both physical defs and uses, making them more constrained than normal
// operands.
// Similarly, if there are multiple defs and tied operands, we must make
// sure the same register is allocated to uses and defs.
// We didn't detect inline asm tied operands above, so just make this extra
// pass for all inline asm.
if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
(hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) {
handleThroughOperands(MI, VirtDead);
// Don't attempt coalescing when we have funny stuff going on.
CopyDst = 0;
// Pretend we have early clobbers so the use operands get marked below.
// This is not necessary for the common case of a single tied use.
hasEarlyClobbers = true;
}
// Second scan.
// Allocate virtreg uses.
for (unsigned i = 0; i != VirtOpEnd; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (MO.isUse()) {
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, CopyDst);
unsigned PhysReg = LRI->PhysReg;
CopySrc = (CopySrc == Reg || CopySrc == PhysReg) ? PhysReg : 0;
if (setPhysReg(MI, i, PhysReg))
killVirtReg(LRI);
}
}
for (UsedInInstrSet::iterator
I = UsedInInstr.begin(), E = UsedInInstr.end(); I != E; ++I)
MRI->setRegUnitUsed(*I);
// Track registers defined by instruction - early clobbers and tied uses at
// this point.
UsedInInstr.clear();
if (hasEarlyClobbers) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
// Look for physreg defs and tied uses.
if (!MO.isDef() && !MI->isRegTiedToDefOperand(i)) continue;
markRegUsedInInstr(Reg);
}
}
unsigned DefOpEnd = MI->getNumOperands();
if (MI->isCall()) {
// Spill all virtregs before a call. This serves two purposes: 1. If an
// exception is thrown, the landing pad is going to expect to find
// registers in their spill slots, and 2. we don't have to wade through
// all the <imp-def> operands on the call instruction.
DefOpEnd = VirtOpEnd;
DEBUG(dbgs() << " Spilling remaining registers before call.\n");
spillAll(MI);
// The imp-defs are skipped below, but we still need to mark those
// registers as used by the function.
SkippedInstrs.insert(&MCID);
}
// Third scan.
// Allocate defs and collect dead defs.
for (unsigned i = 0; i != DefOpEnd; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
continue;
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (!MRI->isAllocatable(Reg)) continue;
definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
continue;
}
LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, CopySrc);
unsigned PhysReg = LRI->PhysReg;
if (setPhysReg(MI, i, PhysReg)) {
VirtDead.push_back(Reg);
CopyDst = 0; // cancel coalescing;
} else
CopyDst = (CopyDst == Reg || CopyDst == PhysReg) ? PhysReg : 0;
}
// Kill dead defs after the scan to ensure that multiple defs of the same
// register are allocated identically. We didn't need to do this for uses
    // because we are creating our own kill flags, and they are always at the
// last use.
for (unsigned i = 0, e = VirtDead.size(); i != e; ++i)
killVirtReg(VirtDead[i]);
VirtDead.clear();
for (UsedInInstrSet::iterator
I = UsedInInstr.begin(), E = UsedInInstr.end(); I != E; ++I)
MRI->setRegUnitUsed(*I);
if (CopyDst && CopyDst == CopySrc && CopyDstSub == CopySrcSub) {
DEBUG(dbgs() << "-- coalescing: " << *MI);
Coalesced.push_back(MI);
} else {
DEBUG(dbgs() << "<< " << *MI);
}
}
// Spill all physical registers holding virtual registers now.
DEBUG(dbgs() << "Spilling live registers at end of block.\n");
spillAll(MBB->getFirstTerminator());
// Erase all the coalesced copies. We are delaying it until now because
// LiveVirtRegs might refer to the instrs.
for (unsigned i = 0, e = Coalesced.size(); i != e; ++i)
MBB->erase(Coalesced[i]);
NumCopies += Coalesced.size();
DEBUG(MBB->dump());
}
/// runOnMachineFunction - Register allocate the whole function
///
bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
<< "********** Function: " << Fn.getName() << '\n');
MF = &Fn;
MRI = &MF->getRegInfo();
TRI = MF->getSubtarget().getRegisterInfo();
TII = MF->getSubtarget().getInstrInfo();
MRI->freezeReservedRegs(Fn);
RegClassInfo.runOnMachineFunction(Fn);
UsedInInstr.clear();
UsedInInstr.setUniverse(TRI->getNumRegUnits());
assert(!MRI->isSSA() && "regalloc requires leaving SSA");
// initialize the virtual->physical register map to have a 'null'
// mapping for all virtual registers
StackSlotForVirtReg.resize(MRI->getNumVirtRegs());
LiveVirtRegs.setUniverse(MRI->getNumVirtRegs());
// Loop over all of the basic blocks, eliminating virtual register references
for (MachineFunction::iterator MBBi = Fn.begin(), MBBe = Fn.end();
MBBi != MBBe; ++MBBi) {
MBB = &*MBBi;
AllocateBasicBlock();
}
// Add the clobber lists for all the instructions we skipped earlier.
for (const MCInstrDesc *Desc : SkippedInstrs)
if (const uint16_t *Defs = Desc->getImplicitDefs())
while (*Defs)
MRI->setPhysRegUsed(*Defs++);
// All machine operands and other references to virtual registers have been
// replaced. Remove the virtual registers.
MRI->clearVirtRegs();
SkippedInstrs.clear();
StackSlotForVirtReg.clear();
LiveDbgValueMap.clear();
return true;
}
FunctionPass *llvm::createFastRegisterAllocator() {
return new RAFast();
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ScheduleDAGInstrs.cpp | //===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <queue>
using namespace llvm;
#define DEBUG_TYPE "misched"
static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
cl::ZeroOrMore, cl::init(false),
cl::desc("Enable use of AA during MI DAG construction"));
static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"));
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo *mli,
bool IsPostRAFlag, bool RemoveKillFlags,
LiveIntervals *lis)
: ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), LIS(lis),
IsPostRA(IsPostRAFlag), RemoveKillFlags(RemoveKillFlags),
CanHandleTerminators(false), FirstDbgValue(nullptr) {
assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
DbgValues.clear();
assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
"Virtual registers must be removed prior to PostRA scheduling");
const TargetSubtargetInfo &ST = mf.getSubtarget();
SchedModel.init(ST.getSchedModel(), &ST, TII);
}
/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
do {
if (const Operator *U = dyn_cast<Operator>(V)) {
// If we find a ptrtoint, we can transfer control back to the
// regular getUnderlyingObjectFromInt.
if (U->getOpcode() == Instruction::PtrToInt)
return U->getOperand(0);
// If we find an add of a constant, a multiplied value, or a phi, it's
// likely that the other operand will lead us to the base
// object. We don't have to worry about the case where the
// object address is somehow being computed by the multiply,
// because our callers only care when the result is an
// identifiable object.
if (U->getOpcode() != Instruction::Add ||
(!isa<ConstantInt>(U->getOperand(1)) &&
Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
!isa<PHINode>(U->getOperand(1))))
return V;
V = U->getOperand(0);
} else {
return V;
}
assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
} while (1);
}
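// For example, given an (illustrative) IR sequence
//   %i = ptrtoint i8* %obj to i64
//   %j = add i64 %i, 16
//   %p = inttoptr i64 %j to i8*
// the walk above, starting from the integer %j, looks through the add to %i,
// finds the ptrtoint, and recovers %obj as the underlying object.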
/// getUnderlyingObjects - This is a wrapper around GetUnderlyingObjects
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static void getUnderlyingObjects(const Value *V,
SmallVectorImpl<Value *> &Objects,
const DataLayout &DL) {
SmallPtrSet<const Value *, 16> Visited;
SmallVector<const Value *, 4> Working(1, V);
do {
V = Working.pop_back_val();
SmallVector<Value *, 4> Objs;
GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
I != IE; ++I) {
V = *I;
if (!Visited.insert(V).second)
continue;
if (Operator::getOpcode(V) == Instruction::IntToPtr) {
const Value *O =
getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
if (O->getType()->isPointerTy()) {
Working.push_back(O);
continue;
}
}
Objects.push_back(const_cast<Value *>(V));
}
} while (!Working.empty());
}
typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
typedef SmallVector<PointerIntPair<ValueType, 1, bool>, 4>
UnderlyingObjectsVector;
/// getUnderlyingObjectsForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object.
static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
const MachineFrameInfo *MFI,
UnderlyingObjectsVector &Objects,
const DataLayout &DL) {
if (!MI->hasOneMemOperand() ||
(!(*MI->memoperands_begin())->getValue() &&
!(*MI->memoperands_begin())->getPseudoValue()) ||
(*MI->memoperands_begin())->isVolatile())
return;
if (const PseudoSourceValue *PSV =
(*MI->memoperands_begin())->getPseudoValue()) {
    // Functions that contain tail calls don't have unique PseudoSourceValue
// objects. Two PseudoSourceValues might refer to the same or overlapping
// locations. The client code calling this function assumes this is not the
// case. So return a conservative answer of no known object.
if (MFI->hasTailCall())
return;
// For now, ignore PseudoSourceValues which may alias LLVM IR values
// because the code that uses this function has no way to cope with
// such aliases.
if (!PSV->isAliased(MFI)) {
bool MayAlias = PSV->mayAlias(MFI);
Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
}
return;
}
const Value *V = (*MI->memoperands_begin())->getValue();
if (!V)
return;
SmallVector<Value *, 4> Objs;
getUnderlyingObjects(V, Objs, DL);
for (Value *V : Objs) {
if (!isIdentifiedObject(V)) {
Objects.clear();
return;
}
Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
}
}
void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
BB = bb;
}
void ScheduleDAGInstrs::finishBlock() {
// Subclasses should no longer refer to the old block.
BB = nullptr;
}
/// Initialize the DAG and common scheduler state for the current scheduling
/// region. This does not actually create the DAG, only clears it. The
/// scheduling driver may call BuildSchedGraph multiple times per scheduling
/// region.
void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs) {
assert(bb == BB && "startBlock should set BB");
RegionBegin = begin;
RegionEnd = end;
NumRegionInstrs = regioninstrs;
}
/// Close the current scheduling region. Don't clear any state in case the
/// driver wants to refer to the previous scheduling region.
void ScheduleDAGInstrs::exitRegion() {
// Nothing to do.
}
/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch, or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::addSchedBarrierDeps() {
MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
ExitSU.setInstr(ExitMI);
bool AllDepKnown = ExitMI &&
(ExitMI->isCall() || ExitMI->isBarrier());
if (ExitMI && AllDepKnown) {
// If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = ExitMI->getOperand(i);
if (!MO.isReg() || MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (TRI->isPhysicalRegister(Reg))
Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
else {
assert(!IsPostRA && "Virtual register encountered after regalloc.");
if (MO.readsReg()) // ignore undef operands
addVRegUseDeps(&ExitSU, i);
}
}
} else {
// For others, e.g. fallthrough, conditional branch, assume the exit
// uses all the registers that are livein to the successor blocks.
assert(Uses.empty() && "Uses in set before adding deps?");
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
if (!Uses.contains(Reg))
Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
}
}
}
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
assert(MO.isDef() && "expect physreg def");
// Ask the target if address-backscheduling is desirable, and if so how much.
const TargetSubtargetInfo &ST = MF.getSubtarget();
for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
Alias.isValid(); ++Alias) {
if (!Uses.contains(*Alias))
continue;
for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
SUnit *UseSU = I->SU;
if (UseSU == SU)
continue;
// Adjust the dependence latency using operand def/use information,
// then allow the target to perform its own adjustments.
int UseOp = I->OpIdx;
MachineInstr *RegUse = nullptr;
SDep Dep;
if (UseOp < 0)
Dep = SDep(SU, SDep::Artificial);
else {
// Set the hasPhysRegDefs only for physreg defs that have a use within
// the scheduling region.
SU->hasPhysRegDefs = true;
Dep = SDep(SU, SDep::Data, *Alias);
RegUse = UseSU->getInstr();
}
Dep.setLatency(
SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, RegUse,
UseOp));
ST.adjustSchedDependency(SU, UseSU, Dep);
UseSU->addPred(Dep);
}
}
}
/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
MachineInstr *MI = SU->getInstr();
MachineOperand &MO = MI->getOperand(OperIdx);
// Optionally add output and anti dependencies. For anti
// dependencies we use a latency of 0 because for a multi-issue
// target we want to allow the defining instruction to issue
// in the same cycle as the using instruction.
// TODO: Using a latency of 1 here for output dependencies assumes
// there's no cost for reusing registers.
SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
Alias.isValid(); ++Alias) {
if (!Defs.contains(*Alias))
continue;
for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
SUnit *DefSU = I->SU;
if (DefSU == &ExitSU)
continue;
if (DefSU != SU &&
(Kind != SDep::Output || !MO.isDead() ||
!DefSU->getInstr()->registerDefIsDead(*Alias))) {
if (Kind == SDep::Anti)
DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
else {
SDep Dep(SU, Kind, /*Reg=*/*Alias);
Dep.setLatency(
SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
DefSU->addPred(Dep);
}
}
}
}
if (!MO.isDef()) {
SU->hasPhysRegUses = true;
// Either insert a new Reg2SUnits entry with an empty SUnits list, or
// retrieve the existing SUnits list for this register's uses.
// Push this SUnit on the use list.
Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
if (RemoveKillFlags)
MO.setIsKill(false);
}
else {
addPhysRegDataDeps(SU, OperIdx);
unsigned Reg = MO.getReg();
// clear this register's use list
if (Uses.contains(Reg))
Uses.eraseAll(Reg);
if (!MO.isDead()) {
Defs.eraseAll(Reg);
} else if (SU->isCall) {
// Calls will not be reordered because of chain dependencies (see
// below). Since call operands are dead, calls may continue to be added
// to the DefList making dependence checking quadratic in the size of
// the block. Instead, we leave only one call at the back of the
// DefList.
Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
Reg2SUnitsMap::iterator B = P.first;
Reg2SUnitsMap::iterator I = P.second;
for (bool isBegin = I == B; !isBegin; /* empty */) {
isBegin = (--I) == B;
if (!I->SU->isCall)
break;
I = Defs.erase(I);
}
}
// Defs are pushed in the order they are visited and never reordered.
Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
}
}
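// Illustrative example (hypothetical x86-style registers): for
// "%EAX = ADD32ri %EAX, 1" followed later by another def of %EAX, the
// redefinition gets an anti-dependence on the earlier use with latency 0,
// so both can issue in the same cycle on a multi-issue target, while the
// output dependence between the two defs gets its latency from
// computeOutputLatency().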
/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
const MachineInstr *MI = SU->getInstr();
unsigned Reg = MI->getOperand(OperIdx).getReg();
// Singly defined vregs do not have output/anti dependencies.
// The current operand is a def, so we have at least one.
// Check here if there are any others...
if (MRI.hasOneDef(Reg))
return;
// Add output dependence to the next nearest def of this vreg.
//
// Unless this definition is dead, the output dependence should be
// transitively redundant with antidependencies from this definition's
// uses. We're conservative for now until we have a way to guarantee the uses
// are not eliminated sometime during scheduling. The output dependence edge
// is also useful if output latency exceeds def-use latency.
VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
if (DefI == VRegDefs.end())
VRegDefs.insert(VReg2SUnit(Reg, SU));
else {
SUnit *DefSU = DefI->SU;
if (DefSU != SU && DefSU != &ExitSU) {
SDep Dep(SU, SDep::Output, Reg);
Dep.setLatency(
SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
DefSU->addPred(Dep);
}
DefI->SU = SU;
}
}
/// addVRegUseDeps - Add a register data dependency if the instruction that
/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
/// register antidependency from this SUnit to instructions that occur later in
/// the same scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
MachineInstr *MI = SU->getInstr();
unsigned Reg = MI->getOperand(OperIdx).getReg();
// Record this local VReg use.
VReg2UseMap::iterator UI = VRegUses.find(Reg);
for (; UI != VRegUses.end(); ++UI) {
if (UI->SU == SU)
break;
}
if (UI == VRegUses.end())
VRegUses.insert(VReg2SUnit(Reg, SU));
// Lookup this operand's reaching definition.
assert(LIS && "vreg dependencies requires LiveIntervals");
LiveQueryResult LRQ
= LIS->getInterval(Reg).Query(LIS->getInstructionIndex(MI));
VNInfo *VNI = LRQ.valueIn();
// VNI will be valid because MachineOperand::readsReg() is checked by caller.
assert(VNI && "No value to read by operand");
MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
// Phis and other noninstructions (after coalescing) have a NULL Def.
if (Def) {
SUnit *DefSU = getSUnit(Def);
if (DefSU) {
// The reaching Def lives within this scheduling region.
// Create a data dependence.
SDep dep(DefSU, SDep::Data, Reg);
// Adjust the dependence latency using operand def/use information, then
// allow the target to perform its own adjustments.
int DefOp = Def->findRegisterDefOperandIdx(Reg);
dep.setLatency(SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx));
const TargetSubtargetInfo &ST = MF.getSubtarget();
ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
SU->addPred(dep);
}
}
// Add antidependence to the following def of the vreg it uses.
VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
if (DefI != VRegDefs.end() && DefI->SU != SU)
DefI->SU->addPred(SDep(SU, SDep::Anti, Reg));
}
/// Return true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
(MI->hasOrderedMemoryRef() &&
(!MI->mayLoad() || !MI->isInvariantLoad(AA))))
return true;
return false;
}
// This MI might have either incomplete info, or be known to be unsafe
// to deal with (e.g., a volatile object).
static inline bool isUnsafeMemoryObject(MachineInstr *MI,
const MachineFrameInfo *MFI,
const DataLayout &DL) {
if (!MI || MI->memoperands_empty())
return true;
  // We purposefully do not check for hasOneMemOperand() here, in the hope
  // of triggering an assert downstream in order to finish the
  // implementation.
if ((*MI->memoperands_begin())->isVolatile() ||
MI->hasUnmodeledSideEffects())
return true;
if ((*MI->memoperands_begin())->getPseudoValue()) {
    // Similarly to getUnderlyingObjectsForInstr:
// For now, ignore PseudoSourceValues which may alias LLVM IR values
// because the code that uses this function has no way to cope with
// such aliases.
return true;
}
const Value *V = (*MI->memoperands_begin())->getValue();
if (!V)
return true;
SmallVector<Value *, 4> Objs;
getUnderlyingObjects(V, Objs, DL);
for (Value *V : Objs) {
// Does this pointer refer to a distinct and identifiable object?
if (!isIdentifiedObject(V))
return true;
}
return false;
}
/// This returns true if the two MIs need a chain edge between them.
/// Even if these are not memory operations, we may still need
/// chain deps between them. The question really is: could
/// these two MIs be reordered during scheduling from a memory dependency
/// point of view?
static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
const DataLayout &DL, MachineInstr *MIa,
MachineInstr *MIb) {
const MachineFunction *MF = MIa->getParent()->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  // Cover a trivial case - no edge is needed from a node to itself.
if (MIa == MIb)
return false;
// Let the target decide if memory accesses cannot possibly overlap.
if ((MIa->mayLoad() || MIa->mayStore()) &&
(MIb->mayLoad() || MIb->mayStore()))
if (TII->areMemAccessesTriviallyDisjoint(MIa, MIb, AA))
return false;
// FIXME: Need to handle multiple memory operands to support all targets.
if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
return true;
if (isUnsafeMemoryObject(MIa, MFI, DL) || isUnsafeMemoryObject(MIb, MFI, DL))
return true;
// If we are dealing with two "normal" loads, we do not need an edge
// between them - they could be reordered.
if (!MIa->mayStore() && !MIb->mayStore())
return false;
// To this point analysis is generic. From here on we do need AA.
if (!AA)
return true;
MachineMemOperand *MMOa = *MIa->memoperands_begin();
MachineMemOperand *MMOb = *MIb->memoperands_begin();
if (!MMOa->getValue() || !MMOb->getValue())
return true;
// The following interface to AA is fashioned after DAGCombiner::isAlias
// and operates with MachineMemOperand offset with some important
// assumptions:
// - LLVM fundamentally assumes flat address spaces.
  //  - MachineMemOperand offset can *only* result from legalization and
// cannot affect queries other than the trivial case of overlap
// checking.
// - These offsets never wrap and never step outside
// of allocated objects.
// - There should never be any negative offsets here.
//
// FIXME: Modify API to hide this math from "user"
// FIXME: Even before we go to AA we can reason locally about some
// memory objects. It can save compile time, and possibly catch some
// corner cases not currently covered.
  assert((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
  assert((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");
int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
AliasResult AAResult =
AA->alias(MemoryLocation(MMOa->getValue(), Overlapa,
UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
MemoryLocation(MMOb->getValue(), Overlapb,
UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
return (AAResult != NoAlias);
}
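// A worked example of the overlap math above (the offsets and sizes are
// hypothetical): suppose MMOa has offset 8 and size 4, and MMOb has offset 0
// and size 16. Then MinOffset = min(8, 0) = 0, Overlapa = 4 + 8 - 0 = 12, and
// Overlapb = 16 + 0 - 0 = 16, so each MemoryLocation passed to AA->alias()
// covers its access from the common base at MinOffset through the end of the
// access, which is the conservative region the query needs to compare.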
/// This recursive function iterates over chain deps of SUb looking for
/// the "latest" node that needs a chain edge to SUa.
static unsigned iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
const DataLayout &DL, SUnit *SUa, SUnit *SUb,
SUnit *ExitSU, unsigned *Depth,
SmallPtrSetImpl<const SUnit *> &Visited) {
if (!SUa || !SUb || SUb == ExitSU)
return *Depth;
// Remember visited nodes.
if (!Visited.insert(SUb).second)
return *Depth;
// If there is _some_ dependency already in place, do not
// descend any further.
  // TODO: Need to make sure that if that dependency got eliminated or ignored
  // for any reason in the future, we would not violate DAG topology.
  // Currently that does not happen, but this makes an implicit assumption
  // about the future implementation.
  //
  // Independently, if we encounter a node that is some sort of global
  // memory object (like a call), we already have the full set of dependencies
  // to it and we can stop descending.
if (SUa->isSucc(SUb) ||
isGlobalMemoryObject(AA, SUb->getInstr()))
return *Depth;
  // If we do need an edge, or we have exceeded the depth budget,
  // add that edge to the predecessor chain of SUb,
  // and stop descending.
if (*Depth > 200 ||
MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
SUb->addPred(SDep(SUa, SDep::MayAliasMem));
return *Depth;
}
// Track current depth.
(*Depth)++;
// Iterate over memory dependencies only.
for (SUnit::const_succ_iterator I = SUb->Succs.begin(), E = SUb->Succs.end();
I != E; ++I)
if (I->isNormalMemoryOrBarrier())
iterateChainSucc(AA, MFI, DL, SUa, I->getSUnit(), ExitSU, Depth, Visited);
return *Depth;
}
/// This function assumes that the tail/leaf nodes of the already
/// constructed DAG exist "downward" from SU. It iterates downward and
/// checks whether SU may alias any node it dominates.
static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
const DataLayout &DL, SUnit *SU, SUnit *ExitSU,
std::set<SUnit *> &CheckList,
unsigned LatencyToLoad) {
if (!SU)
return;
SmallPtrSet<const SUnit*, 16> Visited;
unsigned Depth = 0;
for (std::set<SUnit *>::iterator I = CheckList.begin(), IE = CheckList.end();
I != IE; ++I) {
if (SU == *I)
continue;
if (MIsNeedChainEdge(AA, MFI, DL, SU->getInstr(), (*I)->getInstr())) {
SDep Dep(SU, SDep::MayAliasMem);
Dep.setLatency(((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0);
(*I)->addPred(Dep);
}
// Iterate recursively over all previously added memory chain
// successors. Keep track of visited nodes.
for (SUnit::const_succ_iterator J = (*I)->Succs.begin(),
JE = (*I)->Succs.end(); J != JE; ++J)
if (J->isNormalMemoryOrBarrier())
iterateChainSucc(AA, MFI, DL, SU, J->getSUnit(), ExitSU, &Depth,
Visited);
}
}
/// Check whether two objects need a chain edge; if so, add it,
/// otherwise remember the rejected SU.
static inline void addChainDependency(AliasAnalysis *AA,
const MachineFrameInfo *MFI,
const DataLayout &DL, SUnit *SUa,
SUnit *SUb, std::set<SUnit *> &RejectList,
unsigned TrueMemOrderLatency = 0,
bool isNormalMemory = false) {
  // If this is a false dependency,
  // do not add the edge, but remember the rejected node.
if (MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier);
Dep.setLatency(TrueMemOrderLatency);
SUb->addPred(Dep);
}
else {
// Duplicate entries should be ignored.
RejectList.insert(SUb);
DEBUG(dbgs() << "\tReject chain dep between SU("
<< SUa->NodeNum << ") and SU("
<< SUb->NodeNum << ")\n");
}
}
/// Create an SUnit for each real instruction, numbered in top-down
/// topological order. An instruction order of A < B implies that no edge
/// exists from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
// We'll be allocating one SUnit for each real instruction in the region,
// which is contained within a basic block.
SUnits.reserve(NumRegionInstrs);
for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
MachineInstr *MI = I;
if (MI->isDebugValue())
continue;
SUnit *SU = newSUnit(MI);
MISUnitMap[MI] = SU;
SU->isCall = MI->isCall();
SU->isCommutable = MI->isCommutable();
// Assign the Latency field of SU using target-provided information.
SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());
// If this SUnit uses a reserved or unbuffered resource, mark it as such.
//
// Reserved resources block an instruction from issuing and stall the
// entire pipeline. These are identified by BufferSize=0.
//
// Unbuffered resources prevent execution of subsequent instructions that
// require the same resources. This is used for in-order execution pipelines
// within an out-of-order core. These are identified by BufferSize=1.
if (SchedModel.hasInstrSchedModel()) {
const MCSchedClassDesc *SC = getSchedClass(SU);
for (TargetSchedModel::ProcResIter
PI = SchedModel.getWriteProcResBegin(SC),
PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
switch (SchedModel.getProcResource(PI->ProcResourceIdx)->BufferSize) {
case 0:
SU->hasReservedResource = true;
break;
case 1:
SU->isUnbuffered = true;
break;
default:
break;
}
}
}
}
}
/// If RPTracker is non-null, compute register pressure as a side effect. The
/// DAG builder is an efficient place to do it because it already visits
/// operands.
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
RegPressureTracker *RPTracker,
PressureDiffs *PDiffs) {
const TargetSubtargetInfo &ST = MF.getSubtarget();
bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
: ST.useAA();
AliasAnalysis *AAForDep = UseAA ? AA : nullptr;
MISUnitMap.clear();
ScheduleDAG::clearDAG();
// Create an SUnit for each real instruction.
initSUnits();
if (PDiffs)
PDiffs->init(SUnits.size());
// We build scheduling units by walking a block's instruction list from bottom
// to top.
  // Remember where a generic side-effecting instruction is as we proceed.
SUnit *BarrierChain = nullptr, *AliasChain = nullptr;
// Memory references to specific known memory locations are tracked
// so that they can be given more precise dependencies. We track
// separately the known memory locations that may alias and those
  // that are known not to alias.
MapVector<ValueType, std::vector<SUnit *> > AliasMemDefs, NonAliasMemDefs;
MapVector<ValueType, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
std::set<SUnit*> RejectMemNodes;
// Remove any stale debug info; sometimes BuildSchedGraph is called again
// without emitting the info from the previous call.
DbgValues.clear();
FirstDbgValue = nullptr;
  assert(Defs.empty() && Uses.empty() &&
         "Only BuildSchedGraph should update Defs/Uses");
Defs.setUniverse(TRI->getNumRegs());
Uses.setUniverse(TRI->getNumRegs());
assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
VRegUses.clear();
VRegDefs.setUniverse(MRI.getNumVirtRegs());
VRegUses.setUniverse(MRI.getNumVirtRegs());
// Model data dependencies between instructions being scheduled and the
// ExitSU.
addSchedBarrierDeps();
// Walk the list of instructions, from bottom moving up.
MachineInstr *DbgMI = nullptr;
for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
MII != MIE; --MII) {
MachineInstr *MI = std::prev(MII);
if (MI && DbgMI) {
DbgValues.push_back(std::make_pair(DbgMI, MI));
DbgMI = nullptr;
}
if (MI->isDebugValue()) {
DbgMI = MI;
continue;
}
SUnit *SU = MISUnitMap[MI];
assert(SU && "No SUnit mapped to this MI");
if (RPTracker) {
PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : nullptr;
RPTracker->recede(/*LiveUses=*/nullptr, PDiff);
assert(RPTracker->getPos() == std::prev(MII) &&
"RPTracker can't find MI");
}
assert(
(CanHandleTerminators || (!MI->isTerminator() && !MI->isPosition())) &&
"Cannot schedule terminators or labels!");
// Add register-based dependencies (data, anti, and output).
bool HasVRegDef = false;
for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
const MachineOperand &MO = MI->getOperand(j);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (TRI->isPhysicalRegister(Reg))
addPhysRegDeps(SU, j);
else {
assert(!IsPostRA && "Virtual register encountered!");
if (MO.isDef()) {
HasVRegDef = true;
addVRegDefDeps(SU, j);
}
else if (MO.readsReg()) // ignore undef operands
addVRegUseDeps(SU, j);
}
}
// If we haven't seen any uses in this scheduling region, create a
// dependence edge to ExitSU to model the live-out latency. This is required
// for vreg defs with no in-region use, and prefetches with no vreg def.
//
// FIXME: NumDataSuccs would be more precise than NumSuccs here. This
// check currently relies on being called before adding chain deps.
if (SU->NumSuccs == 0 && SU->Latency > 1
&& (HasVRegDef || MI->mayLoad())) {
SDep Dep(SU, SDep::Artificial);
Dep.setLatency(SU->Latency - 1);
ExitSU.addPred(Dep);
}
// Add chain dependencies.
    // Chain dependencies used to enforce memory order should have a
    // latency of 0 (except for the true dependency of a store followed by
    // an aliased load... we estimate that with a single cycle of latency,
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
// TODO: Use an AliasAnalysis and do real alias-analysis queries, and
// produce more precise dependence information.
unsigned TrueMemOrderLatency = MI->mayStore() ? 1 : 0;
if (isGlobalMemoryObject(AA, MI)) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known not to alias.
for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
I->second[i]->addPred(SDep(SU, SDep::Barrier));
}
}
for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
SDep Dep(SU, SDep::Barrier);
Dep.setLatency(TrueMemOrderLatency);
I->second[i]->addPred(Dep);
}
}
// Add SU to the barrier chain.
if (BarrierChain)
BarrierChain->addPred(SDep(SU, SDep::Barrier));
BarrierChain = SU;
// This is a barrier event that acts as a pivotal node in the DAG,
      // so it is safe to clear the list of exposed nodes.
adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
TrueMemOrderLatency);
RejectMemNodes.clear();
NonAliasMemDefs.clear();
NonAliasMemUses.clear();
// fall-through
new_alias_chain:
// Chain all possibly aliasing memory references through SU.
if (AliasChain) {
unsigned ChainLatency = 0;
if (AliasChain->getInstr()->mayLoad())
ChainLatency = TrueMemOrderLatency;
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
RejectMemNodes, ChainLatency);
}
AliasChain = SU;
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
PendingLoads[k], RejectMemNodes,
TrueMemOrderLatency);
for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes);
}
for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes, TrueMemOrderLatency);
}
adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
TrueMemOrderLatency);
PendingLoads.clear();
AliasMemDefs.clear();
AliasMemUses.clear();
} else if (MI->mayStore()) {
// Add dependence on barrier chain, if needed.
      // There is no point in checking aliasing on a barrier event. Even if
      // SU and the barrier _could_ be reordered, they should not be. In
      // addition, we have lost all RejectMemNodes below the barrier.
if (BarrierChain)
BarrierChain->addPred(SDep(SU, SDep::Barrier));
UnderlyingObjectsVector Objs;
getUnderlyingObjectsForInstr(MI, MFI, Objs, *TM.getDataLayout());
if (Objs.empty()) {
// Treat all other stores conservatively.
goto new_alias_chain;
}
bool MayAlias = false;
for (UnderlyingObjectsVector::iterator K = Objs.begin(), KE = Objs.end();
K != KE; ++K) {
ValueType V = K->getPointer();
bool ThisMayAlias = K->getInt();
if (ThisMayAlias)
MayAlias = true;
// A store to a specific PseudoSourceValue. Add precise dependencies.
// Record the def in MemDefs, first adding a dep if there is
// an existing def.
MapVector<ValueType, std::vector<SUnit *> >::iterator I =
((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
if (I != IE) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes, 0, true);
// If we're not using AA, then we only need one store per object.
if (!AAForDep)
I->second.clear();
I->second.push_back(SU);
} else {
if (ThisMayAlias) {
if (!AAForDep)
AliasMemDefs[V].clear();
AliasMemDefs[V].push_back(SU);
} else {
if (!AAForDep)
NonAliasMemDefs[V].clear();
NonAliasMemDefs[V].push_back(SU);
}
}
// Handle the uses in MemUses, if there are any.
MapVector<ValueType, std::vector<SUnit *> >::iterator J =
((ThisMayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
MapVector<ValueType, std::vector<SUnit *> >::iterator JE =
((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
if (J != JE) {
for (unsigned i = 0, e = J->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
J->second[i], RejectMemNodes,
TrueMemOrderLatency, true);
J->second.clear();
}
}
if (MayAlias) {
// Add dependencies from all the PendingLoads, i.e. loads
// with no underlying object.
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
PendingLoads[k], RejectMemNodes,
TrueMemOrderLatency);
// Add dependence on alias chain, if needed.
if (AliasChain)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
RejectMemNodes);
}
adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
TrueMemOrderLatency);
} else if (MI->mayLoad()) {
bool MayAlias = true;
if (MI->isInvariantLoad(AA)) {
// Invariant load, no chain dependencies needed!
} else {
UnderlyingObjectsVector Objs;
getUnderlyingObjectsForInstr(MI, MFI, Objs, *TM.getDataLayout());
if (Objs.empty()) {
// A load with no underlying object. Depend on all
// potentially aliasing stores.
for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes);
PendingLoads.push_back(SU);
MayAlias = true;
} else {
MayAlias = false;
}
for (UnderlyingObjectsVector::iterator
J = Objs.begin(), JE = Objs.end(); J != JE; ++J) {
ValueType V = J->getPointer();
bool ThisMayAlias = J->getInt();
if (ThisMayAlias)
MayAlias = true;
// A load from a specific PseudoSourceValue. Add precise dependencies.
MapVector<ValueType, std::vector<SUnit *> >::iterator I =
((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
if (I != IE)
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes, 0, true);
if (ThisMayAlias)
AliasMemUses[V].push_back(SU);
else
NonAliasMemUses[V].push_back(SU);
}
if (MayAlias)
adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU,
RejectMemNodes, /*Latency=*/0);
// Add dependencies on alias and barrier chains, if needed.
if (MayAlias && AliasChain)
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
RejectMemNodes);
if (BarrierChain)
BarrierChain->addPred(SDep(SU, SDep::Barrier));
}
}
}
if (DbgMI)
FirstDbgValue = DbgMI;
Defs.clear();
Uses.clear();
VRegDefs.clear();
PendingLoads.clear();
}
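// A minimal usage sketch (hypothetical driver code; it assumes the usual
// ScheduleDAGInstrs protocol of entering a region before building the graph):
//
//   DAG->startBlock(MBB);
//   DAG->enterRegion(MBB, RegionBegin, RegionEnd, NumRegionInstrs);
//   DAG->buildSchedGraph(AA, /*RPTracker=*/nullptr, /*PDiffs=*/nullptr);
//   // DAG->SUnits now holds one SUnit per real instruction, with register
//   // and memory dependence edges attached. Schedule, then:
//   DAG->exitRegion();
//   DAG->finishBlock();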
/// \brief Initialize register live-range state for updating kills.
void ScheduleDAGInstrs::startBlockForKills(MachineBasicBlock *BB) {
// Start with no live registers.
LiveRegs.reset();
// Examine the live-in regs of all successors.
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI) {
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
      // Repeat for reg and all subregs.
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
LiveRegs.set(*SubRegs);
}
}
}
/// \brief If we change a kill flag on the bundle instruction implicit register
/// operands, then we also need to propagate that to any instructions inside
/// the bundle which had the same kill state.
static void toggleBundleKillFlag(MachineInstr *MI, unsigned Reg,
bool NewKillState) {
if (MI->getOpcode() != TargetOpcode::BUNDLE)
return;
// Walk backwards from the last instruction in the bundle to the first.
// Once we set a kill flag on an instruction, we bail out, as otherwise we
// might set it on too many operands. We will clear as many flags as we
// can though.
MachineBasicBlock::instr_iterator Begin = MI;
MachineBasicBlock::instr_iterator End = getBundleEnd(MI);
while (Begin != End) {
for (MachineOperand &MO : (--End)->operands()) {
if (!MO.isReg() || MO.isDef() || Reg != MO.getReg())
continue;
// DEBUG_VALUE nodes do not contribute to code generation and should
// always be ignored. Failure to do so may result in trying to modify
// KILL flags on DEBUG_VALUE nodes, which is distressing.
if (MO.isDebug())
continue;
// If the register has the internal flag then it could be killing an
// internal def of the register. In this case, just skip. We only want
// to toggle the flag on operands visible outside the bundle.
if (MO.isInternalRead())
continue;
if (MO.isKill() == NewKillState)
continue;
MO.setIsKill(NewKillState);
if (NewKillState)
return;
}
}
}
bool ScheduleDAGInstrs::toggleKillFlag(MachineInstr *MI, MachineOperand &MO) {
// Setting kill flag...
if (!MO.isKill()) {
MO.setIsKill(true);
toggleBundleKillFlag(MI, MO.getReg(), true);
return false;
}
// If MO itself is live, clear the kill flag...
if (LiveRegs.test(MO.getReg())) {
MO.setIsKill(false);
toggleBundleKillFlag(MI, MO.getReg(), false);
return false;
}
// If any subreg of MO is live, then create an imp-def for that
// subreg and keep MO marked as killed.
MO.setIsKill(false);
toggleBundleKillFlag(MI, MO.getReg(), false);
bool AllDead = true;
const unsigned SuperReg = MO.getReg();
MachineInstrBuilder MIB(MF, MI);
for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
if (LiveRegs.test(*SubRegs)) {
MIB.addReg(*SubRegs, RegState::ImplicitDefine);
AllDead = false;
}
}
  if (AllDead) {
MO.setIsKill(true);
toggleBundleKillFlag(MI, MO.getReg(), true);
}
return false;
}
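// A worked example of the sub-register case above (the registers are
// illustrative, borrowed from x86): suppose MO kills %ax while LiveRegs shows
// the sub-register %al still live. toggleKillFlag clears the kill flag on
// %ax, scans its sub-registers, finds %al live, and appends an implicit-def
// of %al to MI; AllDead stays false, so the still-live lane is explicitly
// redefined rather than killed along with the super-register.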
// FIXME: Reuse the LivePhysRegs utility for this.
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
LiveRegs.resize(TRI->getNumRegs());
BitVector killedRegs(TRI->getNumRegs());
startBlockForKills(MBB);
// Examine block from end to start...
unsigned Count = MBB->size();
for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
I != E; --Count) {
MachineInstr *MI = --I;
if (MI->isDebugValue())
continue;
    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark the register and all its subregs,
    // as they are completely defined.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask())
LiveRegs.clearBitsNotInMask(MO.getRegMask());
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isDef()) continue;
// Ignore two-addr defs.
if (MI->isRegTiedToUseOperand(i)) continue;
// Repeat for reg and all subregs.
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
LiveRegs.reset(*SubRegs);
}
// Examine all used registers and set/clear kill flag. When a
// register is used multiple times we only set the kill flag on
// the first use. Don't set kill flags on undef operands.
killedRegs.reset();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
unsigned Reg = MO.getReg();
if ((Reg == 0) || MRI.isReserved(Reg)) continue;
bool kill = false;
if (!killedRegs.test(Reg)) {
kill = true;
// A register is not killed if any subregs are live...
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
if (LiveRegs.test(*SubRegs)) {
kill = false;
break;
}
}
        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
if (kill)
kill = !LiveRegs.test(Reg);
}
if (MO.isKill() != kill) {
DEBUG(dbgs() << "Fixing " << MO << " in ");
// Warning: toggleKillFlag may invalidate MO.
toggleKillFlag(MI, MO);
DEBUG(MI->dump());
DEBUG(if (MI->getOpcode() == TargetOpcode::BUNDLE) {
MachineBasicBlock::instr_iterator Begin = MI;
MachineBasicBlock::instr_iterator End = getBundleEnd(MI);
while (++Begin != End)
DEBUG(Begin->dump());
});
}
killedRegs.set(Reg);
}
    // Mark any used register (excluding undef uses) and its subregs as
    // now live...
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
unsigned Reg = MO.getReg();
if ((Reg == 0) || MRI.isReserved(Reg)) continue;
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
LiveRegs.set(*SubRegs);
}
}
}
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
SU->getInstr()->dump();
#endif
}
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
std::string s;
raw_string_ostream oss(s);
if (SU == &EntrySU)
oss << "<entry>";
else if (SU == &ExitSU)
oss << "<exit>";
else
SU->getInstr()->print(oss, /*SkipOpers=*/true);
return oss.str();
}
/// Return the basic block label. It is not necessarily unique because a
/// block may contain multiple scheduling regions. But it is fine for
/// visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
return "dag." + BB->getFullName();
}
//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//
namespace llvm {
/// \brief Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
SchedDFSResult &R;
/// Join DAG nodes into equivalence classes by their subtree.
IntEqClasses SubtreeClasses;
/// List PredSU, SuccSU pairs that represent data edges between subtrees.
std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;
struct RootData {
unsigned NodeID;
unsigned ParentNodeID; // Parent node (member of the parent subtree).
unsigned SubInstrCount; // Instr count in this tree only, not children.
RootData(unsigned id): NodeID(id),
ParentNodeID(SchedDFSResult::InvalidSubtreeID),
SubInstrCount(0) {}
unsigned getSparseSetIndex() const { return NodeID; }
};
SparseSet<RootData> RootSet;
public:
SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
RootSet.setUniverse(R.DFSNodeData.size());
}
  /// Return true if this node has been visited by the DFS traversal.
///
/// During visitPostorderNode the Node's SubtreeID is assigned to the Node
/// ID. Later, SubtreeID is updated but remains valid.
bool isVisited(const SUnit *SU) const {
return R.DFSNodeData[SU->NodeNum].SubtreeID
!= SchedDFSResult::InvalidSubtreeID;
}
/// Initialize this node's instruction count. We don't need to flag the node
/// visited until visitPostorder because the DAG cannot have cycles.
void visitPreorder(const SUnit *SU) {
R.DFSNodeData[SU->NodeNum].InstrCount =
SU->getInstr()->isTransient() ? 0 : 1;
}
/// Called once for each node after all predecessors are visited. Revisit this
/// node's predecessors and potentially join them now that we know the ILP of
/// the other predecessors.
void visitPostorderNode(const SUnit *SU) {
// Mark this node as the root of a subtree. It may be joined with its
// successors later.
R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
RootData RData(SU->NodeNum);
RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;
// If any predecessors are still in their own subtree, they either cannot be
// joined or are large enough to remain separate. If this parent node's
// total instruction count is not greater than a child subtree by at least
// the subtree limit, then try to join it now since splitting subtrees is
// only useful if multiple high-pressure paths are possible.
unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
for (SUnit::const_pred_iterator
PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
if (PI->getKind() != SDep::Data)
continue;
unsigned PredNum = PI->getSUnit()->NodeNum;
if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
joinPredSubtree(*PI, SU, /*CheckLimit=*/false);
// Either link or merge the TreeData entry from the child to the parent.
if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
// If the predecessor's parent is invalid, this is a tree edge and the
// current node is the parent.
if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
RootSet[PredNum].ParentNodeID = SU->NodeNum;
}
else if (RootSet.count(PredNum)) {
// The predecessor is not a root, but is still in the root set. This
// must be the new parent that it was just joined to. Note that
// RootSet[PredNum].ParentNodeID may either be invalid or may still be
// set to the original parent.
RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
RootSet.erase(PredNum);
}
}
RootSet[SU->NodeNum] = RData;
}
  /// Called once for each tree edge after calling visitPostorderNode on the
/// predecessor. Increment the parent node's instruction count and
/// preemptively join this subtree to its parent's if it is small enough.
void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
R.DFSNodeData[Succ->NodeNum].InstrCount
+= R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
joinPredSubtree(PredDep, Succ);
}
/// Add a connection for cross edges.
void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ));
}
/// Set each node's subtree ID to the representative ID and record connections
/// between trees.
void finalize() {
SubtreeClasses.compress();
R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
assert(SubtreeClasses.getNumClasses() == RootSet.size()
&& "number of roots should match trees");
for (SparseSet<RootData>::const_iterator
RI = RootSet.begin(), RE = RootSet.end(); RI != RE; ++RI) {
unsigned TreeID = SubtreeClasses[RI->NodeID];
if (RI->ParentNodeID != SchedDFSResult::InvalidSubtreeID)
R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[RI->ParentNodeID];
R.DFSTreeData[TreeID].SubInstrCount = RI->SubInstrCount;
// Note that SubInstrCount may be greater than InstrCount if we joined
// subtrees across a cross edge. InstrCount will be attributed to the
// original parent, while SubInstrCount will be attributed to the joined
// parent.
}
R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
DEBUG(dbgs() << " SU(" << Idx << ") in tree "
<< R.DFSNodeData[Idx].SubtreeID << '\n');
}
for (std::vector<std::pair<const SUnit*, const SUnit*> >::const_iterator
I = ConnectionPairs.begin(), E = ConnectionPairs.end();
I != E; ++I) {
unsigned PredTree = SubtreeClasses[I->first->NodeNum];
unsigned SuccTree = SubtreeClasses[I->second->NodeNum];
if (PredTree == SuccTree)
continue;
unsigned Depth = I->first->getDepth();
addConnection(PredTree, SuccTree, Depth);
addConnection(SuccTree, PredTree, Depth);
}
}
protected:
/// Join the predecessor subtree with the successor that is its DFS
/// parent. Apply some heuristics before joining.
bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
bool CheckLimit = true) {
assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");
// Check if the predecessor is already joined.
const SUnit *PredSU = PredDep.getSUnit();
unsigned PredNum = PredSU->NodeNum;
if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
return false;
// Four is the magic number of successors before a node is considered a
// pinch point.
unsigned NumDataSucs = 0;
for (SUnit::const_succ_iterator SI = PredSU->Succs.begin(),
SE = PredSU->Succs.end(); SI != SE; ++SI) {
if (SI->getKind() == SDep::Data) {
if (++NumDataSucs >= 4)
return false;
}
}
if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
return false;
R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
SubtreeClasses.join(Succ->NodeNum, PredNum);
return true;
}
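  // For intuition on the heuristics above (the node counts are hypothetical):
  // a predecessor feeding four or more data successors is treated as a pinch
  // point and kept as its own subtree root, since joining it would glue
  // together otherwise independent high-pressure paths. Otherwise, when its
  // InstrCount is within SubtreeLimit (or CheckLimit is off), the predecessor
  // is absorbed into Succ's subtree via SubtreeClasses.join().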
/// Called by finalize() to record a connection between trees.
void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
if (!Depth)
return;
do {
SmallVectorImpl<SchedDFSResult::Connection> &Connections =
R.SubtreeConnections[FromTree];
for (SmallVectorImpl<SchedDFSResult::Connection>::iterator
I = Connections.begin(), E = Connections.end(); I != E; ++I) {
if (I->TreeID == ToTree) {
I->Level = std::max(I->Level, Depth);
return;
}
}
Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
FromTree = R.DFSTreeData[FromTree].ParentTreeID;
} while (FromTree != SchedDFSResult::InvalidSubtreeID);
}
};
} // namespace llvm
namespace {
/// \brief Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator> > DFSStack;
public:
bool isComplete() const { return DFSStack.empty(); }
void follow(const SUnit *SU) {
DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
}
void advance() { ++DFSStack.back().second; }
const SDep *backtrack() {
DFSStack.pop_back();
return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
}
const SUnit *getCurr() const { return DFSStack.back().first; }
SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }
SUnit::const_pred_iterator getPredEnd() const {
return getCurr()->Preds.end();
}
};
} // anonymous namespace
static bool hasDataSucc(const SUnit *SU) {
for (SUnit::const_succ_iterator
SI = SU->Succs.begin(), SE = SU->Succs.end(); SI != SE; ++SI) {
if (SI->getKind() == SDep::Data && !SI->getSUnit()->isBoundaryNode())
return true;
}
return false;
}
/// Compute an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");
SchedDFSImpl Impl(*this);
for (ArrayRef<SUnit>::const_iterator
SI = SUnits.begin(), SE = SUnits.end(); SI != SE; ++SI) {
const SUnit *SU = &*SI;
if (Impl.isVisited(SU) || hasDataSucc(SU))
continue;
SchedDAGReverseDFS DFS;
Impl.visitPreorder(SU);
DFS.follow(SU);
for (;;) {
// Traverse the leftmost path as far as possible.
while (DFS.getPred() != DFS.getPredEnd()) {
const SDep &PredDep = *DFS.getPred();
DFS.advance();
// Ignore non-data edges.
if (PredDep.getKind() != SDep::Data
|| PredDep.getSUnit()->isBoundaryNode()) {
continue;
}
// An already visited edge is a cross edge, assuming an acyclic DAG.
if (Impl.isVisited(PredDep.getSUnit())) {
Impl.visitCrossEdge(PredDep, DFS.getCurr());
continue;
}
Impl.visitPreorder(PredDep.getSUnit());
DFS.follow(PredDep.getSUnit());
}
// Visit the top of the stack in postorder and backtrack.
const SUnit *Child = DFS.getCurr();
const SDep *PredDep = DFS.backtrack();
Impl.visitPostorderNode(Child);
if (PredDep)
Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
if (DFS.isComplete())
break;
}
}
Impl.finalize();
}
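// A small worked traversal (SUnit numbers and predecessor iteration order
// are hypothetical): given data edges SU(1)->SU(0), SU(2)->SU(1), and
// SU(3)->SU(0), only SU(0) has no data successor, so the DFS roots there.
// It descends 0 -> 1 -> 2, backtracks visiting SU(2) and then SU(1) in
// postorder, descends to SU(3), and finally visits SU(0); every
// visitPostorderNode call therefore sees all of its predecessors already
// finalized.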
/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
for (SmallVectorImpl<Connection>::const_iterator
I = SubtreeConnections[SubtreeID].begin(),
E = SubtreeConnections[SubtreeID].end(); I != E; ++I) {
SubtreeConnectLevels[I->TreeID] =
std::max(SubtreeConnectLevels[I->TreeID], I->Level);
DEBUG(dbgs() << " Tree: " << I->TreeID
<< " @" << SubtreeConnectLevels[I->TreeID] << '\n');
}
}
LLVM_DUMP_METHOD
void ILPValue::print(raw_ostream &OS) const {
OS << InstrCount << " / " << Length << " = ";
if (!Length)
OS << "BADILP";
else
OS << format("%g", ((double)InstrCount / Length));
}
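// A worked example of the output above: InstrCount = 6 with Length = 3
// prints "6 / 3 = 2", i.e. six instructions over a critical path of three
// cycles yields an ILP of 2, while Length = 0 prints "BADILP" instead of
// dividing by zero.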
LLVM_DUMP_METHOD
void ILPValue::dump() const {
dbgs() << *this << '\n';
}
namespace llvm {
LLVM_DUMP_METHOD
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
Val.print(OS);
return OS;
}
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ErlangGC.cpp | //===-- ErlangGC.cpp - Erlang/OTP GC strategy -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Erlang/OTP runtime-compatible garbage collector
// (e.g. it defines safe points, root initialization, etc.).
//
// The frametable emitter is in ErlangGCPrinter.cpp.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCs.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
namespace {
class ErlangGC : public GCStrategy {
public:
ErlangGC();
};
}
static GCRegistry::Add<ErlangGC> X("erlang",
"erlang-compatible garbage collector");
void llvm::linkErlangGC() {}
ErlangGC::ErlangGC() {
InitRoots = false;
NeededSafePoints = 1 << GC::PostCall;
UsesMetadata = true;
CustomRoots = false;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/DeadMachineInstructionElim.cpp | //===- DeadMachineInstructionElim.cpp - Remove dead machine instructions --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is an extremely simple MachineInstr-level dead-code-elimination pass.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "codegen-dce"
STATISTIC(NumDeletes, "Number of dead instructions deleted");
namespace {
class DeadMachineInstructionElim : public MachineFunctionPass {
bool runOnMachineFunction(MachineFunction &MF) override;
const TargetRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
BitVector LivePhysRegs;
public:
static char ID; // Pass identification, replacement for typeid
DeadMachineInstructionElim() : MachineFunctionPass(ID) {
initializeDeadMachineInstructionElimPass(*PassRegistry::getPassRegistry());
}
private:
bool isDead(const MachineInstr *MI) const;
};
}
char DeadMachineInstructionElim::ID = 0;
char &llvm::DeadMachineInstructionElimID = DeadMachineInstructionElim::ID;
INITIALIZE_PASS(DeadMachineInstructionElim, "dead-mi-elimination",
"Remove dead machine instructions", false, false)
bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
  // Technically speaking, inline asm without side effects and no defs can
  // still be deleted. But there is so much bad inline asm code out there that
  // we should leave it alone.
if (MI->isInlineAsm())
return false;
// Don't delete frame allocation labels.
if (MI->getOpcode() == TargetOpcode::LOCAL_ESCAPE)
return false;
// Don't delete instructions with side effects.
bool SawStore = false;
if (!MI->isSafeToMove(nullptr, SawStore) && !MI->isPHI())
return false;
// Examine each operand.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
// Don't delete live physreg defs, or any reserved register defs.
if (LivePhysRegs.test(Reg) || MRI->isReserved(Reg))
return false;
} else {
if (!MRI->use_nodbg_empty(Reg))
// This def has a non-debug use. Don't delete the instruction!
return false;
}
}
}
// If there are no defs with uses, the instruction is dead.
return true;
}
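// A hedged example (the opcode and virtual register are illustrative): an
// instruction such as "%vreg5<def> = MOV32ri 42" is dead when nothing except
// DBG_VALUE instructions reads %vreg5, i.e. MRI->use_nodbg_empty(%vreg5)
// holds; it has no side effects and defines no live physical register, so
// isDead() returns true and the instruction is erased by the pass below.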
bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
bool AnyChanges = false;
MRI = &MF.getRegInfo();
TRI = MF.getSubtarget().getRegisterInfo();
TII = MF.getSubtarget().getInstrInfo();
// Loop over all instructions in all blocks, from bottom to top, so that it's
// more likely that chains of dependent but ultimately dead instructions will
// be cleaned up.
for (MachineFunction::reverse_iterator I = MF.rbegin(), E = MF.rend();
I != E; ++I) {
MachineBasicBlock *MBB = &*I;
// Start out assuming that reserved registers are live out of this block.
LivePhysRegs = MRI->getReservedRegs();
    // Add live-ins from successors to LivePhysRegs. Normally, physregs are not
// live across blocks, but some targets (x86) can have flags live out of a
// block.
for (MachineBasicBlock::succ_iterator S = MBB->succ_begin(),
E = MBB->succ_end(); S != E; S++)
for (MachineBasicBlock::livein_iterator LI = (*S)->livein_begin();
LI != (*S)->livein_end(); LI++)
LivePhysRegs.set(*LI);
// Now scan the instructions and delete dead ones, tracking physreg
// liveness as we go.
for (MachineBasicBlock::reverse_iterator MII = MBB->rbegin(),
MIE = MBB->rend(); MII != MIE; ) {
MachineInstr *MI = &*MII;
// If the instruction is dead, delete it!
if (isDead(MI)) {
DEBUG(dbgs() << "DeadMachineInstructionElim: DELETING: " << *MI);
// It is possible that some DBG_VALUE instructions refer to this
// instruction. They get marked as undef and will be deleted
// in the live debug variable analysis.
MI->eraseFromParentAndMarkDBGValuesForRemoval();
AnyChanges = true;
++NumDeletes;
MIE = MBB->rend();
// MII is now pointing to the next instruction to process,
// so don't increment it.
continue;
}
// Record the physreg defs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
// Check the subreg set, not the alias set, because a def
// of a super-register may still be partially live after
// this def.
for (MCSubRegIterator SR(Reg, TRI,/*IncludeSelf=*/true);
SR.isValid(); ++SR)
LivePhysRegs.reset(*SR);
}
} else if (MO.isRegMask()) {
// Register mask of preserved registers. All clobbers are dead.
LivePhysRegs.clearBitsNotInMask(MO.getRegMask());
}
}
// Record the physreg uses, after the defs, in case a physreg is
// both defined and used in the same instruction.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
LivePhysRegs.set(*AI);
}
}
}
// We didn't delete the current instruction, so increment MII to
// the next one.
++MII;
}
}
LivePhysRegs.clear();
return AnyChanges;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/Spiller.h | //===-- llvm/CodeGen/Spiller.h - Spiller -*- C++ -*------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SPILLER_H
#define LLVM_LIB_CODEGEN_SPILLER_H
namespace llvm {
class LiveRangeEdit;
class MachineFunction;
class MachineFunctionPass;
class VirtRegMap;
/// Spiller interface.
///
/// Implementations are utility classes which insert spill or remat code on
/// demand.
class Spiller {
virtual void anchor();
public:
virtual ~Spiller() = 0;
/// spill - Spill the LRE.getParent() live interval.
virtual void spill(LiveRangeEdit &LRE) = 0;
};
/// Create and return a spiller that will insert spill code directly instead
/// of deferring through VirtRegMap.
Spiller *createInlineSpiller(MachineFunctionPass &pass,
MachineFunction &mf,
VirtRegMap &vrm);
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineVerifier.cpp | //===-- MachineVerifier.cpp - Machine Code Verifier -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled from LLVMTargetMachine.cpp with the
// command-line option -verify-machineinstrs, or by defining the environment
// variable LLVM_VERIFY_MACHINEINSTRS to the name of a file that will receive
// the verifier errors.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
namespace {
struct MachineVerifier {
MachineVerifier(Pass *pass, const char *b) :
PASS(pass),
Banner(b)
{}
bool runOnMachineFunction(MachineFunction &MF);
Pass *const PASS;
const char *Banner;
const MachineFunction *MF;
const TargetMachine *TM;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
unsigned foundErrors;
typedef SmallVector<unsigned, 16> RegVector;
typedef SmallVector<const uint32_t*, 4> RegMaskVector;
typedef DenseSet<unsigned> RegSet;
typedef DenseMap<unsigned, const MachineInstr*> RegMap;
typedef SmallPtrSet<const MachineBasicBlock*, 8> BlockSet;
const MachineInstr *FirstTerminator;
BlockSet FunctionBlocks;
BitVector regsReserved;
RegSet regsLive;
RegVector regsDefined, regsDead, regsKilled;
RegMaskVector regMasks;
RegSet regsLiveInButUnused;
SlotIndex lastIndex;
// Add Reg and any sub-registers to RV
void addRegWithSubRegs(RegVector &RV, unsigned Reg) {
RV.push_back(Reg);
if (TargetRegisterInfo::isPhysicalRegister(Reg))
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
RV.push_back(*SubRegs);
}
struct BBInfo {
// Is this MBB reachable from the MF entry point?
bool reachable;
// Vregs that must be live in because they are used without being
// defined. Map value is the user.
RegMap vregsLiveIn;
// Regs killed in MBB. They may be defined again, and will then be in both
// regsKilled and regsLiveOut.
RegSet regsKilled;
// Regs defined in MBB and live out. Note that vregs passing through may
// be live out without being mentioned here.
RegSet regsLiveOut;
// Vregs that pass through MBB untouched. This set is disjoint from
// regsKilled and regsLiveOut.
RegSet vregsPassed;
// Vregs that must pass through MBB because they are needed by a successor
// block. This set is disjoint from regsLiveOut.
RegSet vregsRequired;
// Set versions of block's predecessor and successor lists.
BlockSet Preds, Succs;
BBInfo() : reachable(false) {}
// Add register to vregsPassed if it belongs there. Return true if
// anything changed.
bool addPassed(unsigned Reg) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
return false;
if (regsKilled.count(Reg) || regsLiveOut.count(Reg))
return false;
return vregsPassed.insert(Reg).second;
}
// Same for a full set.
bool addPassed(const RegSet &RS) {
bool changed = false;
for (RegSet::const_iterator I = RS.begin(), E = RS.end(); I != E; ++I)
if (addPassed(*I))
changed = true;
return changed;
}
// Add register to vregsRequired if it belongs there. Return true if
// anything changed.
bool addRequired(unsigned Reg) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
return false;
if (regsLiveOut.count(Reg))
return false;
return vregsRequired.insert(Reg).second;
}
// Same for a full set.
bool addRequired(const RegSet &RS) {
bool changed = false;
for (RegSet::const_iterator I = RS.begin(), E = RS.end(); I != E; ++I)
if (addRequired(*I))
changed = true;
return changed;
}
// Same for a full map.
bool addRequired(const RegMap &RM) {
bool changed = false;
for (RegMap::const_iterator I = RM.begin(), E = RM.end(); I != E; ++I)
if (addRequired(I->first))
changed = true;
return changed;
}
// Live-out registers are either in regsLiveOut or vregsPassed.
bool isLiveOut(unsigned Reg) const {
return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
}
};
// Extra register info per MBB.
DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;
bool isReserved(unsigned Reg) {
return Reg < regsReserved.size() && regsReserved.test(Reg);
}
bool isAllocatable(unsigned Reg) {
return Reg < TRI->getNumRegs() && MRI->isAllocatable(Reg);
}
// Analysis information if available
LiveVariables *LiveVars;
LiveIntervals *LiveInts;
LiveStacks *LiveStks;
SlotIndexes *Indexes;
void visitMachineFunctionBefore();
void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
void visitMachineBundleBefore(const MachineInstr *MI);
void visitMachineInstrBefore(const MachineInstr *MI);
void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
void visitMachineInstrAfter(const MachineInstr *MI);
void visitMachineBundleAfter(const MachineInstr *MI);
void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
void visitMachineFunctionAfter();
void report(const char *msg, const MachineFunction *MF);
void report(const char *msg, const MachineBasicBlock *MBB);
void report(const char *msg, const MachineInstr *MI);
void report(const char *msg, const MachineOperand *MO, unsigned MONum);
void report(const char *msg, const MachineFunction *MF,
const LiveInterval &LI);
void report(const char *msg, const MachineBasicBlock *MBB,
const LiveInterval &LI);
void report(const char *msg, const MachineFunction *MF,
const LiveRange &LR, unsigned Reg, unsigned LaneMask);
void report(const char *msg, const MachineBasicBlock *MBB,
const LiveRange &LR, unsigned Reg, unsigned LaneMask);
void verifyInlineAsm(const MachineInstr *MI);
void checkLiveness(const MachineOperand *MO, unsigned MONum);
void markReachable(const MachineBasicBlock *MBB);
void calcRegsPassed();
void checkPHIOps(const MachineBasicBlock *MBB);
void calcRegsRequired();
void verifyLiveVariables();
void verifyLiveIntervals();
void verifyLiveInterval(const LiveInterval&);
void verifyLiveRangeValue(const LiveRange&, const VNInfo*, unsigned,
unsigned);
void verifyLiveRangeSegment(const LiveRange&,
const LiveRange::const_iterator I, unsigned,
unsigned);
void verifyLiveRange(const LiveRange&, unsigned, unsigned LaneMask = 0);
void verifyStackFrame();
};
struct MachineVerifierPass : public MachineFunctionPass {
static char ID; // Pass ID, replacement for typeid
const std::string Banner;
  MachineVerifierPass(const std::string &banner = std::string())
: MachineFunctionPass(ID), Banner(banner) {
initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override {
MF.verify(this, Banner.c_str());
return false;
}
};
}
char MachineVerifierPass::ID = 0;
INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
"Verify generated machine code", false, false)
FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
return new MachineVerifierPass(Banner);
}
void MachineFunction::verify(Pass *p, const char *Banner) const {
MachineVerifier(p, Banner)
.runOnMachineFunction(const_cast<MachineFunction&>(*this));
}
bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
foundErrors = 0;
this->MF = &MF;
TM = &MF.getTarget();
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
LiveVars = nullptr;
LiveInts = nullptr;
LiveStks = nullptr;
Indexes = nullptr;
if (PASS) {
LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
// We don't want to verify LiveVariables if LiveIntervals is available.
if (!LiveInts)
LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
}
visitMachineFunctionBefore();
for (MachineFunction::const_iterator MFI = MF.begin(), MFE = MF.end();
MFI!=MFE; ++MFI) {
visitMachineBasicBlockBefore(MFI);
// Keep track of the current bundle header.
const MachineInstr *CurBundle = nullptr;
// Do we expect the next instruction to be part of the same bundle?
bool InBundle = false;
for (MachineBasicBlock::const_instr_iterator MBBI = MFI->instr_begin(),
MBBE = MFI->instr_end(); MBBI != MBBE; ++MBBI) {
if (MBBI->getParent() != MFI) {
report("Bad instruction parent pointer", MFI);
errs() << "Instruction: " << *MBBI;
continue;
}
// Check for consistent bundle flags.
if (InBundle && !MBBI->isBundledWithPred())
report("Missing BundledPred flag, "
"BundledSucc was set on predecessor", MBBI);
if (!InBundle && MBBI->isBundledWithPred())
report("BundledPred flag is set, "
"but BundledSucc not set on predecessor", MBBI);
// Is this a bundle header?
if (!MBBI->isInsideBundle()) {
if (CurBundle)
visitMachineBundleAfter(CurBundle);
CurBundle = MBBI;
visitMachineBundleBefore(CurBundle);
} else if (!CurBundle)
report("No bundle header", MBBI);
visitMachineInstrBefore(MBBI);
for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
const MachineInstr &MI = *MBBI;
const MachineOperand &Op = MI.getOperand(I);
if (Op.getParent() != &MI) {
// Make sure to use correct addOperand / RemoveOperand / ChangeTo
// functions when replacing operands of a MachineInstr.
report("Instruction has operand with wrong parent set", &MI);
}
visitMachineOperand(&Op, I);
}
visitMachineInstrAfter(MBBI);
// Was this the last bundled instruction?
InBundle = MBBI->isBundledWithSucc();
}
if (CurBundle)
visitMachineBundleAfter(CurBundle);
if (InBundle)
report("BundledSucc flag set on last instruction in block", &MFI->back());
visitMachineBasicBlockAfter(MFI);
}
visitMachineFunctionAfter();
if (foundErrors)
report_fatal_error("Found "+Twine(foundErrors)+" machine code errors.");
// Clean up.
regsLive.clear();
regsDefined.clear();
regsDead.clear();
regsKilled.clear();
regMasks.clear();
regsLiveInButUnused.clear();
MBBInfoMap.clear();
return false; // no changes
}
void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
assert(MF);
errs() << '\n';
if (!foundErrors++) {
if (Banner)
errs() << "# " << Banner << '\n';
MF->print(errs(), Indexes);
}
errs() << "*** Bad machine code: " << msg << " ***\n"
<< "- function: " << MF->getName() << "\n";
}
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
assert(MBB);
report(msg, MBB->getParent());
errs() << "- basic block: BB#" << MBB->getNumber()
<< ' ' << MBB->getName()
<< " (" << (const void*)MBB << ')';
if (Indexes)
errs() << " [" << Indexes->getMBBStartIdx(MBB)
<< ';' << Indexes->getMBBEndIdx(MBB) << ')';
errs() << '\n';
}
void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
assert(MI);
report(msg, MI->getParent());
errs() << "- instruction: ";
if (Indexes && Indexes->hasIndex(MI))
errs() << Indexes->getInstructionIndex(MI) << '\t';
MI->print(errs(), TM);
}
void MachineVerifier::report(const char *msg,
const MachineOperand *MO, unsigned MONum) {
assert(MO);
report(msg, MO->getParent());
errs() << "- operand " << MONum << ": ";
MO->print(errs(), TRI);
errs() << "\n";
}
void MachineVerifier::report(const char *msg, const MachineFunction *MF,
const LiveInterval &LI) {
report(msg, MF);
errs() << "- interval: " << LI << '\n';
}
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB,
const LiveInterval &LI) {
report(msg, MBB);
errs() << "- interval: " << LI << '\n';
}
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB,
const LiveRange &LR, unsigned Reg,
unsigned LaneMask) {
report(msg, MBB);
errs() << "- liverange: " << LR << '\n';
errs() << "- register: " << PrintReg(Reg, TRI) << '\n';
if (LaneMask != 0)
errs() << "- lanemask: " << format("%04X\n", LaneMask);
}
void MachineVerifier::report(const char *msg, const MachineFunction *MF,
const LiveRange &LR, unsigned Reg,
unsigned LaneMask) {
report(msg, MF);
errs() << "- liverange: " << LR << '\n';
errs() << "- register: " << PrintReg(Reg, TRI) << '\n';
if (LaneMask != 0)
errs() << "- lanemask: " << format("%04X\n", LaneMask);
}
void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
BBInfo &MInfo = MBBInfoMap[MBB];
if (!MInfo.reachable) {
MInfo.reachable = true;
for (MachineBasicBlock::const_succ_iterator SuI = MBB->succ_begin(),
SuE = MBB->succ_end(); SuI != SuE; ++SuI)
markReachable(*SuI);
}
}
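// Set up per-function state: reserved registers, block reachability, the set
// of blocks in the function, and sanity checks on use lists and stack frame.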
void MachineVerifier::visitMachineFunctionBefore() {
lastIndex = SlotIndex();
regsReserved = MRI->getReservedRegs();
// A sub-register of a reserved register is also reserved
for (int Reg = regsReserved.find_first(); Reg>=0;
Reg = regsReserved.find_next(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
// FIXME: This should probably be:
// assert(regsReserved.test(*SubRegs) && "Non-reserved sub-register");
regsReserved.set(*SubRegs);
}
}
markReachable(&MF->front());
// Build a set of the basic blocks in the function.
FunctionBlocks.clear();
for (const auto &MBB : *MF) {
FunctionBlocks.insert(&MBB);
BBInfo &MInfo = MBBInfoMap[&MBB];
MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
if (MInfo.Preds.size() != MBB.pred_size())
report("MBB has duplicate entries in its predecessor list.", &MBB);
MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
if (MInfo.Succs.size() != MBB.succ_size())
report("MBB has duplicate entries in its successor list.", &MBB);
}
// Check that the register use lists are sane.
MRI->verifyUseLists();
verifyStackFrame();
}
// Does iterator point to a and b as the first two elements?
static bool matchPair(MachineBasicBlock::const_succ_iterator i,
const MachineBasicBlock *a, const MachineBasicBlock *b) {
if (*i == a)
return *++i == b;
if (*i == b)
return *++i == a;
return false;
}
void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
FirstTerminator = nullptr;
if (MRI->isSSA()) {
// If this block has allocatable physical registers live-in, check that
// it is an entry block or landing pad.
for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
LE = MBB->livein_end();
LI != LE; ++LI) {
unsigned reg = *LI;
if (isAllocatable(reg) && !MBB->isLandingPad() &&
MBB != MBB->getParent()->begin()) {
report("MBB has allocable live-in, but isn't entry or landing-pad.", MBB);
}
}
}
// Count the number of landing pad successors.
SmallPtrSet<MachineBasicBlock*, 4> LandingPadSuccs;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
if ((*I)->isLandingPad())
LandingPadSuccs.insert(*I);
if (!FunctionBlocks.count(*I))
report("MBB has successor that isn't part of the function.", MBB);
if (!MBBInfoMap[*I].Preds.count(MBB)) {
report("Inconsistent CFG", MBB);
errs() << "MBB is not in the predecessor list of the successor BB#"
<< (*I)->getNumber() << ".\n";
}
}
// Check the predecessor list.
for (MachineBasicBlock::const_pred_iterator I = MBB->pred_begin(),
E = MBB->pred_end(); I != E; ++I) {
if (!FunctionBlocks.count(*I))
report("MBB has predecessor that isn't part of the function.", MBB);
if (!MBBInfoMap[*I].Succs.count(MBB)) {
report("Inconsistent CFG", MBB);
errs() << "MBB is not in the successor list of the predecessor BB#"
<< (*I)->getNumber() << ".\n";
}
}
const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
const BasicBlock *BB = MBB->getBasicBlock();
if (LandingPadSuccs.size() > 1 &&
!(AsmInfo &&
AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
BB && isa<SwitchInst>(BB->getTerminator())))
report("MBB has more than one landing pad successor", MBB);
  // Call AnalyzeBranch. If it succeeds, there are several more conditions to
  // check.
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*const_cast<MachineBasicBlock *>(MBB),
TBB, FBB, Cond)) {
// Ok, AnalyzeBranch thinks it knows what's going on with this block. Let's
// check whether its answers match up with reality.
if (!TBB && !FBB) {
// Block falls through to its successor.
MachineFunction::const_iterator MBBI = MBB;
++MBBI;
if (MBBI == MF->end()) {
// It's possible that the block legitimately ends with a noreturn
// call or an unreachable, in which case it won't actually fall
// out the bottom of the function.
} else if (MBB->succ_size() == LandingPadSuccs.size()) {
// It's possible that the block legitimately ends with a noreturn
        // call or an unreachable, in which case it won't actually fall
// out of the block.
} else if (MBB->succ_size() != 1+LandingPadSuccs.size()) {
report("MBB exits via unconditional fall-through but doesn't have "
"exactly one CFG successor!", MBB);
} else if (!MBB->isSuccessor(MBBI)) {
report("MBB exits via unconditional fall-through but its successor "
"differs from its CFG successor!", MBB);
}
if (!MBB->empty() && MBB->back().isBarrier() &&
!TII->isPredicated(&MBB->back())) {
report("MBB exits via unconditional fall-through but ends with a "
"barrier instruction!", MBB);
}
if (!Cond.empty()) {
report("MBB exits via unconditional fall-through but has a condition!",
MBB);
}
} else if (TBB && !FBB && Cond.empty()) {
// Block unconditionally branches somewhere.
// If the block has exactly one successor, that happens to be a
// landingpad, accept it as valid control flow.
if (MBB->succ_size() != 1+LandingPadSuccs.size() &&
(MBB->succ_size() != 1 || LandingPadSuccs.size() != 1 ||
*MBB->succ_begin() != *LandingPadSuccs.begin())) {
report("MBB exits via unconditional branch but doesn't have "
"exactly one CFG successor!", MBB);
} else if (!MBB->isSuccessor(TBB)) {
report("MBB exits via unconditional branch but the CFG "
"successor doesn't match the actual successor!", MBB);
}
if (MBB->empty()) {
report("MBB exits via unconditional branch but doesn't contain "
"any instructions!", MBB);
} else if (!MBB->back().isBarrier()) {
report("MBB exits via unconditional branch but doesn't end with a "
"barrier instruction!", MBB);
} else if (!MBB->back().isTerminator()) {
report("MBB exits via unconditional branch but the branch isn't a "
"terminator instruction!", MBB);
}
} else if (TBB && !FBB && !Cond.empty()) {
// Block conditionally branches somewhere, otherwise falls through.
MachineFunction::const_iterator MBBI = MBB;
++MBBI;
if (MBBI == MF->end()) {
report("MBB conditionally falls through out of function!", MBB);
} else if (MBB->succ_size() == 1) {
// A conditional branch with only one successor is weird, but allowed.
if (&*MBBI != TBB)
report("MBB exits via conditional branch/fall-through but only has "
"one CFG successor!", MBB);
else if (TBB != *MBB->succ_begin())
report("MBB exits via conditional branch/fall-through but the CFG "
"successor don't match the actual successor!", MBB);
} else if (MBB->succ_size() != 2) {
report("MBB exits via conditional branch/fall-through but doesn't have "
"exactly two CFG successors!", MBB);
} else if (!matchPair(MBB->succ_begin(), TBB, MBBI)) {
report("MBB exits via conditional branch/fall-through but the CFG "
"successors don't match the actual successors!", MBB);
}
if (MBB->empty()) {
report("MBB exits via conditional branch/fall-through but doesn't "
"contain any instructions!", MBB);
} else if (MBB->back().isBarrier()) {
report("MBB exits via conditional branch/fall-through but ends with a "
"barrier instruction!", MBB);
} else if (!MBB->back().isTerminator()) {
report("MBB exits via conditional branch/fall-through but the branch "
"isn't a terminator instruction!", MBB);
}
} else if (TBB && FBB) {
// Block conditionally branches somewhere, otherwise branches
// somewhere else.
if (MBB->succ_size() == 1) {
// A conditional branch with only one successor is weird, but allowed.
if (FBB != TBB)
report("MBB exits via conditional branch/branch through but only has "
"one CFG successor!", MBB);
else if (TBB != *MBB->succ_begin())
report("MBB exits via conditional branch/branch through but the CFG "
"successor don't match the actual successor!", MBB);
} else if (MBB->succ_size() != 2) {
report("MBB exits via conditional branch/branch but doesn't have "
"exactly two CFG successors!", MBB);
} else if (!matchPair(MBB->succ_begin(), TBB, FBB)) {
report("MBB exits via conditional branch/branch but the CFG "
"successors don't match the actual successors!", MBB);
}
if (MBB->empty()) {
report("MBB exits via conditional branch/branch but doesn't "
"contain any instructions!", MBB);
} else if (!MBB->back().isBarrier()) {
report("MBB exits via conditional branch/branch but doesn't end with a "
"barrier instruction!", MBB);
} else if (!MBB->back().isTerminator()) {
report("MBB exits via conditional branch/branch but the branch "
"isn't a terminator instruction!", MBB);
}
if (Cond.empty()) {
report("MBB exits via conditinal branch/branch but there's no "
"condition!", MBB);
}
} else {
report("AnalyzeBranch returned invalid data!", MBB);
}
}
regsLive.clear();
for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
E = MBB->livein_end(); I != E; ++I) {
if (!TargetRegisterInfo::isPhysicalRegister(*I)) {
report("MBB live-in list contains non-physical register", MBB);
continue;
}
for (MCSubRegIterator SubRegs(*I, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
regsLive.insert(*SubRegs);
}
regsLiveInButUnused = regsLive;
const MachineFrameInfo *MFI = MF->getFrameInfo();
assert(MFI && "Function has no frame info");
BitVector PR = MFI->getPristineRegs(*MF);
for (int I = PR.find_first(); I>0; I = PR.find_next(I)) {
for (MCSubRegIterator SubRegs(I, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
regsLive.insert(*SubRegs);
}
regsKilled.clear();
regsDefined.clear();
if (Indexes)
lastIndex = Indexes->getMBBStartIdx(MBB);
}
// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
if (Indexes && Indexes->hasIndex(MI)) {
SlotIndex idx = Indexes->getInstructionIndex(MI);
if (!(idx > lastIndex)) {
report("Instruction index out of order", MI);
errs() << "Last instruction was at " << lastIndex << '\n';
}
lastIndex = idx;
}
// Ensure non-terminators don't follow terminators.
// Ignore predicated terminators formed by if conversion.
// FIXME: If conversion shouldn't need to violate this rule.
if (MI->isTerminator() && !TII->isPredicated(MI)) {
if (!FirstTerminator)
FirstTerminator = MI;
} else if (FirstTerminator) {
report("Non-terminator instruction after the first terminator", MI);
errs() << "First terminator was:\t" << *FirstTerminator;
}
}
// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
// The first two operands on INLINEASM are the asm string and global flags.
if (MI->getNumOperands() < 2) {
report("Too few operands on inline asm", MI);
return;
}
if (!MI->getOperand(0).isSymbol())
report("Asm string must be an external symbol", MI);
if (!MI->getOperand(1).isImm())
report("Asm flags must be an immediate", MI);
// Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
// Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16.
if (!isUInt<5>(MI->getOperand(1).getImm()))
report("Unknown asm flags", &MI->getOperand(1), 1);
static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
unsigned OpNo = InlineAsm::MIOp_FirstOperand;
unsigned NumOps;
for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
const MachineOperand &MO = MI->getOperand(OpNo);
// There may be implicit ops after the fixed operands.
if (!MO.isImm())
break;
NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
}
if (OpNo > MI->getNumOperands())
report("Missing operands in last group", MI);
// An optional MDNode follows the groups.
if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
++OpNo;
// All trailing operands must be implicit registers.
for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
const MachineOperand &MO = MI->getOperand(OpNo);
if (!MO.isReg() || !MO.isImplicit())
report("Expected implicit register after groups", &MO, OpNo);
}
}
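// Check per-instruction invariants before the operands are visited: operand
// count against the MCInstrDesc, inline asm layout, memory operand flags,
// slot index mapping, and target-specific rules via verifyInstruction().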
void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
const MCInstrDesc &MCID = MI->getDesc();
if (MI->getNumOperands() < MCID.getNumOperands()) {
report("Too few operands", MI);
errs() << MCID.getNumOperands() << " operands expected, but "
<< MI->getNumOperands() << " given.\n";
}
  // Check that INLINEASM operands follow the expected template.
if (MI->isInlineAsm())
verifyInlineAsm(MI);
// Check the MachineMemOperands for basic consistency.
for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
E = MI->memoperands_end(); I != E; ++I) {
if ((*I)->isLoad() && !MI->mayLoad())
report("Missing mayLoad flag", MI);
if ((*I)->isStore() && !MI->mayStore())
report("Missing mayStore flag", MI);
}
// Debug values must not have a slot index.
// Other instructions must have one, unless they are inside a bundle.
if (LiveInts) {
bool mapped = !LiveInts->isNotInMIMap(MI);
if (MI->isDebugValue()) {
if (mapped)
report("Debug instruction has a slot index", MI);
} else if (MI->isInsideBundle()) {
if (mapped)
report("Instruction inside bundle has a slot index", MI);
} else {
if (!mapped)
report("Missing slot index", MI);
}
}
StringRef ErrorInfo;
if (!TII->verifyInstruction(MI, ErrorInfo))
report(ErrorInfo.data(), MI);
}
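// Check a single operand against its MCOperandInfo: def/use and implicit
// flags, tied-operand links, register class constraints, and the liveness of
// any spill slot referenced through a frame index.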
void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
const MachineInstr *MI = MO->getParent();
const MCInstrDesc &MCID = MI->getDesc();
// The first MCID.NumDefs operands must be explicit register defines
if (MONum < MCID.getNumDefs()) {
const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
if (!MO->isReg())
report("Explicit definition must be a register", MO, MONum);
else if (!MO->isDef() && !MCOI.isOptionalDef())
report("Explicit definition marked as use", MO, MONum);
else if (MO->isImplicit())
report("Explicit definition marked as implicit", MO, MONum);
} else if (MONum < MCID.getNumOperands()) {
const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
// Don't check if it's the last operand in a variadic instruction. See,
// e.g., LDM_RET in the arm back end.
if (MO->isReg() &&
!(MI->isVariadic() && MONum == MCID.getNumOperands()-1)) {
if (MO->isDef() && !MCOI.isOptionalDef())
report("Explicit operand marked as def", MO, MONum);
if (MO->isImplicit())
report("Explicit operand marked as implicit", MO, MONum);
}
int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
if (TiedTo != -1) {
if (!MO->isReg())
report("Tied use must be a register", MO, MONum);
else if (!MO->isTied())
report("Operand should be tied", MO, MONum);
else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
report("Tied def doesn't match MCInstrDesc", MO, MONum);
} else if (MO->isReg() && MO->isTied())
report("Explicit operand should not be tied", MO, MONum);
} else {
// ARM adds %reg0 operands to indicate predicates. We'll allow that.
if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
report("Extra explicit operand on non-variadic instruction", MO, MONum);
}
switch (MO->getType()) {
case MachineOperand::MO_Register: {
const unsigned Reg = MO->getReg();
if (!Reg)
return;
if (MRI->tracksLiveness() && !MI->isDebugValue())
checkLiveness(MO, MONum);
// Verify the consistency of tied operands.
if (MO->isTied()) {
unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
if (!OtherMO.isReg())
report("Must be tied to a register", MO, MONum);
if (!OtherMO.isTied())
report("Missing tie flags on tied operand", MO, MONum);
if (MI->findTiedOperandIdx(OtherIdx) != MONum)
report("Inconsistent tie links", MO, MONum);
if (MONum < MCID.getNumDefs()) {
if (OtherIdx < MCID.getNumOperands()) {
if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
report("Explicit def tied to explicit use without tie constraint",
MO, MONum);
} else {
if (!OtherMO.isImplicit())
report("Explicit def should be tied to implicit use", MO, MONum);
}
}
}
// Verify two-address constraints after leaving SSA form.
unsigned DefIdx;
if (!MRI->isSSA() && MO->isUse() &&
MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
Reg != MI->getOperand(DefIdx).getReg())
report("Two-address instruction operands must be identical", MO, MONum);
// Check register classes.
if (MONum < MCID.getNumOperands() && !MO->isImplicit()) {
unsigned SubIdx = MO->getSubReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (SubIdx) {
report("Illegal subregister index for physical register", MO, MONum);
return;
}
if (const TargetRegisterClass *DRC =
TII->getRegClass(MCID, MONum, TRI, *MF)) {
if (!DRC->contains(Reg)) {
report("Illegal physical register for instruction", MO, MONum);
errs() << TRI->getName(Reg) << " is not a "
<< TRI->getRegClassName(DRC) << " register.\n";
}
}
} else {
// Virtual register.
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
if (SubIdx) {
const TargetRegisterClass *SRC =
TRI->getSubClassWithSubReg(RC, SubIdx);
if (!SRC) {
report("Invalid subregister index for virtual register", MO, MONum);
errs() << "Register class " << TRI->getRegClassName(RC)
<< " does not support subreg index " << SubIdx << "\n";
return;
}
if (RC != SRC) {
report("Invalid register class for subregister index", MO, MONum);
errs() << "Register class " << TRI->getRegClassName(RC)
<< " does not fully support subreg index " << SubIdx << "\n";
return;
}
}
if (const TargetRegisterClass *DRC =
TII->getRegClass(MCID, MONum, TRI, *MF)) {
if (SubIdx) {
const TargetRegisterClass *SuperRC =
TRI->getLargestLegalSuperClass(RC, *MF);
if (!SuperRC) {
report("No largest legal super class exists.", MO, MONum);
return;
}
DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
if (!DRC) {
report("No matching super-reg register class.", MO, MONum);
return;
}
}
if (!RC->hasSuperClassEq(DRC)) {
report("Illegal virtual register for instruction", MO, MONum);
errs() << "Expected a " << TRI->getRegClassName(DRC)
<< " register, but got a " << TRI->getRegClassName(RC)
<< " register\n";
}
}
}
}
break;
}
case MachineOperand::MO_RegisterMask:
regMasks.push_back(MO->getRegMask());
break;
case MachineOperand::MO_MachineBasicBlock:
if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
report("PHI operand is not in the CFG", MO, MONum);
break;
case MachineOperand::MO_FrameIndex:
if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
LiveInts && !LiveInts->isNotInMIMap(MI)) {
LiveInterval &LI = LiveStks->getInterval(MO->getIndex());
SlotIndex Idx = LiveInts->getInstructionIndex(MI);
if (MI->mayLoad() && !LI.liveAt(Idx.getRegSlot(true))) {
report("Instruction loads from dead spill slot", MO, MONum);
errs() << "Live stack: " << LI << '\n';
}
if (MI->mayStore() && !LI.liveAt(Idx.getRegSlot())) {
report("Instruction stores to dead spill slot", MO, MONum);
errs() << "Live stack: " << LI << '\n';
}
}
break;
default:
break;
}
}
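// Check the liveness bookkeeping for a register operand: reads must see a
// live value according to regsLive, LiveVariables and LiveIntervals, and defs
// must start a live segment at the right slot and agree with any dead flag.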
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
const MachineInstr *MI = MO->getParent();
const unsigned Reg = MO->getReg();
// Both use and def operands can read a register.
if (MO->readsReg()) {
regsLiveInButUnused.erase(Reg);
if (MO->isKill())
addRegWithSubRegs(regsKilled, Reg);
// Check that LiveVars knows this kill.
if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
MO->isKill()) {
LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
if (std::find(VI.Kills.begin(), VI.Kills.end(), MI) == VI.Kills.end())
report("Kill missing from LiveVariables", MO, MONum);
}
// Check LiveInts liveness and kill.
if (LiveInts && !LiveInts->isNotInMIMap(MI)) {
SlotIndex UseIdx = LiveInts->getInstructionIndex(MI);
// Check the cached regunit intervals.
if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isReserved(Reg)) {
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units)) {
LiveQueryResult LRQ = LR->Query(UseIdx);
if (!LRQ.valueIn()) {
report("No live segment at use", MO, MONum);
errs() << UseIdx << " is not live in " << PrintRegUnit(*Units, TRI)
<< ' ' << *LR << '\n';
}
if (MO->isKill() && !LRQ.isKill()) {
report("Live range continues after kill flag", MO, MONum);
errs() << PrintRegUnit(*Units, TRI) << ' ' << *LR << '\n';
}
}
}
}
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (LiveInts->hasInterval(Reg)) {
// This is a virtual register interval.
const LiveInterval &LI = LiveInts->getInterval(Reg);
LiveQueryResult LRQ = LI.Query(UseIdx);
if (!LRQ.valueIn()) {
report("No live segment at use", MO, MONum);
errs() << UseIdx << " is not live in " << LI << '\n';
}
// Check for extra kill flags.
// Note that we allow missing kill flags for now.
if (MO->isKill() && !LRQ.isKill()) {
report("Live range continues after kill flag", MO, MONum);
errs() << "Live range: " << LI << '\n';
}
} else {
report("Virtual register has no live interval", MO, MONum);
}
}
}
// Use of a dead register.
if (!regsLive.count(Reg)) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
// Reserved registers may be used even when 'dead'.
bool Bad = !isReserved(Reg);
// We are fine if just any subregister has a defined value.
if (Bad) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid();
++SubRegs) {
if (regsLive.count(*SubRegs)) {
Bad = false;
break;
}
}
}
        // If there is an additional implicit use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead; if the complete super register is dead we will
        // get a report for its own operand.
if (Bad) {
for (const MachineOperand &MOP : MI->uses()) {
if (!MOP.isReg())
continue;
if (!MOP.isImplicit())
continue;
for (MCSubRegIterator SubRegs(MOP.getReg(), TRI); SubRegs.isValid();
++SubRegs) {
if (*SubRegs == Reg) {
Bad = false;
break;
}
}
}
}
if (Bad)
report("Using an undefined physical register", MO, MONum);
} else if (MRI->def_empty(Reg)) {
report("Reading virtual register without a def", MO, MONum);
} else {
BBInfo &MInfo = MBBInfoMap[MI->getParent()];
// We don't know which virtual registers are live in, so only complain
// if vreg was killed in this MBB. Otherwise keep track of vregs that
// must be live in. PHI instructions are handled separately.
if (MInfo.regsKilled.count(Reg))
report("Using a killed virtual register", MO, MONum);
else if (!MI->isPHI())
MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
}
}
}
if (MO->isDef()) {
// Register defined.
// TODO: verify that earlyclobber ops are not used.
if (MO->isDead())
addRegWithSubRegs(regsDead, Reg);
else
addRegWithSubRegs(regsDefined, Reg);
// Verify SSA form.
if (MRI->isSSA() && TargetRegisterInfo::isVirtualRegister(Reg) &&
std::next(MRI->def_begin(Reg)) != MRI->def_end())
report("Multiple virtual register defs in SSA form", MO, MONum);
// Check LiveInts for a live segment, but only for virtual registers.
if (LiveInts && TargetRegisterInfo::isVirtualRegister(Reg) &&
!LiveInts->isNotInMIMap(MI)) {
SlotIndex DefIdx = LiveInts->getInstructionIndex(MI);
DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
if (LiveInts->hasInterval(Reg)) {
const LiveInterval &LI = LiveInts->getInterval(Reg);
if (const VNInfo *VNI = LI.getVNInfoAt(DefIdx)) {
assert(VNI && "NULL valno is not allowed");
if (VNI->def != DefIdx) {
report("Inconsistent valno->def", MO, MONum);
errs() << "Valno " << VNI->id << " is not defined at "
<< DefIdx << " in " << LI << '\n';
}
} else {
report("No live segment at def", MO, MONum);
errs() << DefIdx << " is not live in " << LI << '\n';
}
// Check that, if the dead def flag is present, LiveInts agree.
if (MO->isDead()) {
LiveQueryResult LRQ = LI.Query(DefIdx);
if (!LRQ.isDeadDef()) {
report("Live range continues after dead def flag", MO, MONum);
errs() << "Live range: " << LI << '\n';
}
}
} else {
report("Virtual register has no Live interval", MO, MONum);
}
}
}
}
void MachineVerifier::visitMachineInstrAfter(const MachineInstr *MI) {
}
// This function gets called after visiting all instructions in a bundle. The
// argument points to the bundle header.
// Normal stand-alone instructions are also considered 'bundles', and this
// function is called for all of them.
void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
BBInfo &MInfo = MBBInfoMap[MI->getParent()];
set_union(MInfo.regsKilled, regsKilled);
set_subtract(regsLive, regsKilled); regsKilled.clear();
// Kill any masked registers.
while (!regMasks.empty()) {
const uint32_t *Mask = regMasks.pop_back_val();
for (RegSet::iterator I = regsLive.begin(), E = regsLive.end(); I != E; ++I)
if (TargetRegisterInfo::isPhysicalRegister(*I) &&
MachineOperand::clobbersPhysReg(Mask, *I))
regsDead.push_back(*I);
}
set_subtract(regsLive, regsDead); regsDead.clear();
set_union(regsLive, regsDefined); regsDefined.clear();
}
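// Record the live-out register set of the block and check that its end index
// does not precede the index of its last instruction.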
void
MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
MBBInfoMap[MBB].regsLiveOut = regsLive;
regsLive.clear();
if (Indexes) {
SlotIndex stop = Indexes->getMBBEndIdx(MBB);
if (!(stop > lastIndex)) {
report("Block ends before last instruction index", MBB);
errs() << "Block ends at " << stop
<< " last instruction was at " << lastIndex << '\n';
}
lastIndex = stop;
}
}
// Calculate the largest possible vregsPassed sets. These are the registers that
// can pass through an MBB live, but may not be live every time. It is assumed
// that all vregsPassed sets are empty before the call.
void MachineVerifier::calcRegsPassed() {
// First push live-out regs to successors' vregsPassed. Remember the MBBs that
// have any vregsPassed.
SmallPtrSet<const MachineBasicBlock*, 8> todo;
for (const auto &MBB : *MF) {
BBInfo &MInfo = MBBInfoMap[&MBB];
if (!MInfo.reachable)
continue;
for (MachineBasicBlock::const_succ_iterator SuI = MBB.succ_begin(),
SuE = MBB.succ_end(); SuI != SuE; ++SuI) {
BBInfo &SInfo = MBBInfoMap[*SuI];
if (SInfo.addPassed(MInfo.regsLiveOut))
todo.insert(*SuI);
}
}
// Iteratively push vregsPassed to successors. This will converge to the same
// final state regardless of DenseSet iteration order.
while (!todo.empty()) {
const MachineBasicBlock *MBB = *todo.begin();
todo.erase(MBB);
BBInfo &MInfo = MBBInfoMap[MBB];
for (MachineBasicBlock::const_succ_iterator SuI = MBB->succ_begin(),
SuE = MBB->succ_end(); SuI != SuE; ++SuI) {
if (*SuI == MBB)
continue;
BBInfo &SInfo = MBBInfoMap[*SuI];
if (SInfo.addPassed(MInfo.vregsPassed))
todo.insert(*SuI);
}
}
}
// Calculate the set of virtual registers that must be passed through each basic
// block in order to satisfy the requirements of successor blocks. This is very
// similar to calcRegsPassed, only backwards.
void MachineVerifier::calcRegsRequired() {
// First push live-in regs to predecessors' vregsRequired.
SmallPtrSet<const MachineBasicBlock*, 8> todo;
for (const auto &MBB : *MF) {
BBInfo &MInfo = MBBInfoMap[&MBB];
for (MachineBasicBlock::const_pred_iterator PrI = MBB.pred_begin(),
PrE = MBB.pred_end(); PrI != PrE; ++PrI) {
BBInfo &PInfo = MBBInfoMap[*PrI];
if (PInfo.addRequired(MInfo.vregsLiveIn))
todo.insert(*PrI);
}
}
// Iteratively push vregsRequired to predecessors. This will converge to the
// same final state regardless of DenseSet iteration order.
while (!todo.empty()) {
const MachineBasicBlock *MBB = *todo.begin();
todo.erase(MBB);
BBInfo &MInfo = MBBInfoMap[MBB];
for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(),
PrE = MBB->pred_end(); PrI != PrE; ++PrI) {
if (*PrI == MBB)
continue;
BBInfo &SInfo = MBBInfoMap[*PrI];
if (SInfo.addRequired(MInfo.vregsRequired))
todo.insert(*PrI);
}
}
}
// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock *MBB) {
SmallPtrSet<const MachineBasicBlock*, 8> seen;
for (const auto &BBI : *MBB) {
if (!BBI.isPHI())
break;
seen.clear();
for (unsigned i = 1, e = BBI.getNumOperands(); i != e; i += 2) {
unsigned Reg = BBI.getOperand(i).getReg();
const MachineBasicBlock *Pre = BBI.getOperand(i + 1).getMBB();
if (!Pre->isSuccessor(MBB))
continue;
seen.insert(Pre);
BBInfo &PrInfo = MBBInfoMap[Pre];
if (PrInfo.reachable && !PrInfo.isLiveOut(Reg))
report("PHI operand is not live-out from predecessor",
&BBI.getOperand(i), i);
}
// Did we see all predecessors?
for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(),
PrE = MBB->pred_end(); PrI != PrE; ++PrI) {
if (!seen.count(*PrI)) {
report("Missing PHI operand", &BBI);
errs() << "BB#" << (*PrI)->getNumber()
<< " is a predecessor according to the CFG.\n";
}
}
}
}
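// Run the checks that need information from the whole function: PHI operands
// against the computed vregsPassed sets, virtual registers killed while still
// required live-out, dominance of defs over uses, and the liveness analyses.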
void MachineVerifier::visitMachineFunctionAfter() {
calcRegsPassed();
for (const auto &MBB : *MF) {
BBInfo &MInfo = MBBInfoMap[&MBB];
// Skip unreachable MBBs.
if (!MInfo.reachable)
continue;
checkPHIOps(&MBB);
}
// Now check liveness info if available
calcRegsRequired();
// Check for killed virtual registers that should be live out.
for (const auto &MBB : *MF) {
BBInfo &MInfo = MBBInfoMap[&MBB];
for (RegSet::iterator
I = MInfo.vregsRequired.begin(), E = MInfo.vregsRequired.end(); I != E;
++I)
if (MInfo.regsKilled.count(*I)) {
report("Virtual register killed in block, but needed live out.", &MBB);
errs() << "Virtual register " << PrintReg(*I)
<< " is used after the block.\n";
}
}
if (!MF->empty()) {
BBInfo &MInfo = MBBInfoMap[&MF->front()];
for (RegSet::iterator
I = MInfo.vregsRequired.begin(), E = MInfo.vregsRequired.end(); I != E;
++I)
report("Virtual register def doesn't dominate all uses.",
MRI->getVRegDef(*I));
}
if (LiveVars)
verifyLiveVariables();
if (LiveInts)
verifyLiveIntervals();
}
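// Cross-check the verifier's vregsRequired sets against the AliveBlocks sets
// maintained by LiveVariables; the two should agree block by block.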
void MachineVerifier::verifyLiveVariables() {
assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
for (const auto &MBB : *MF) {
BBInfo &MInfo = MBBInfoMap[&MBB];
// Our vregsRequired should be identical to LiveVariables' AliveBlocks
if (MInfo.vregsRequired.count(Reg)) {
if (!VI.AliveBlocks.test(MBB.getNumber())) {
report("LiveVariables: Block missing from AliveBlocks", &MBB);
errs() << "Virtual register " << PrintReg(Reg)
<< " must be live through the block.\n";
}
} else {
if (VI.AliveBlocks.test(MBB.getNumber())) {
report("LiveVariables: Block should not be in AliveBlocks", &MBB);
errs() << "Virtual register " << PrintReg(Reg)
<< " is not needed live through the block.\n";
}
}
}
}
}
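// Verify LiveIntervals: every virtual register that still has defs or uses
// must have an interval, and all cached regunit ranges must be consistent.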
void MachineVerifier::verifyLiveIntervals() {
assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
// Spilling and splitting may leave unused registers around. Skip them.
if (MRI->reg_nodbg_empty(Reg))
continue;
if (!LiveInts->hasInterval(Reg)) {
report("Missing live interval for virtual register", MF);
errs() << PrintReg(Reg, TRI) << " still has defs or uses\n";
continue;
}
const LiveInterval &LI = LiveInts->getInterval(Reg);
assert(Reg == LI.reg && "Invalid reg to interval mapping");
verifyLiveInterval(LI);
}
// Verify all the cached regunit intervals.
for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
verifyLiveRange(*LR, i);
}
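// Check a single value number of a live range: it must be live at its def, a
// PHI-def must sit at the start of its block, and a non-PHI def must come
// from an instruction that writes the register at the right kind of slot
// (early-clobber vs. register).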
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
const VNInfo *VNI, unsigned Reg,
unsigned LaneMask) {
if (VNI->isUnused())
return;
const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
if (!DefVNI) {
report("Valno not live at def and not marked unused", MF, LR, Reg,
LaneMask);
errs() << "Valno #" << VNI->id << '\n';
return;
}
if (DefVNI != VNI) {
report("Live segment at def has different valno", MF, LR, Reg, LaneMask);
errs() << "Valno #" << VNI->id << " is defined at " << VNI->def
<< " where valno #" << DefVNI->id << " is live\n";
return;
}
const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
if (!MBB) {
report("Invalid definition index", MF, LR, Reg, LaneMask);
errs() << "Valno #" << VNI->id << " is defined at " << VNI->def
<< " in " << LR << '\n';
return;
}
if (VNI->isPHIDef()) {
if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
report("PHIDef value is not defined at MBB start", MBB, LR, Reg,
LaneMask);
errs() << "Valno #" << VNI->id << " is defined at " << VNI->def
<< ", not at the beginning of BB#" << MBB->getNumber() << '\n';
}
return;
}
// Non-PHI def.
const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
if (!MI) {
report("No instruction at def index", MBB, LR, Reg, LaneMask);
errs() << "Valno #" << VNI->id << " is defined at " << VNI->def << '\n';
return;
}
if (Reg != 0) {
bool hasDef = false;
bool isEarlyClobber = false;
for (ConstMIBundleOperands MOI(MI); MOI.isValid(); ++MOI) {
if (!MOI->isReg() || !MOI->isDef())
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (MOI->getReg() != Reg)
continue;
} else {
if (!TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) ||
!TRI->hasRegUnit(MOI->getReg(), Reg))
continue;
}
if (LaneMask != 0 &&
(TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask) == 0)
continue;
hasDef = true;
if (MOI->isEarlyClobber())
isEarlyClobber = true;
}
if (!hasDef) {
report("Defining instruction does not modify register", MI);
errs() << "Valno #" << VNI->id << " in " << LR << '\n';
}
// Early clobber defs begin at USE slots, but other defs must begin at
// DEF slots.
if (isEarlyClobber) {
if (!VNI->def.isEarlyClobber()) {
report("Early clobber def must be at an early-clobber slot", MBB, LR,
Reg, LaneMask);
errs() << "Valno #" << VNI->id << " is defined at " << VNI->def << '\n';
}
} else if (!VNI->def.isRegister()) {
report("Non-PHI, non-early clobber def must be at a register slot",
MBB, LR, Reg, LaneMask);
errs() << "Valno #" << VNI->id << " is defined at " << VNI->def << '\n';
}
}
}
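// Check a single segment of a live range: both endpoints must map to real
// blocks and instructions, dead and early-clobber end slots must be used
// consistently, and the value must be live out of every predecessor of each
// block the segment spans.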
void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
const LiveRange::const_iterator I,
unsigned Reg, unsigned LaneMask) {
const LiveRange::Segment &S = *I;
const VNInfo *VNI = S.valno;
assert(VNI && "Live segment has no valno");
if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
report("Foreign valno in live segment", MF, LR, Reg, LaneMask);
errs() << S << " has a bad valno\n";
}
if (VNI->isUnused()) {
report("Live segment valno is marked unused", MF, LR, Reg, LaneMask);
errs() << S << '\n';
}
const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
if (!MBB) {
report("Bad start of live segment, no basic block", MF, LR, Reg, LaneMask);
errs() << S << '\n';
return;
}
SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
if (S.start != MBBStartIdx && S.start != VNI->def) {
report("Live segment must begin at MBB entry or valno def", MBB, LR, Reg,
LaneMask);
errs() << S << '\n';
}
const MachineBasicBlock *EndMBB =
LiveInts->getMBBFromIndex(S.end.getPrevSlot());
if (!EndMBB) {
report("Bad end of live segment, no basic block", MF, LR, Reg, LaneMask);
errs() << S << '\n';
return;
}
// No more checks for live-out segments.
if (S.end == LiveInts->getMBBEndIdx(EndMBB))
return;
  // RegUnit intervals are allowed to have dead PHI-defs.
if (!TargetRegisterInfo::isVirtualRegister(Reg) && VNI->isPHIDef() &&
S.start == VNI->def && S.end == VNI->def.getDeadSlot())
return;
  // The live segment is ending inside EndMBB.
const MachineInstr *MI =
LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
if (!MI) {
report("Live segment doesn't end at a valid instruction", EndMBB, LR, Reg,
LaneMask);
errs() << S << '\n';
return;
}
// The block slot must refer to a basic block boundary.
if (S.end.isBlock()) {
report("Live segment ends at B slot of an instruction", EndMBB, LR, Reg,
LaneMask);
errs() << S << '\n';
}
if (S.end.isDead()) {
// Segment ends on the dead slot.
// That means there must be a dead def.
if (!SlotIndex::isSameInstr(S.start, S.end)) {
report("Live segment ending at dead slot spans instructions", EndMBB, LR,
Reg, LaneMask);
errs() << S << '\n';
}
}
// A live segment can only end at an early-clobber slot if it is being
// redefined by an early-clobber def.
if (S.end.isEarlyClobber()) {
if (I+1 == LR.end() || (I+1)->start != S.end) {
report("Live segment ending at early clobber slot must be "
"redefined by an EC def in the same instruction", EndMBB, LR, Reg,
LaneMask);
errs() << S << '\n';
}
}
// The following checks only apply to virtual registers. Physreg liveness
// is too weird to check.
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
// A live segment can end with either a redefinition, a kill flag on a
// use, or a dead flag on a def.
bool hasRead = false;
bool hasSubRegDef = false;
for (ConstMIBundleOperands MOI(MI); MOI.isValid(); ++MOI) {
if (!MOI->isReg() || MOI->getReg() != Reg)
continue;
if (LaneMask != 0 &&
(LaneMask & TRI->getSubRegIndexLaneMask(MOI->getSubReg())) == 0)
continue;
if (MOI->isDef() && MOI->getSubReg() != 0)
hasSubRegDef = true;
if (MOI->readsReg())
hasRead = true;
}
if (!S.end.isDead()) {
if (!hasRead) {
// When tracking subregister liveness, the main range must start new
// values on partial register writes, even if there is no read.
if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask != 0 ||
!hasSubRegDef) {
report("Instruction ending live segment doesn't read the register",
MI);
errs() << S << " in " << LR << '\n';
}
}
}
}
// Now check all the basic blocks in this live segment.
MachineFunction::const_iterator MFI = MBB;
// Is this live segment the beginning of a non-PHIDef VN?
if (S.start == VNI->def && !VNI->isPHIDef()) {
// Not live-in to any blocks.
if (MBB == EndMBB)
return;
// Skip this block.
++MFI;
}
for (;;) {
assert(LiveInts->isLiveInToMBB(LR, MFI));
// We don't know how to track physregs into a landing pad.
if (!TargetRegisterInfo::isVirtualRegister(Reg) &&
MFI->isLandingPad()) {
if (&*MFI == EndMBB)
break;
++MFI;
continue;
}
// Is VNI a PHI-def in the current block?
bool IsPHI = VNI->isPHIDef() &&
VNI->def == LiveInts->getMBBStartIdx(MFI);
// Check that VNI is live-out of all predecessors.
for (MachineBasicBlock::const_pred_iterator PI = MFI->pred_begin(),
PE = MFI->pred_end(); PI != PE; ++PI) {
SlotIndex PEnd = LiveInts->getMBBEndIdx(*PI);
const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
// All predecessors must have a live-out value.
if (!PVNI) {
report("Register not marked live out of predecessor", *PI, LR, Reg,
LaneMask);
errs() << "Valno #" << VNI->id << " live into BB#" << MFI->getNumber()
<< '@' << LiveInts->getMBBStartIdx(MFI) << ", not live before "
<< PEnd << '\n';
continue;
}
// Only PHI-defs can take different predecessor values.
if (!IsPHI && PVNI != VNI) {
report("Different value live out of predecessor", *PI, LR, Reg,
LaneMask);
errs() << "Valno #" << PVNI->id << " live out of BB#"
<< (*PI)->getNumber() << '@' << PEnd
<< "\nValno #" << VNI->id << " live into BB#" << MFI->getNumber()
<< '@' << LiveInts->getMBBStartIdx(MFI) << '\n';
}
}
if (&*MFI == EndMBB)
break;
++MFI;
}
}
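// Verify all value numbers and then all segments of a live range.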
void MachineVerifier::verifyLiveRange(const LiveRange &LR, unsigned Reg,
unsigned LaneMask) {
for (const VNInfo *VNI : LR.valnos)
verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
verifyLiveRangeSegment(LR, I, Reg, LaneMask);
}
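// Verify a virtual register's interval: its main range, the lane masks and
// coverage of its subranges, and that it forms one connected component.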
void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
unsigned Reg = LI.reg;
assert(TargetRegisterInfo::isVirtualRegister(Reg));
verifyLiveRange(LI, Reg);
unsigned Mask = 0;
unsigned MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
for (const LiveInterval::SubRange &SR : LI.subranges()) {
if ((Mask & SR.LaneMask) != 0)
report("Lane masks of sub ranges overlap in live interval", MF, LI);
if ((SR.LaneMask & ~MaxMask) != 0)
report("Subrange lanemask is invalid", MF, LI);
Mask |= SR.LaneMask;
verifyLiveRange(SR, LI.reg, SR.LaneMask);
if (!LI.covers(SR))
report("A Subrange is not covered by the main range", MF, LI);
}
// Check the LI only has one connected component.
ConnectedVNInfoEqClasses ConEQ(*LiveInts);
unsigned NumComp = ConEQ.Classify(&LI);
if (NumComp > 1) {
report("Multiple connected components in live interval", MF, LI);
for (unsigned comp = 0; comp != NumComp; ++comp) {
errs() << comp << ": valnos";
for (LiveInterval::const_vni_iterator I = LI.vni_begin(),
E = LI.vni_end(); I!=E; ++I)
if (comp == ConEQ.getEqClass(*I))
errs() << ' ' << (*I)->id;
errs() << '\n';
}
}
}
namespace {
// FrameSetup and FrameDestroy can have zero adjustment, so with a single
// integer we could not tell whether a zero value marks a FrameSetup or a
// FrameDestroy. We therefore use a bool plus an integer to capture the
// stack state.
struct StackStateOfBB {
StackStateOfBB() : EntryValue(0), ExitValue(0), EntryIsSetup(false),
ExitIsSetup(false) { }
StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
ExitIsSetup(ExitSetup) { }
// Can be negative, which means we are setting up a frame.
int EntryValue;
int ExitValue;
bool EntryIsSetup;
bool ExitIsSetup;
};
}
/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>, that stack adjustments are identical on
/// all CFG edges to a merge point, and that the frame is destroyed at the end
/// of a return block.
void MachineVerifier::verifyStackFrame() {
unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
SmallVector<StackStateOfBB, 8> SPState;
SPState.resize(MF->getNumBlockIDs());
SmallPtrSet<const MachineBasicBlock*, 8> Reachable;
// Visit the MBBs in DFS order.
for (df_ext_iterator<const MachineFunction*,
SmallPtrSet<const MachineBasicBlock*, 8> >
DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
DFI != DFE; ++DFI) {
const MachineBasicBlock *MBB = *DFI;
StackStateOfBB BBState;
// Check the exit state of the DFS stack predecessor.
if (DFI.getPathLength() >= 2) {
const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
assert(Reachable.count(StackPred) &&
"DFS stack predecessor is already visited.\n");
BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
BBState.ExitValue = BBState.EntryValue;
BBState.ExitIsSetup = BBState.EntryIsSetup;
}
// Update stack state by checking contents of MBB.
for (const auto &I : *MBB) {
if (I.getOpcode() == FrameSetupOpcode) {
// The first operand of a FrameOpcode should be i32.
int Size = I.getOperand(0).getImm();
assert(Size >= 0 &&
"Value should be non-negative in FrameSetup and FrameDestroy.\n");
if (BBState.ExitIsSetup)
report("FrameSetup is after another FrameSetup", &I);
BBState.ExitValue -= Size;
BBState.ExitIsSetup = true;
}
if (I.getOpcode() == FrameDestroyOpcode) {
// The first operand of a FrameOpcode should be i32.
int Size = I.getOperand(0).getImm();
assert(Size >= 0 &&
"Value should be non-negative in FrameSetup and FrameDestroy.\n");
if (!BBState.ExitIsSetup)
report("FrameDestroy is not after a FrameSetup", &I);
int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
BBState.ExitValue;
if (BBState.ExitIsSetup && AbsSPAdj != Size) {
report("FrameDestroy <n> is after FrameSetup <m>", &I);
errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
<< AbsSPAdj << ">.\n";
}
BBState.ExitValue += Size;
BBState.ExitIsSetup = false;
}
}
SPState[MBB->getNumber()] = BBState;
// Make sure the exit state of any predecessor is consistent with the entry
// state.
for (MachineBasicBlock::const_pred_iterator I = MBB->pred_begin(),
E = MBB->pred_end(); I != E; ++I) {
if (Reachable.count(*I) &&
(SPState[(*I)->getNumber()].ExitValue != BBState.EntryValue ||
SPState[(*I)->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
report("The exit stack state of a predecessor is inconsistent.", MBB);
errs() << "Predecessor BB#" << (*I)->getNumber() << " has exit state ("
<< SPState[(*I)->getNumber()].ExitValue << ", "
<< SPState[(*I)->getNumber()].ExitIsSetup
<< "), while BB#" << MBB->getNumber() << " has entry state ("
<< BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
}
}
// Make sure the entry state of any successor is consistent with the exit
// state.
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
if (Reachable.count(*I) &&
(SPState[(*I)->getNumber()].EntryValue != BBState.ExitValue ||
SPState[(*I)->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
report("The entry stack state of a successor is inconsistent.", MBB);
errs() << "Successor BB#" << (*I)->getNumber() << " has entry state ("
<< SPState[(*I)->getNumber()].EntryValue << ", "
<< SPState[(*I)->getNumber()].EntryIsSetup
<< "), while BB#" << MBB->getNumber() << " has exit state ("
<< BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
}
}
// Make sure a basic block with return ends with zero stack adjustment.
if (!MBB->empty() && MBB->back().isReturn()) {
if (BBState.ExitIsSetup)
report("A return block ends with a FrameSetup.", MBB);
if (BBState.ExitValue)
report("A return block ends with a nonzero stack adjustment.", MBB);
}
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LLVMTargetMachine.cpp | //===-- LLVMTargetMachine.cpp - Implement the LLVMTargetMachine class -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LLVMTargetMachine class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetMachine.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;
// Enable or disable FastISel. Both options are needed, because
// FastISel is enabled by default with -fast, and we wish to be
// able to enable or disable fast-isel independently from -O0.
static cl::opt<cl::boolOrDefault>
EnableFastISelOption("fast-isel", cl::Hidden,
cl::desc("Enable the \"fast\" instruction selector"));
void LLVMTargetMachine::initAsmInfo() {
MRI = TheTarget.createMCRegInfo(getTargetTriple().str());
MII = TheTarget.createMCInstrInfo();
// FIXME: Having an MCSubtargetInfo on the target machine is a hack due
// to some backends having subtarget feature dependent module level
// code generation. This is similar to the hack in the AsmPrinter for
// module level assembly etc.
STI = TheTarget.createMCSubtargetInfo(getTargetTriple().str(), getTargetCPU(),
getTargetFeatureString());
MCAsmInfo *TmpAsmInfo =
TheTarget.createMCAsmInfo(*MRI, getTargetTriple().str());
// TargetSelect.h moved to a different directory between LLVM 2.9 and 3.0,
// and if the old one gets included then MCAsmInfo will be NULL and
// we'll crash later.
// Provide the user with a useful error message about what's wrong.
assert(TmpAsmInfo && "MCAsmInfo not initialized. "
"Make sure you include the correct TargetSelect.h"
"and that InitializeAllTargetMCs() is being invoked!");
if (Options.DisableIntegratedAS)
TmpAsmInfo->setUseIntegratedAssembler(false);
if (Options.CompressDebugSections)
TmpAsmInfo->setCompressDebugSections(true);
AsmInfo = TmpAsmInfo;
}
LLVMTargetMachine::LLVMTargetMachine(const Target &T,
StringRef DataLayoutString,
const Triple &TT, StringRef CPU,
StringRef FS, TargetOptions Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
: TargetMachine(T, DataLayoutString, TT, CPU, FS, Options) {
CodeGenInfo = T.createMCCodeGenInfo(TT.str(), RM, CM, OL);
}
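// Return a TargetIRAnalysis producing a TargetTransformInfo backed by
// BasicTTIImpl for each function.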
TargetIRAnalysis LLVMTargetMachine::getTargetIRAnalysis() {
return TargetIRAnalysis([this](Function &F) {
return TargetTransformInfo(BasicTTIImpl(this, F));
});
}
/// addPassesToX helper drives creation and initialization of TargetPassConfig.
static MCContext *
addPassesToGenerateCode(LLVMTargetMachine *TM, PassManagerBase &PM,
bool DisableVerify, AnalysisID StartBefore,
AnalysisID StartAfter, AnalysisID StopAfter,
MachineFunctionInitializer *MFInitializer = nullptr) {
// Add internal analysis passes from the target machine.
PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
// Targets may override createPassConfig to provide a target-specific
// subclass.
TargetPassConfig *PassConfig = TM->createPassConfig(PM);
PassConfig->setStartStopPasses(StartBefore, StartAfter, StopAfter);
// Set PassConfig options provided by TargetMachine.
PassConfig->setDisableVerify(DisableVerify);
PM.add(PassConfig);
PassConfig->addIRPasses();
PassConfig->addCodeGenPrepare();
PassConfig->addPassesToHandleExceptions();
PassConfig->addISelPrepare();
// Install a MachineModuleInfo class, which is an immutable pass that holds
// all the per-module stuff we're generating, including MCContext.
MachineModuleInfo *MMI = new MachineModuleInfo(
*TM->getMCAsmInfo(), *TM->getMCRegisterInfo(), TM->getObjFileLowering());
PM.add(MMI);
// Set up a MachineFunction for the rest of CodeGen to work on.
PM.add(new MachineFunctionAnalysis(*TM, MFInitializer));
// Enable FastISel with -fast, but allow that to be overridden.
if (EnableFastISelOption == cl::BOU_TRUE ||
(TM->getOptLevel() == CodeGenOpt::None &&
EnableFastISelOption != cl::BOU_FALSE))
TM->setFastISel(true);
// Ask the target for an isel.
if (PassConfig->addInstSelector())
return nullptr;
PassConfig->addMachinePasses();
PassConfig->setInitialized();
return &MMI->getContext();
}
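// Add passes to emit the requested file type to Out. Returns true on failure.
// Depending on FileType this sets up an assembly, object, or null streamer
// and finishes with the target's AsmPrinter.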
bool LLVMTargetMachine::addPassesToEmitFile(
PassManagerBase &PM, raw_pwrite_stream &Out, CodeGenFileType FileType,
bool DisableVerify, AnalysisID StartBefore, AnalysisID StartAfter,
AnalysisID StopAfter, MachineFunctionInitializer *MFInitializer) {
// Add common CodeGen passes.
MCContext *Context =
addPassesToGenerateCode(this, PM, DisableVerify, StartBefore, StartAfter,
StopAfter, MFInitializer);
if (!Context)
return true;
if (StopAfter) {
PM.add(createPrintMIRPass(outs()));
return false;
}
if (Options.MCOptions.MCSaveTempLabels)
Context->setAllowTemporaryLabels(false);
const MCSubtargetInfo &STI = *getMCSubtargetInfo();
const MCAsmInfo &MAI = *getMCAsmInfo();
const MCRegisterInfo &MRI = *getMCRegisterInfo();
const MCInstrInfo &MII = *getMCInstrInfo();
std::unique_ptr<MCStreamer> AsmStreamer;
switch (FileType) {
case CGFT_AssemblyFile: {
MCInstPrinter *InstPrinter = getTarget().createMCInstPrinter(
getTargetTriple(), MAI.getAssemblerDialect(), MAI, MII, MRI);
// Create a code emitter if asked to show the encoding.
MCCodeEmitter *MCE = nullptr;
if (Options.MCOptions.ShowMCEncoding)
MCE = getTarget().createMCCodeEmitter(MII, MRI, *Context);
MCAsmBackend *MAB =
getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
auto FOut = llvm::make_unique<formatted_raw_ostream>(Out);
MCStreamer *S = getTarget().createAsmStreamer(
*Context, std::move(FOut), Options.MCOptions.AsmVerbose,
Options.MCOptions.MCUseDwarfDirectory, InstPrinter, MCE, MAB,
Options.MCOptions.ShowMCInst);
AsmStreamer.reset(S);
break;
}
case CGFT_ObjectFile: {
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(MII, MRI, *Context);
MCAsmBackend *MAB =
getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
if (!MCE || !MAB)
return true;
// Don't waste memory on names of temp labels.
Context->setUseNamesOnTempLabels(false);
Triple T(getTargetTriple().str());
AsmStreamer.reset(getTarget().createMCObjectStreamer(
T, *Context, *MAB, Out, MCE, STI, Options.MCOptions.MCRelaxAll,
/*DWARFMustBeAtTheEnd*/ true));
break;
}
case CGFT_Null:
    // The Null output is intended for performance analysis and testing, not
    // for real users.
AsmStreamer.reset(getTarget().createNullStreamer(*Context));
break;
}
// Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
FunctionPass *Printer =
getTarget().createAsmPrinter(*this, std::move(AsmStreamer));
if (!Printer)
return true;
PM.add(Printer);
return false;
}
/// addPassesToEmitMC - Add passes to the specified pass manager to get
/// machine code emitted with the MCJIT. This method returns true if machine
/// code is not supported. It fills the MCContext Ctx pointer, which can be
/// used to build a custom MCStreamer.
///
bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
raw_pwrite_stream &Out,
bool DisableVerify) {
// Add common CodeGen passes.
Ctx = addPassesToGenerateCode(this, PM, DisableVerify, nullptr, nullptr,
nullptr);
if (!Ctx)
return true;
if (Options.MCOptions.MCSaveTempLabels)
Ctx->setAllowTemporaryLabels(false);
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
const MCRegisterInfo &MRI = *getMCRegisterInfo();
MCCodeEmitter *MCE =
getTarget().createMCCodeEmitter(*getMCInstrInfo(), MRI, *Ctx);
MCAsmBackend *MAB =
getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
if (!MCE || !MAB)
return true;
const Triple &T = getTargetTriple();
const MCSubtargetInfo &STI = *getMCSubtargetInfo();
std::unique_ptr<MCStreamer> AsmStreamer(getTarget().createMCObjectStreamer(
T, *Ctx, *MAB, Out, MCE, STI, Options.MCOptions.MCRelaxAll,
/*DWARFMustBeAtTheEnd*/ true));
// Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
FunctionPass *Printer =
getTarget().createAsmPrinter(*this, std::move(AsmStreamer));
if (!Printer)
return true;
PM.add(Printer);
return false; // success!
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/UnreachableBlockElim.cpp | //===-- UnreachableBlockElim.cpp - Remove unreachable blocks for codegen --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is an extremely simple version of the SimplifyCFG pass. Its sole
// job is to delete LLVM basic blocks that are not reachable from the entry
// node. To do this, it performs a simple depth first traversal of the CFG,
// then deletes any unvisited nodes.
//
// Note that this pass is really a hack. In particular, the instruction
// selectors for various targets should just not generate code for unreachable
// blocks. Until LLVM has a more systematic way of defining instruction
// selectors, however, we cannot really expect them to handle additional
// complexity.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
namespace {
class UnreachableBlockElim : public FunctionPass {
bool runOnFunction(Function &F) override;
public:
static char ID; // Pass identification, replacement for typeid
UnreachableBlockElim() : FunctionPass(ID) {
initializeUnreachableBlockElimPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<DominatorTreeWrapperPass>();
}
};
}
char UnreachableBlockElim::ID = 0;
INITIALIZE_PASS(UnreachableBlockElim, "unreachableblockelim",
"Remove unreachable blocks from the CFG", false, false)
FunctionPass *llvm::createUnreachableBlockEliminationPass() {
return new UnreachableBlockElim();
}
bool UnreachableBlockElim::runOnFunction(Function &F) {
SmallPtrSet<BasicBlock*, 8> Reachable;
// Mark all reachable blocks.
for (BasicBlock *BB : depth_first_ext(&F, Reachable))
    (void)BB; // The traversal itself populates the Reachable set.
// Loop over all dead blocks, remembering them and deleting all instructions
// in them.
std::vector<BasicBlock*> DeadBlocks;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
if (!Reachable.count(I)) {
BasicBlock *BB = I;
DeadBlocks.push_back(BB);
while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
PN->replaceAllUsesWith(Constant::getNullValue(PN->getType()));
BB->getInstList().pop_front();
}
for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
(*SI)->removePredecessor(BB);
BB->dropAllReferences();
}
// Actually remove the blocks now.
for (unsigned i = 0, e = DeadBlocks.size(); i != e; ++i) {
DeadBlocks[i]->eraseFromParent();
}
return !DeadBlocks.empty();
}
namespace {
class UnreachableMachineBlockElim : public MachineFunctionPass {
bool runOnMachineFunction(MachineFunction &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
MachineModuleInfo *MMI;
public:
static char ID; // Pass identification, replacement for typeid
UnreachableMachineBlockElim() : MachineFunctionPass(ID) {}
};
}
char UnreachableMachineBlockElim::ID = 0;
INITIALIZE_PASS(UnreachableMachineBlockElim, "unreachable-mbb-elimination",
"Remove unreachable machine basic blocks", false, false)
char &llvm::UnreachableMachineBlockElimID = UnreachableMachineBlockElim::ID;
void UnreachableMachineBlockElim::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<MachineLoopInfo>();
AU.addPreserved<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) {
SmallPtrSet<MachineBasicBlock*, 8> Reachable;
bool ModifiedPHI = false;
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
MachineDominatorTree *MDT = getAnalysisIfAvailable<MachineDominatorTree>();
MachineLoopInfo *MLI = getAnalysisIfAvailable<MachineLoopInfo>();
// Mark all reachable blocks.
for (MachineBasicBlock *BB : depth_first_ext(&F, Reachable))
(void)BB; // The traversal itself records each block in Reachable.
// Loop over all dead blocks, remembering them and deleting all instructions
// in them.
std::vector<MachineBasicBlock*> DeadBlocks;
for (MachineFunction::iterator I = F.begin(), E = F.end(); I != E; ++I) {
MachineBasicBlock *BB = I;
// Test for deadness.
if (!Reachable.count(BB)) {
DeadBlocks.push_back(BB);
// Update dominator and loop info.
if (MLI) MLI->removeBlock(BB);
if (MDT && MDT->getNode(BB)) MDT->eraseNode(BB);
while (BB->succ_begin() != BB->succ_end()) {
MachineBasicBlock* succ = *BB->succ_begin();
MachineBasicBlock::iterator start = succ->begin();
while (start != succ->end() && start->isPHI()) {
for (unsigned i = start->getNumOperands() - 1; i >= 2; i -= 2)
if (start->getOperand(i).isMBB() &&
start->getOperand(i).getMBB() == BB) {
start->RemoveOperand(i);
start->RemoveOperand(i-1);
}
start++;
}
BB->removeSuccessor(BB->succ_begin());
}
}
}
// Actually remove the blocks now.
for (unsigned i = 0, e = DeadBlocks.size(); i != e; ++i)
DeadBlocks[i]->eraseFromParent();
// Cleanup PHI nodes.
for (MachineFunction::iterator I = F.begin(), E = F.end(); I != E; ++I) {
MachineBasicBlock *BB = I;
// Prune unneeded PHI entries.
SmallPtrSet<MachineBasicBlock*, 8> preds(BB->pred_begin(),
BB->pred_end());
MachineBasicBlock::iterator phi = BB->begin();
while (phi != BB->end() && phi->isPHI()) {
for (unsigned i = phi->getNumOperands() - 1; i >= 2; i -= 2)
if (!preds.count(phi->getOperand(i).getMBB())) {
phi->RemoveOperand(i);
phi->RemoveOperand(i-1);
ModifiedPHI = true;
}
if (phi->getNumOperands() == 3) {
unsigned Input = phi->getOperand(1).getReg();
unsigned Output = phi->getOperand(0).getReg();
MachineInstr* temp = phi;
++phi;
temp->eraseFromParent();
ModifiedPHI = true;
if (Input != Output) {
MachineRegisterInfo &MRI = F.getRegInfo();
MRI.constrainRegClass(Input, MRI.getRegClass(Output));
MRI.replaceRegWith(Output, Input);
}
continue;
}
++phi;
}
}
F.RenumberBlocks();
return (!DeadBlocks.empty() || ModifiedPHI);
}
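// A sketch of the machine PHI operand layout the loops above walk (register
// and block names are made up):
//
//   %vreg2<def> = PHI %vreg0, <BB#0>, %vreg1, <BB#1>
//
// Operand 0 is the def; operands (1,2), (3,4), ... are (value, predecessor)
// pairs, which is why entries are removed two at a time walking i down to 2,
// and why a PHI left with exactly 3 operands has a single input and can be
// folded into a register replacement.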
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/TargetOptionsImpl.cpp | //===-- TargetOptionsImpl.cpp - Options that apply to all targets ----------==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the methods in the TargetOptions.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
/// DisableFramePointerElim - This returns true if frame pointer elimination
/// optimization should be disabled for the given machine function.
bool TargetOptions::DisableFramePointerElim(const MachineFunction &MF) const {
// Check to see if we should eliminate all frame pointers.
if (MF.getSubtarget().getFrameLowering()->noFramePointerElim(MF))
return true;
// Check to see if we should eliminate non-leaf frame pointers.
if (MF.getFunction()->hasFnAttribute("no-frame-pointer-elim-non-leaf"))
return MF.getFrameInfo()->hasCalls();
return false;
}
/// LessPreciseFPMAD - This flag returns true when the -enable-fp-mad option
/// is specified on the command line. When this flag is off (the default), the
/// code generator is not allowed to generate mad (multiply-add) if the
/// result is "less precise" than doing those operations individually.
bool TargetOptions::LessPreciseFPMAD() const {
return UnsafeFPMath || LessPreciseFPMADOption;
}
/// HonorSignDependentRoundingFPMath - Return true if the codegen must assume
/// that the rounding mode of the FPU can change from its default.
bool TargetOptions::HonorSignDependentRoundingFPMath() const {
return !UnsafeFPMath && HonorSignDependentRoundingFPMathOption;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/Analysis.cpp | //===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
using namespace llvm;
/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
const unsigned *Indices,
const unsigned *IndicesEnd,
unsigned CurIndex) {
// Base case: We're done.
if (Indices && Indices == IndicesEnd)
return CurIndex;
// Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(Ty)) {
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
EI != EE; ++EI) {
if (Indices && *Indices == unsigned(EI - EB))
return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
}
assert(!Indices && "Unexpected out of bound");
return CurIndex;
}
// Given an array type, recursively traverse the elements.
else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
unsigned NumElts = ATy->getNumElements();
// Compute the linear offset added when stepping over one element of the array.
unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
if (Indices) {
assert(*Indices < NumElts && "Unexpected out of bound");
// If the index is inside the array, compute the offset of the requested
// element and recurse into it with the remaining indices in the list.
CurIndex += EltLinearOffset* *Indices;
return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
}
CurIndex += EltLinearOffset*NumElts;
return CurIndex;
}
// We haven't found the type we're looking for, so keep searching.
return CurIndex + 1;
}
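// A minimal sketch (hypothetical helper, unused by LLVM itself): for the
// aggregate {i32, [2 x i32], i32}, the scalar selected by index path {2}
// has linear index 3, because the fields before it flatten to 3 scalars.
static unsigned exampleLinearIndex(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *Arr = ArrayType::get(I32, 2);
  StructType *STy = StructType::get(Ctx, {I32, Arr, I32});
  unsigned Indices[] = {2};
  return ComputeLinearIndex(STy, Indices, Indices + 1, 0); // Returns 3.
}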
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = DL.getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
EI != EE; ++EI)
ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
StartingOffset + SL->getElementOffset(EI - EB));
return;
}
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
uint64_t EltSize = DL.getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
StartingOffset + i * EltSize);
return;
}
// Interpret void as zero return values.
if (Ty->isVoidTy())
return;
// Base case: we can get an EVT for this LLVM IR type.
ValueVTs.push_back(TLI.getValueType(DL, Ty));
if (Offsets)
Offsets->push_back(StartingOffset);
}
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
V = V->stripPointerCasts();
GlobalValue *GV = dyn_cast<GlobalValue>(V);
GlobalVariable *Var = dyn_cast<GlobalVariable>(V);
if (Var && Var->getName() == "llvm.eh.catch.all.value") {
assert(Var->hasInitializer() &&
"The EH catch-all value must have an initializer");
Value *Init = Var->getInitializer();
GV = dyn_cast<GlobalValue>(Init);
if (!GV) V = cast<ConstantPointerNull>(Init);
}
assert((GV || isa<ConstantPointerNull>(V)) &&
"TypeInfo must be a global variable or NULL");
return GV;
}
/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
const TargetLowering &TLI) {
for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
InlineAsm::ConstraintInfo &CI = CInfos[i];
for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
if (CType == TargetLowering::C_Memory)
return true;
}
// Indirect operand accesses access memory.
if (CI.isIndirect)
return true;
}
return false;
}
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
switch (Pred) {
case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
case FCmpInst::FCMP_OEQ: return ISD::SETOEQ;
case FCmpInst::FCMP_OGT: return ISD::SETOGT;
case FCmpInst::FCMP_OGE: return ISD::SETOGE;
case FCmpInst::FCMP_OLT: return ISD::SETOLT;
case FCmpInst::FCMP_OLE: return ISD::SETOLE;
case FCmpInst::FCMP_ONE: return ISD::SETONE;
case FCmpInst::FCMP_ORD: return ISD::SETO;
case FCmpInst::FCMP_UNO: return ISD::SETUO;
case FCmpInst::FCMP_UEQ: return ISD::SETUEQ;
case FCmpInst::FCMP_UGT: return ISD::SETUGT;
case FCmpInst::FCMP_UGE: return ISD::SETUGE;
case FCmpInst::FCMP_ULT: return ISD::SETULT;
case FCmpInst::FCMP_ULE: return ISD::SETULE;
case FCmpInst::FCMP_UNE: return ISD::SETUNE;
case FCmpInst::FCMP_TRUE: return ISD::SETTRUE;
default: llvm_unreachable("Invalid FCmp predicate opcode!");
}
}
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
switch (CC) {
case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
default: return CC;
}
}
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
switch (Pred) {
case ICmpInst::ICMP_EQ: return ISD::SETEQ;
case ICmpInst::ICMP_NE: return ISD::SETNE;
case ICmpInst::ICMP_SLE: return ISD::SETLE;
case ICmpInst::ICMP_ULE: return ISD::SETULE;
case ICmpInst::ICMP_SGE: return ISD::SETGE;
case ICmpInst::ICMP_UGE: return ISD::SETUGE;
case ICmpInst::ICMP_SLT: return ISD::SETLT;
case ICmpInst::ICMP_ULT: return ISD::SETULT;
case ICmpInst::ICMP_SGT: return ISD::SETGT;
case ICmpInst::ICMP_UGT: return ISD::SETUGT;
default:
llvm_unreachable("Invalid ICmp predicate opcode!");
}
}
static bool isNoopBitcast(Type *T1, Type *T2,
const TargetLoweringBase& TLI) {
return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
(isa<VectorType>(T1) && isa<VectorType>(T2) &&
TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
SmallVectorImpl<unsigned> &ValLoc,
unsigned &DataBits,
const TargetLoweringBase &TLI,
const DataLayout &DL) {
while (true) {
// Try to look through V; if V is not an instruction, it can't be looked
// through.
const Instruction *I = dyn_cast<Instruction>(V);
if (!I || I->getNumOperands() == 0) return V;
const Value *NoopInput = nullptr;
Value *Op = I->getOperand(0);
if (isa<BitCastInst>(I)) {
// Look through truly no-op bitcasts.
if (isNoopBitcast(Op->getType(), I->getType(), TLI))
NoopInput = Op;
} else if (isa<GetElementPtrInst>(I)) {
// Look through getelementptr
if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
NoopInput = Op;
} else if (isa<IntToPtrInst>(I)) {
// Look through inttoptr.
// Make sure this isn't a truncating or extending cast. We could
// support this eventually, but don't bother for now.
if (!isa<VectorType>(I->getType()) &&
DL.getPointerSizeInBits() ==
cast<IntegerType>(Op->getType())->getBitWidth())
NoopInput = Op;
} else if (isa<PtrToIntInst>(I)) {
// Look through ptrtoint.
// Make sure this isn't a truncating or extending cast. We could
// support this eventually, but don't bother for now.
if (!isa<VectorType>(I->getType()) &&
DL.getPointerSizeInBits() ==
cast<IntegerType>(I->getType())->getBitWidth())
NoopInput = Op;
} else if (isa<TruncInst>(I) &&
TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
NoopInput = Op;
} else if (isa<CallInst>(I)) {
// Look through call (skipping callee)
for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
i != e; ++i) {
unsigned attrInd = i - I->op_begin() + 1;
if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
NoopInput = *i;
break;
}
}
} else if (isa<InvokeInst>(I)) {
// Look through invoke (skipping BB, BB, Callee)
for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
i != e; ++i) {
unsigned attrInd = i - I->op_begin() + 1;
if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
NoopInput = *i;
break;
}
}
} else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
// Value may come from either the aggregate or the scalar
ArrayRef<unsigned> InsertLoc = IVI->getIndices();
if (ValLoc.size() >= InsertLoc.size() &&
std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
// The type being inserted is a nested sub-type of the aggregate; we
// have to remove those initial indices to get the location we're
// interested in for the operand.
ValLoc.resize(ValLoc.size() - InsertLoc.size());
NoopInput = IVI->getInsertedValueOperand();
} else {
// The struct we're inserting into has the value we're interested in, no
// change of address.
NoopInput = Op;
}
} else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
// The part we're interested in will inevitably be some sub-section of the
// previous aggregate. Combine the two paths to obtain the true address of
// our element.
ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
NoopInput = Op;
}
// Terminate if we couldn't find anything to look through.
if (!NoopInput)
return V;
V = NoopInput;
}
}
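// A sketch of what the walk above looks through (the IR below is
// illustrative):
//
//   %agg = call { i8*, i32 } @f()
//   %ptr = extractvalue { i8*, i32 } %agg, 0
//   %val = bitcast i8* %ptr to i32*
//
// Starting from %val, getNoopInput steps through the no-op bitcast, then
// through the extractvalue (appending index 0 to ValLoc), and returns the
// call itself as the earliest source of the value.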
/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
SmallVectorImpl<unsigned> &RetIndices,
SmallVectorImpl<unsigned> &CallIndices,
bool AllowDifferingSizes,
const TargetLoweringBase &TLI,
const DataLayout &DL) {
// Trace the sub-value needed by the return value as far back up the graph as
// possible, in the hope that it will intersect with the value produced by the
// call. In the simple case with no "returned" attribute, the hope is actually
// that we end up back at the tail call instruction itself.
unsigned BitsRequired = UINT_MAX;
RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);
// If this slot in the value returned is undef, it doesn't matter what the
// call puts there, it'll be fine.
if (isa<UndefValue>(RetVal))
return true;
// Now do a similar search up through the graph to find where the value
// actually returned by the "tail call" comes from. In the simple case without
// a "returned" attribute, the search will be blocked immediately and the loop
// is a no-op.
unsigned BitsProvided = UINT_MAX;
CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);
// There's no hope if we can't actually trace them to (the same part of!) the
// same value.
if (CallVal != RetVal || CallIndices != RetIndices)
return false;
// However, intervening truncates may have made the call non-tail. Make sure
// all the bits that are needed by the "ret" have been provided by the "tail
// call". FIXME: with sufficiently cunning bit-tracking, we could look through
// extensions too.
if (BitsProvided < BitsRequired ||
(!AllowDifferingSizes && BitsProvided != BitsRequired))
return false;
return true;
}
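// A sketch of a case this accepts (illustrative IR, assuming the target
// allows truncation for tail calls):
//
//   %v = tail call i64 @g()
//   %t = trunc i64 %v to i32
//   ret i32 %t
//
// The ret requires 32 bits while the call provides 64, so the slot merely
// discards bits; with a zext/sext attribute, AllowDifferingSizes would be
// false and the size mismatch would reject the tail call instead.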
/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
if (ArrayType *AT = dyn_cast<ArrayType>(T))
return Idx < AT->getNumElements();
return Idx < cast<StructType>(T)->getNumElements();
}
/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
SmallVectorImpl<unsigned> &Path) {
// First march back up the tree until we can successfully increment one of the
// coordinates in Path.
while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
Path.pop_back();
SubTypes.pop_back();
}
// If we reached the top, then the iterator is done.
if (Path.empty())
return false;
// We know there's *some* valid leaf now, so march back down the tree picking
// out the left-most element at each node.
++Path.back();
Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
while (DeeperType->isAggregateType()) {
CompositeType *CT = cast<CompositeType>(DeeperType);
if (!indexReallyValid(CT, 0))
return true;
SubTypes.push_back(CT);
Path.push_back(0);
DeeperType = CT->getTypeAtIndex(0U);
}
return true;
}
/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next,
SmallVectorImpl<CompositeType *> &SubTypes,
SmallVectorImpl<unsigned> &Path) {
// First initialise the iterator components to the first "leaf" node
// (i.e. node with no valid sub-type at any index, so {} does count as a leaf
// despite nominally being an aggregate).
while (Next->isAggregateType() &&
indexReallyValid(cast<CompositeType>(Next), 0)) {
SubTypes.push_back(cast<CompositeType>(Next));
Path.push_back(0);
Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
}
// If there's no Path now, Next was originally scalar already (or empty
// leaf). We're done.
if (Path.empty())
return true;
// Otherwise, use normal iteration to keep looking through the tree until we
// find a non-aggregate type.
while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
if (!advanceToNextLeafType(SubTypes, Path))
return false;
}
return true;
}
/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
SmallVectorImpl<unsigned> &Path) {
do {
if (!advanceToNextLeafType(SubTypes, Path))
return false;
assert(!Path.empty() && "found a leaf but didn't set the path?");
} while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());
return true;
}
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
const Instruction *I = CS.getInstruction();
const BasicBlock *ExitBB = I->getParent();
const TerminatorInst *Term = ExitBB->getTerminator();
const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
// The block must end in a return statement or unreachable.
//
// FIXME: Decline tailcall if it's not guaranteed and if the block ends in
// an unreachable, for now. The way tailcall optimization is currently
// implemented means it will add an epilogue followed by a jump. That is
// not profitable. Also, if the callee is a special function (e.g.
// longjmp on x86), it can end up causing miscompilation that has not
// been fully understood.
if (!Ret &&
(!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
return false;
// If I will have a chain, make sure no other instruction that will have a
// chain interposes between I and the return.
if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
!isSafeToSpeculativelyExecute(I))
for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
if (&*BBI == I)
break;
// Debug info intrinsics do not get in the way of tail call optimization.
if (isa<DbgInfoIntrinsic>(BBI))
continue;
if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
!isSafeToSpeculativelyExecute(BBI))
return false;
}
const Function *F = ExitBB->getParent();
return returnTypeIsEligibleForTailCall(
F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
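// A sketch of the target-independent requirement (illustrative IR):
//
//   %r = tail call i32 @callee()
//   ret i32 %r
//
// is in tail call position, while inserting any instruction with a chain
// between the two, e.g.
//
//   %r = tail call i32 @callee()
//   store i32 0, i32* @gv
//   ret i32 %r
//
// makes the scan above return false.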
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
const Instruction *I,
const ReturnInst *Ret,
const TargetLoweringBase &TLI) {
// If the block ends with a void return or unreachable, it doesn't matter
// what the call's return type is.
if (!Ret || Ret->getNumOperands() == 0) return true;
// If the return value is undef, it doesn't matter what the call's
// return type is.
if (isa<UndefValue>(Ret->getOperand(0))) return true;
// Make sure the attributes attached to each return are compatible.
AttrBuilder CallerAttrs(F->getAttributes(),
AttributeSet::ReturnIndex);
AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
AttributeSet::ReturnIndex);
// Noalias is completely benign as far as calling convention goes, it
// shouldn't affect whether the call is a tail call.
CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);
bool AllowDifferingSizes = true;
if (CallerAttrs.contains(Attribute::ZExt)) {
if (!CalleeAttrs.contains(Attribute::ZExt))
return false;
AllowDifferingSizes = false;
CallerAttrs.removeAttribute(Attribute::ZExt);
CalleeAttrs.removeAttribute(Attribute::ZExt);
} else if (CallerAttrs.contains(Attribute::SExt)) {
if (!CalleeAttrs.contains(Attribute::SExt))
return false;
AllowDifferingSizes = false;
CallerAttrs.removeAttribute(Attribute::SExt);
CalleeAttrs.removeAttribute(Attribute::SExt);
}
// If they're still different, there's some facet we don't understand
// (currently only "inreg", but in future who knows). It may be OK but the
// only safe option is to reject the tail call.
if (CallerAttrs != CalleeAttrs)
return false;
const Value *RetVal = Ret->getOperand(0), *CallVal = I;
SmallVector<unsigned, 4> RetPath, CallPath;
SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;
bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);
// Nothing's actually returned; it doesn't matter what the callee put there,
// it's a valid tail call.
if (RetEmpty)
return true;
// Iterate pairwise through each of the value types making up the tail call
// and the corresponding return. For each one we want to know whether it's
// essentially going directly from the tail call to the ret, via operations
// that end up not generating any code.
//
// We allow a certain amount of covariance here. For example it's permitted
// for the tail call to define more bits than the ret actually cares about
// (e.g. via a truncate).
do {
if (CallEmpty) {
// We've exhausted the values produced by the tail call instruction, the
// rest are essentially undef. The type doesn't really matter, but we need
// *something*.
Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
CallVal = UndefValue::get(SlotType);
}
// The manipulations performed when we're looking through an insertvalue or
// an extractvalue would happen at the front of the RetPath list, so since
// we have to copy it anyway, it's more efficient to create a reversed copy.
SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());
// Finally, we can check whether the value produced by the tail call at this
// index is compatible with the value we return.
if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
AllowDifferingSizes, TLI,
F->getParent()->getDataLayout()))
return false;
CallEmpty = !nextRealType(CallSubTypes, CallPath);
} while (nextRealType(RetSubTypes, RetPath));
return true;
}
bool llvm::canBeOmittedFromSymbolTable(const GlobalValue *GV) {
if (!GV->hasLinkOnceODRLinkage())
return false;
if (GV->hasUnnamedAddr())
return true;
// If it is a non-constant variable, it needs to be uniqued across shared
// objects.
if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
if (!Var->isConstant())
return false;
}
// An alias can point to a variable. We could try to resolve the alias to
// decide, but for now just don't hide them.
if (isa<GlobalAlias>(GV))
return false;
GlobalStatus GS;
if (GlobalStatus::analyzeGlobal(GV, GS))
return false;
return !GS.IsCompared;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineFunctionPrinterPass.cpp | //===-- MachineFunctionPrinterPass.cpp ------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineFunctionPrinterPass implementation.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
namespace {
/// MachineFunctionPrinterPass - This is a pass to dump the IR of a
/// MachineFunction.
///
struct MachineFunctionPrinterPass : public MachineFunctionPass {
static char ID;
raw_ostream &OS;
const std::string Banner;
MachineFunctionPrinterPass() : MachineFunctionPass(ID), OS(dbgs()) { }
MachineFunctionPrinterPass(raw_ostream &os, const std::string &banner)
: MachineFunctionPass(ID), OS(os), Banner(banner) {}
const char *getPassName() const override { return "MachineFunction Printer"; }
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override {
OS << "# " << Banner << ":\n";
MF.print(OS, getAnalysisIfAvailable<SlotIndexes>());
return false;
}
};
char MachineFunctionPrinterPass::ID = 0;
}
char &llvm::MachineFunctionPrinterPassID = MachineFunctionPrinterPass::ID;
INITIALIZE_PASS(MachineFunctionPrinterPass, "machineinstr-printer",
"Machine Function Printer", false, false)
namespace llvm {
/// Returns a newly-created MachineFunction Printer pass. The
/// default banner is empty.
///
MachineFunctionPass *createMachineFunctionPrinterPass(raw_ostream &OS,
const std::string &Banner){
return new MachineFunctionPrinterPass(OS, Banner);
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/DFAPacketizer.cpp | //=- llvm/CodeGen/DFAPacketizer.cpp - DFA Packetizer for VLIW -*- C++ -*-=====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This class implements a deterministic finite automaton (DFA) based
// packetizing mechanism for VLIW architectures. It provides APIs to
// determine whether there exists a legal mapping of instructions to
// functional unit assignments in a packet. The DFA is auto-generated from
// the target's Schedule.td file.
//
// A DFA consists of 3 major elements: states, inputs, and transitions. For
// the packetizing mechanism, the input is the set of instruction classes for
// a target. The state models all possible combinations of functional unit
// consumption for a given set of instructions in a packet. A transition
// models the addition of an instruction to a packet. In the DFA constructed
// by this class, if an instruction can be added to a packet, then a valid
// transition exists from the corresponding state. Invalid transitions
// indicate that the instruction cannot be added to the current packet.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
DFAPacketizer::DFAPacketizer(const InstrItineraryData *I, const int (*SIT)[2],
const unsigned *SET):
InstrItins(I), CurrentState(0), DFAStateInputTable(SIT),
DFAStateEntryTable(SET) {}
//
// ReadTable - Read the DFA transition table and update CachedTable.
//
// Format of the transition tables:
// DFAStateInputTable[][2] = pairs of <Input, Transition> for all valid
// transitions
// DFAStateEntryTable[i] = Index of the first entry in DFAStateInputTable
// for the ith state
//
void DFAPacketizer::ReadTable(unsigned int state) {
unsigned ThisState = DFAStateEntryTable[state];
unsigned NextStateInTable = DFAStateEntryTable[state+1];
// Early exit in case CachedTable already contains this
// state's transitions.
if (CachedTable.count(UnsignPair(state,
DFAStateInputTable[ThisState][0])))
return;
for (unsigned i = ThisState; i < NextStateInTable; i++)
CachedTable[UnsignPair(state, DFAStateInputTable[i][0])] =
DFAStateInputTable[i][1];
}
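// A sketch of the table encoding with made-up values (not any real
// target's tables):
//
//   DFAStateEntryTable[]    = {0, 2, 3};
//   DFAStateInputTable[][2] = {{1, 5}, {2, 7}, {1, 9}};
//
// State 0 owns rows [0, 2): on input 1 it transitions to state 5, on input
// 2 to state 7. State 1 owns row [2, 3): only input 1 has a legal
// transition, so any other instruction class cannot join that packet.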
// canReserveResources - Check if the resources occupied by an MCInstrDesc
// are available in the current state.
bool DFAPacketizer::canReserveResources(const llvm::MCInstrDesc *MID) {
unsigned InsnClass = MID->getSchedClass();
const llvm::InstrStage *IS = InstrItins->beginStage(InsnClass);
unsigned FuncUnits = IS->getUnits();
UnsignPair StateTrans = UnsignPair(CurrentState, FuncUnits);
ReadTable(CurrentState);
return (CachedTable.count(StateTrans) != 0);
}
// reserveResources - Reserve the resources occupied by an MCInstrDesc and
// change the current state to reflect that change.
void DFAPacketizer::reserveResources(const llvm::MCInstrDesc *MID) {
unsigned InsnClass = MID->getSchedClass();
const llvm::InstrStage *IS = InstrItins->beginStage(InsnClass);
unsigned FuncUnits = IS->getUnits();
UnsignPair StateTrans = UnsignPair(CurrentState, FuncUnits);
ReadTable(CurrentState);
assert(CachedTable.count(StateTrans) != 0);
CurrentState = CachedTable[StateTrans];
}
// canReserveResources - Check if the resources occupied by a machine
// instruction are available in the current state.
bool DFAPacketizer::canReserveResources(llvm::MachineInstr *MI) {
const llvm::MCInstrDesc &MID = MI->getDesc();
return canReserveResources(&MID);
}
// reserveResources - Reserve the resources occupied by a machine
// instruction and change the current state to reflect that change.
void DFAPacketizer::reserveResources(llvm::MachineInstr *MI) {
const llvm::MCInstrDesc &MID = MI->getDesc();
reserveResources(&MID);
}
namespace llvm {
// DefaultVLIWScheduler - This class extends ScheduleDAGInstrs and overrides
// Schedule method to build the dependence graph.
class DefaultVLIWScheduler : public ScheduleDAGInstrs {
public:
DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
bool IsPostRA);
// Schedule - Actual scheduling work.
void schedule() override;
};
}
DefaultVLIWScheduler::DefaultVLIWScheduler(MachineFunction &MF,
MachineLoopInfo &MLI, bool IsPostRA)
: ScheduleDAGInstrs(MF, &MLI, IsPostRA) {
CanHandleTerminators = true;
}
void DefaultVLIWScheduler::schedule() {
// Build the scheduling graph.
buildSchedGraph(nullptr);
}
// VLIWPacketizerList Ctor
VLIWPacketizerList::VLIWPacketizerList(MachineFunction &MF,
MachineLoopInfo &MLI, bool IsPostRA)
: MF(MF) {
TII = MF.getSubtarget().getInstrInfo();
ResourceTracker = TII->CreateTargetScheduleState(MF.getSubtarget());
VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, IsPostRA);
}
// VLIWPacketizerList Dtor
VLIWPacketizerList::~VLIWPacketizerList() {
if (VLIWScheduler)
delete VLIWScheduler;
if (ResourceTracker)
delete ResourceTracker;
}
// endPacket - End the current packet, bundle packet instructions and reset
// DFA state.
void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
MachineInstr *MI) {
if (CurrentPacketMIs.size() > 1) {
MachineInstr *MIFirst = CurrentPacketMIs.front();
finalizeBundle(*MBB, MIFirst, MI);
}
CurrentPacketMIs.clear();
ResourceTracker->clearResources();
}
// PacketizeMIs - Bundle machine instructions into packets.
void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
MachineBasicBlock::iterator BeginItr,
MachineBasicBlock::iterator EndItr) {
assert(VLIWScheduler && "VLIW Scheduler is not initialized!");
VLIWScheduler->startBlock(MBB);
VLIWScheduler->enterRegion(MBB, BeginItr, EndItr,
std::distance(BeginItr, EndItr));
VLIWScheduler->schedule();
// Generate MI -> SU map.
MIToSUnit.clear();
for (unsigned i = 0, e = VLIWScheduler->SUnits.size(); i != e; ++i) {
SUnit *SU = &VLIWScheduler->SUnits[i];
MIToSUnit[SU->getInstr()] = SU;
}
// The main packetizer loop.
for (; BeginItr != EndItr; ++BeginItr) {
MachineInstr *MI = BeginItr;
this->initPacketizerState();
// End the current packet if needed.
if (this->isSoloInstruction(MI)) {
endPacket(MBB, MI);
continue;
}
// Ignore pseudo instructions.
if (this->ignorePseudoInstruction(MI, MBB))
continue;
SUnit *SUI = MIToSUnit[MI];
assert(SUI && "Missing SUnit Info!");
// Ask DFA if machine resource is available for MI.
bool ResourceAvail = ResourceTracker->canReserveResources(MI);
if (ResourceAvail) {
// Dependency check for MI with instructions in CurrentPacketMIs.
for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
VE = CurrentPacketMIs.end(); VI != VE; ++VI) {
MachineInstr *MJ = *VI;
SUnit *SUJ = MIToSUnit[MJ];
assert(SUJ && "Missing SUnit Info!");
// Is it legal to packetize SUI and SUJ together?
if (!this->isLegalToPacketizeTogether(SUI, SUJ)) {
// Allow packetization if dependency can be pruned.
if (!this->isLegalToPruneDependencies(SUI, SUJ)) {
// End the packet if dependency cannot be pruned.
endPacket(MBB, MI);
break;
} // !isLegalToPruneDependencies.
} // !isLegalToPacketizeTogether.
} // For all instructions in CurrentPacketMIs.
} else {
// End the packet if resource is not available.
endPacket(MBB, MI);
}
// Add MI to the current packet.
BeginItr = this->addToPacket(MI);
} // For all instructions in BB.
// End any packet left behind.
endPacket(MBB, EndItr);
VLIWScheduler->exitRegion();
VLIWScheduler->finishBlock();
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegAllocBase.cpp | //===-- RegAllocBase.cpp - Register Allocator Base Class ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RegAllocBase class which provides common functionality
// for LiveIntervalUnion-based register allocators.
//
//===----------------------------------------------------------------------===//
#include "RegAllocBase.h"
#include "Spiller.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Target/TargetRegisterInfo.h"
#ifndef NDEBUG
#include "llvm/ADT/SparseBitVector.h"
#endif
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumNewQueued , "Number of new live ranges queued");
// Temporary verification option until we can put verification inside
// MachineVerifier.
static cl::opt<bool, true>
VerifyRegAlloc("verify-regalloc", cl::location(RegAllocBase::VerifyEnabled),
cl::desc("Verify during register allocation"));
const char RegAllocBase::TimerGroupName[] = "Register Allocation";
bool RegAllocBase::VerifyEnabled = false;
//===----------------------------------------------------------------------===//
// RegAllocBase Implementation
//===----------------------------------------------------------------------===//
// Pin the vtable to this file.
void RegAllocBase::anchor() {}
void RegAllocBase::init(VirtRegMap &vrm,
LiveIntervals &lis,
LiveRegMatrix &mat) {
TRI = &vrm.getTargetRegInfo();
MRI = &vrm.getRegInfo();
VRM = &vrm;
LIS = &lis;
Matrix = &mat;
MRI->freezeReservedRegs(vrm.getMachineFunction());
RegClassInfo.runOnMachineFunction(vrm.getMachineFunction());
}
// Visit all the live registers. If they are already assigned to a physical
// register, unify them with the corresponding LiveIntervalUnion, otherwise push
// them on the priority queue for later assignment.
void RegAllocBase::seedLiveRegs() {
NamedRegionTimer T("Seed Live Regs", TimerGroupName, TimePassesIsEnabled);
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (MRI->reg_nodbg_empty(Reg))
continue;
enqueue(&LIS->getInterval(Reg));
}
}
// Top-level driver to manage the queue of unassigned VirtRegs and call the
// selectOrSplit implementation.
void RegAllocBase::allocatePhysRegs() {
seedLiveRegs();
// Continue assigning vregs one at a time to available physical registers.
while (LiveInterval *VirtReg = dequeue()) {
assert(!VRM->hasPhys(VirtReg->reg) && "Register already assigned");
// Unused registers can appear when the spiller coalesces snippets.
if (MRI->reg_nodbg_empty(VirtReg->reg)) {
DEBUG(dbgs() << "Dropping unused " << *VirtReg << '\n');
aboutToRemoveInterval(*VirtReg);
LIS->removeInterval(VirtReg->reg);
continue;
}
// Invalidate all interference queries, live ranges could have changed.
Matrix->invalidateVirtRegs();
// selectOrSplit requests the allocator to return an available physical
// register if possible and populate a list of new live intervals that
// result from splitting.
DEBUG(dbgs() << "\nselectOrSplit "
<< TRI->getRegClassName(MRI->getRegClass(VirtReg->reg))
<< ':' << *VirtReg << " w=" << VirtReg->weight << '\n');
typedef SmallVector<unsigned, 4> VirtRegVec;
VirtRegVec SplitVRegs;
unsigned AvailablePhysReg = selectOrSplit(*VirtReg, SplitVRegs);
if (AvailablePhysReg == ~0u) {
// selectOrSplit failed to find a register!
// Probably caused by an inline asm.
MachineInstr *MI = nullptr;
for (MachineRegisterInfo::reg_instr_iterator
I = MRI->reg_instr_begin(VirtReg->reg), E = MRI->reg_instr_end();
I != E; ) {
MachineInstr *TmpMI = &*(I++);
if (TmpMI->isInlineAsm()) {
MI = TmpMI;
break;
}
}
if (MI)
MI->emitError("inline assembly requires more registers than available");
else
report_fatal_error("ran out of registers during register allocation");
// Keep going after reporting the error.
VRM->assignVirt2Phys(VirtReg->reg,
RegClassInfo.getOrder(MRI->getRegClass(VirtReg->reg)).front());
continue;
}
if (AvailablePhysReg)
Matrix->assign(*VirtReg, AvailablePhysReg);
for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
I != E; ++I) {
LiveInterval *SplitVirtReg = &LIS->getInterval(*I);
assert(!VRM->hasPhys(SplitVirtReg->reg) && "Register already assigned");
if (MRI->reg_nodbg_empty(SplitVirtReg->reg)) {
DEBUG(dbgs() << "not queueing unused " << *SplitVirtReg << '\n');
aboutToRemoveInterval(*SplitVirtReg);
LIS->removeInterval(SplitVirtReg->reg);
continue;
}
DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
"expect split value in virtual register");
enqueue(SplitVirtReg);
++NumNewQueued;
}
}
}
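// A sketch of the selectOrSplit contract the loop above depends on
// (hypothetical subclass, shown commented out; not a real allocator):
//
//   unsigned MyRegAlloc::selectOrSplit(LiveInterval &VirtReg,
//                                      SmallVectorImpl<unsigned> &NewVRegs) {
//     if (unsigned PhysReg = tryAssign(VirtReg))
//       return PhysReg;                // Assigned directly.
//     if (trySplit(VirtReg, NewVRegs))
//       return 0;                      // Requeue the split pieces.
//     return ~0u;                      // Allocation failed.
//   }
//
// Returning 0 defers the vreg, a physreg assigns it, and ~0u triggers the
// error path above.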
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/CMakeLists.txt | add_llvm_library(LLVMCodeGen
AggressiveAntiDepBreaker.cpp
AllocationOrder.cpp
Analysis.cpp
AtomicExpandPass.cpp
BasicTargetTransformInfo.cpp
BranchFolding.cpp
CalcSpillWeights.cpp
CallingConvLower.cpp
CodeGen.cpp
CodeGenPrepare.cpp
CoreCLRGC.cpp
CriticalAntiDepBreaker.cpp
DFAPacketizer.cpp
DeadMachineInstructionElim.cpp
DwarfEHPrepare.cpp
EarlyIfConversion.cpp
EdgeBundles.cpp
ErlangGC.cpp
ExecutionDepsFix.cpp
ExpandISelPseudos.cpp
ExpandPostRAPseudos.cpp
FaultMaps.cpp
GCMetadata.cpp
GCMetadataPrinter.cpp
GCRootLowering.cpp
GCStrategy.cpp
GlobalMerge.cpp
IfConversion.cpp
ImplicitNullChecks.cpp
InlineSpiller.cpp
InterferenceCache.cpp
InterleavedAccessPass.cpp
IntrinsicLowering.cpp
LLVMTargetMachine.cpp
LatencyPriorityQueue.cpp
LexicalScopes.cpp
LiveDebugVariables.cpp
LiveInterval.cpp
LiveIntervalAnalysis.cpp
LiveIntervalUnion.cpp
LiveRangeCalc.cpp
LiveRangeEdit.cpp
LiveRegMatrix.cpp
LivePhysRegs.cpp
LiveStackAnalysis.cpp
LiveVariables.cpp
LocalStackSlotAllocation.cpp
MachineBasicBlock.cpp
MachineBlockFrequencyInfo.cpp
MachineBlockPlacement.cpp
MachineBranchProbabilityInfo.cpp
MachineCSE.cpp
MachineCombiner.cpp
MachineCopyPropagation.cpp
MachineDominators.cpp
MachineDominanceFrontier.cpp
MachineFunction.cpp
MachineFunctionAnalysis.cpp
MachineFunctionPass.cpp
MachineFunctionPrinterPass.cpp
MachineInstr.cpp
MachineInstrBundle.cpp
MachineLICM.cpp
MachineLoopInfo.cpp
MachineModuleInfo.cpp
MachineModuleInfoImpls.cpp
MachinePassRegistry.cpp
MachinePostDominators.cpp
MachineRegisterInfo.cpp
MachineRegionInfo.cpp
MachineSSAUpdater.cpp
MachineScheduler.cpp
MachineSink.cpp
MachineTraceMetrics.cpp
MachineVerifier.cpp
MIRPrinter.cpp
MIRPrintingPass.cpp
OcamlGC.cpp
OptimizePHIs.cpp
PHIElimination.cpp
PHIEliminationUtils.cpp
Passes.cpp
PeepholeOptimizer.cpp
PostRASchedulerList.cpp
ProcessImplicitDefs.cpp
PrologEpilogInserter.cpp
PseudoSourceValue.cpp
RegAllocBase.cpp
RegAllocBasic.cpp
RegAllocFast.cpp
RegAllocGreedy.cpp
RegAllocPBQP.cpp
RegisterClassInfo.cpp
RegisterCoalescer.cpp
RegisterPressure.cpp
RegisterScavenging.cpp
ScheduleDAG.cpp
ScheduleDAGInstrs.cpp
ScheduleDAGPrinter.cpp
ScoreboardHazardRecognizer.cpp
ShrinkWrap.cpp
ShadowStackGC.cpp
ShadowStackGCLowering.cpp
SjLjEHPrepare.cpp
SlotIndexes.cpp
SpillPlacement.cpp
SplitKit.cpp
StackColoring.cpp
StackProtector.cpp
StackSlotColoring.cpp
StackMapLivenessAnalysis.cpp
StackMaps.cpp
StatepointExampleGC.cpp
TailDuplication.cpp
TargetFrameLoweringImpl.cpp
TargetInstrInfo.cpp
TargetLoweringBase.cpp
TargetLoweringObjectFileImpl.cpp
TargetOptionsImpl.cpp
TargetRegisterInfo.cpp
TargetSchedule.cpp
TwoAddressInstructionPass.cpp
UnreachableBlockElim.cpp
VirtRegMap.cpp
# WinEHPrepare.cpp
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/CodeGen
${LLVM_MAIN_INCLUDE_DIR}/llvm/CodeGen/PBQP
)
add_dependencies(LLVMCodeGen intrinsics_gen)
add_subdirectory(SelectionDAG)
add_subdirectory(AsmPrinter)
add_subdirectory(MIRParser)
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/TwoAddressInstructionPass.cpp | //===-- TwoAddressInstructionPass.cpp - Two-Address instruction pass ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TwoAddress instruction pass which is used
// by most register allocators. Two-Address instructions are rewritten
// from:
//
// A = B op C
//
// to:
//
// A = B
// A op= C
//
// Note that if a register allocator chooses to use this pass, it has to be
// capable of handling the non-SSA nature of these rewritten virtual
// registers.
//
// It is also worth noting that the duplicate operand of the two
// address instruction is removed.
//
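// For example, on X86 a tied-operand add such as
//
//   %vreg1<def> = ADD32rr %vreg2, %vreg3
//
// is rewritten by this pass into
//
//   %vreg1<def> = COPY %vreg2
//   %vreg1<def,tied> = ADD32rr %vreg1, %vreg3
//
// (Register names and the tied-operand notation here are illustrative only.)
//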
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "twoaddrinstr"
STATISTIC(NumTwoAddressInstrs, "Number of two-address instructions");
STATISTIC(NumCommuted , "Number of instructions commuted to coalesce");
STATISTIC(NumAggrCommuted , "Number of instructions aggressively commuted");
STATISTIC(NumConvertedTo3Addr, "Number of instructions promoted to 3-address");
STATISTIC(Num3AddrSunk, "Number of 3-address instructions sunk");
STATISTIC(NumReSchedUps, "Number of instructions re-scheduled up");
STATISTIC(NumReSchedDowns, "Number of instructions re-scheduled down");
// Temporary flag to disable rescheduling.
static cl::opt<bool>
EnableRescheduling("twoaddr-reschedule",
cl::desc("Coalesce copies by rescheduling (default=true)"),
cl::init(true), cl::Hidden);
namespace {
class TwoAddressInstructionPass : public MachineFunctionPass {
MachineFunction *MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const InstrItineraryData *InstrItins;
MachineRegisterInfo *MRI;
LiveVariables *LV;
LiveIntervals *LIS;
AliasAnalysis *AA;
CodeGenOpt::Level OptLevel;
// The current basic block being processed.
MachineBasicBlock *MBB;
// DistanceMap - Keep track of the distance of an MI from the start of the
// current basic block.
DenseMap<MachineInstr*, unsigned> DistanceMap;
// Set of already processed instructions in the current block.
SmallPtrSet<MachineInstr*, 8> Processed;
// SrcRegMap - A map from virtual registers to physical registers which are
// likely targets to be coalesced to due to copies from physical registers to
// virtual registers. e.g. v1024 = move r0.
DenseMap<unsigned, unsigned> SrcRegMap;
// DstRegMap - A map from virtual registers to physical registers which are
// likely targets to be coalesced to due to copies to physical registers from
// virtual registers. e.g. r1 = move v1024.
DenseMap<unsigned, unsigned> DstRegMap;
bool sink3AddrInstruction(MachineInstr *MI, unsigned Reg,
MachineBasicBlock::iterator OldPos);
bool isRevCopyChain(unsigned FromReg, unsigned ToReg, int MaxLen);
bool noUseAfterLastDef(unsigned Reg, unsigned Dist, unsigned &LastDef);
bool isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC,
MachineInstr *MI, unsigned Dist);
bool commuteInstruction(MachineBasicBlock::iterator &mi,
unsigned RegB, unsigned RegC, unsigned Dist);
bool isProfitableToConv3Addr(unsigned RegA, unsigned RegB);
bool convertInstTo3Addr(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned RegA, unsigned RegB, unsigned Dist);
bool isDefTooClose(unsigned Reg, unsigned Dist, MachineInstr *MI);
bool rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned Reg);
bool rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned Reg);
bool tryInstructionTransform(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned SrcIdx, unsigned DstIdx,
unsigned Dist, bool shouldOnlyCommute);
void scanUses(unsigned DstReg);
void processCopy(MachineInstr *MI);
typedef SmallVector<std::pair<unsigned, unsigned>, 4> TiedPairList;
typedef SmallDenseMap<unsigned, TiedPairList> TiedOperandMap;
bool collectTiedOperands(MachineInstr *MI, TiedOperandMap&);
void processTiedPairs(MachineInstr *MI, TiedPairList&, unsigned &Dist);
void eliminateRegSequence(MachineBasicBlock::iterator&);
public:
static char ID; // Pass identification, replacement for typeid
TwoAddressInstructionPass() : MachineFunctionPass(ID) {
initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<LiveVariables>();
AU.addPreserved<SlotIndexes>();
AU.addPreserved<LiveIntervals>();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
MachineFunctionPass::getAnalysisUsage(AU);
}
/// runOnMachineFunction - Pass entry point.
bool runOnMachineFunction(MachineFunction&) override;
};
} // end anonymous namespace
char TwoAddressInstructionPass::ID = 0;
INITIALIZE_PASS_BEGIN(TwoAddressInstructionPass, "twoaddressinstruction",
"Two-Address instruction pass", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(TwoAddressInstructionPass, "twoaddressinstruction",
"Two-Address instruction pass", false, false)
char &llvm::TwoAddressInstructionPassID = TwoAddressInstructionPass::ID;
static bool isPlainlyKilled(MachineInstr *MI, unsigned Reg, LiveIntervals *LIS);
/// sink3AddrInstruction - A two-address instruction has been converted to a
/// three-address instruction to avoid clobbering a register. Try to sink it
/// past the instruction that would kill the above mentioned register to reduce
/// register pressure.
bool TwoAddressInstructionPass::
sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg,
MachineBasicBlock::iterator OldPos) {
// FIXME: Shouldn't we be trying to do this before we three-addressify the
// instruction? After this transformation is done, we no longer need
// the instruction to be in three-address form.
// Check if it's safe to move this instruction.
bool SeenStore = true; // Be conservative.
if (!MI->isSafeToMove(AA, SeenStore))
return false;
unsigned DefReg = 0;
SmallSet<unsigned, 4> UseRegs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (MO.isUse() && MOReg != SavedReg)
UseRegs.insert(MO.getReg());
if (!MO.isDef())
continue;
if (MO.isImplicit())
// Don't try to move it if it implicitly defines a register.
return false;
if (DefReg)
// For now, don't move any instructions that define multiple registers.
return false;
DefReg = MO.getReg();
}
// Find the instruction that kills SavedReg.
MachineInstr *KillMI = nullptr;
if (LIS) {
LiveInterval &LI = LIS->getInterval(SavedReg);
assert(LI.end() != LI.begin() &&
"Reg should not have empty live interval.");
SlotIndex MBBEndIdx = LIS->getMBBEndIdx(MBB).getPrevSlot();
LiveInterval::const_iterator I = LI.find(MBBEndIdx);
if (I != LI.end() && I->start < MBBEndIdx)
return false;
--I;
KillMI = LIS->getInstructionFromIndex(I->end);
}
if (!KillMI) {
for (MachineRegisterInfo::use_nodbg_iterator
UI = MRI->use_nodbg_begin(SavedReg),
UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
MachineOperand &UseMO = *UI;
if (!UseMO.isKill())
continue;
KillMI = UseMO.getParent();
break;
}
}
// If we find the instruction that kills SavedReg, and it is in an
// appropriate location, we can try to sink the current instruction
// past it.
if (!KillMI || KillMI->getParent() != MBB || KillMI == MI ||
KillMI == OldPos || KillMI->isTerminator())
return false;
// If any of the definitions are used by another instruction between the
// position and the kill use, then it's not safe to sink it.
//
// FIXME: This can be sped up if there is an easy way to query whether an
// instruction is before or after another instruction. Then we can use
// MachineRegisterInfo def / use instead.
MachineOperand *KillMO = nullptr;
MachineBasicBlock::iterator KillPos = KillMI;
++KillPos;
unsigned NumVisited = 0;
for (MachineBasicBlock::iterator I = std::next(OldPos); I != KillPos; ++I) {
MachineInstr *OtherMI = I;
// DBG_VALUE cannot be counted against the limit.
if (OtherMI->isDebugValue())
continue;
if (NumVisited > 30) // FIXME: Arbitrary limit to reduce compile time cost.
return false;
++NumVisited;
for (unsigned i = 0, e = OtherMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = OtherMI->getOperand(i);
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (DefReg == MOReg)
return false;
if (MO.isKill() || (LIS && isPlainlyKilled(OtherMI, MOReg, LIS))) {
if (OtherMI == KillMI && MOReg == SavedReg)
// Save the operand that kills the register. We want to unset the kill
// marker if we can sink MI past it.
KillMO = &MO;
else if (UseRegs.count(MOReg))
// One of the uses is killed before the destination.
return false;
}
}
}
assert(KillMO && "Didn't find kill");
if (!LIS) {
// Update kill and LV information.
KillMO->setIsKill(false);
KillMO = MI->findRegisterUseOperand(SavedReg, false, TRI);
KillMO->setIsKill(true);
if (LV)
LV->replaceKillInstruction(SavedReg, KillMI, MI);
}
// Move instruction to its destination.
MBB->remove(MI);
MBB->insert(KillPos, MI);
if (LIS)
LIS->handleMove(MI);
++Num3AddrSunk;
return true;
}
/// getSingleDef -- return the MachineInstr* if it is the single def of the Reg
/// in the current BB.
static MachineInstr *getSingleDef(unsigned Reg, MachineBasicBlock *BB,
const MachineRegisterInfo *MRI) {
MachineInstr *Ret = nullptr;
for (MachineInstr &DefMI : MRI->def_instructions(Reg)) {
if (DefMI.getParent() != BB || DefMI.isDebugValue())
continue;
if (!Ret)
Ret = &DefMI;
else if (Ret != &DefMI)
return nullptr;
}
return Ret;
}
/// Check if there is a reversed copy chain from FromReg to ToReg:
/// %Tmp1 = copy %Tmp2;
/// %FromReg = copy %Tmp1;
/// %ToReg = add %FromReg ...
/// %Tmp2 = copy %ToReg;
/// MaxLen specifies the maximum length of the copy chain the function
/// can walk through.
bool TwoAddressInstructionPass::isRevCopyChain(unsigned FromReg, unsigned ToReg,
int MaxLen) {
unsigned TmpReg = FromReg;
for (int i = 0; i < MaxLen; i++) {
MachineInstr *Def = getSingleDef(TmpReg, MBB, MRI);
if (!Def || !Def->isCopy())
return false;
TmpReg = Def->getOperand(1).getReg();
if (TmpReg == ToReg)
return true;
}
return false;
}
/// noUseAfterLastDef - Return true if there are no intervening uses between the
/// last instruction in the MBB that defines the specified register and the
/// two-address instruction which is being processed. It also returns the last
/// def location by reference.
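/// e.g. assuming DistanceMap places the last def of Reg at position 5, a use
/// at position 7, and the current instruction at Dist = 9 (hypothetical
/// numbers), the use intervenes and the function returns false.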
bool TwoAddressInstructionPass::noUseAfterLastDef(unsigned Reg, unsigned Dist,
unsigned &LastDef) {
LastDef = 0;
unsigned LastUse = Dist;
for (MachineOperand &MO : MRI->reg_operands(Reg)) {
MachineInstr *MI = MO.getParent();
if (MI->getParent() != MBB || MI->isDebugValue())
continue;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
if (DI == DistanceMap.end())
continue;
if (MO.isUse() && DI->second < LastUse)
LastUse = DI->second;
if (MO.isDef() && DI->second > LastDef)
LastDef = DI->second;
}
return !(LastUse > LastDef && LastUse < Dist);
}
/// isCopyToReg - Return true if the specified MI is a copy, insert_subreg or
/// subreg_to_reg instruction. It also returns the source and destination
/// registers and whether they are physical registers by reference.
static bool isCopyToReg(MachineInstr &MI, const TargetInstrInfo *TII,
unsigned &SrcReg, unsigned &DstReg,
bool &IsSrcPhys, bool &IsDstPhys) {
SrcReg = 0;
DstReg = 0;
if (MI.isCopy()) {
DstReg = MI.getOperand(0).getReg();
SrcReg = MI.getOperand(1).getReg();
} else if (MI.isInsertSubreg() || MI.isSubregToReg()) {
DstReg = MI.getOperand(0).getReg();
SrcReg = MI.getOperand(2).getReg();
} else
return false;
IsSrcPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
IsDstPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
return true;
}
/// isPlainlyKilled - Test if the given register value, which is used by the
/// given instruction, is killed by the given instruction.
static bool isPlainlyKilled(MachineInstr *MI, unsigned Reg,
LiveIntervals *LIS) {
if (LIS && TargetRegisterInfo::isVirtualRegister(Reg) &&
!LIS->isNotInMIMap(MI)) {
// FIXME: Sometimes tryInstructionTransform() will add instructions and
// test whether they can be folded before keeping them. In this case it
// sets a kill before recursively calling tryInstructionTransform() again.
// If there is no interval available, we assume that this instruction is
// one of those. A kill flag is manually inserted on the operand so the
// check below will handle it.
LiveInterval &LI = LIS->getInterval(Reg);
// This is to match the kill flag version where undefs don't have kill
// flags.
if (!LI.hasAtLeastOneValue())
return false;
SlotIndex useIdx = LIS->getInstructionIndex(MI);
LiveInterval::const_iterator I = LI.find(useIdx);
assert(I != LI.end() && "Reg must be live-in to use.");
return !I->end.isBlock() && SlotIndex::isSameInstr(I->end, useIdx);
}
return MI->killsRegister(Reg);
}
/// isKilled - Test if the given register value, which is used by the given
/// instruction, is killed by the given instruction. This looks through
/// coalescable copies to see if the original value is potentially not killed.
///
/// For example, in this code:
///
/// %reg1034 = copy %reg1024
/// %reg1035 = copy %reg1025<kill>
/// %reg1036 = add %reg1034<kill>, %reg1035<kill>
///
/// %reg1034 is not considered to be killed, since it is copied from a
/// register which is not killed. Treating it as not killed lets the
/// normal heuristics commute the (two-address) add, which lets
/// coalescing eliminate the extra copy.
///
/// If allowFalsePositives is true then likely kills are treated as kills even
/// if it can't be proven that they are kills.
static bool isKilled(MachineInstr &MI, unsigned Reg,
const MachineRegisterInfo *MRI,
const TargetInstrInfo *TII,
LiveIntervals *LIS,
bool allowFalsePositives) {
MachineInstr *DefMI = &MI;
for (;;) {
// All uses of physical registers are likely to be kills.
if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
(allowFalsePositives || MRI->hasOneUse(Reg)))
return true;
if (!isPlainlyKilled(DefMI, Reg, LIS))
return false;
if (TargetRegisterInfo::isPhysicalRegister(Reg))
return true;
MachineRegisterInfo::def_iterator Begin = MRI->def_begin(Reg);
// If there are multiple defs, we can't do a simple analysis, so just
// go with what the kill flag says.
if (std::next(Begin) != MRI->def_end())
return true;
DefMI = Begin->getParent();
bool IsSrcPhys, IsDstPhys;
unsigned SrcReg, DstReg;
// If the def is something other than a copy, then it isn't going to
// be coalesced, so follow the kill flag.
if (!isCopyToReg(*DefMI, TII, SrcReg, DstReg, IsSrcPhys, IsDstPhys))
return true;
Reg = SrcReg;
}
}
/// isTwoAddrUse - Return true if the specified MI uses the specified register
/// as a two-address use. If so, return the destination register by reference.
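/// For example (hypothetical vregs), given
/// %reg1026<def> = ADD %reg1024<tied>, %reg1025
/// a query for %reg1024 returns true and sets DstReg to %reg1026.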
static bool isTwoAddrUse(MachineInstr &MI, unsigned Reg, unsigned &DstReg) {
for (unsigned i = 0, NumOps = MI.getNumOperands(); i != NumOps; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg)
continue;
unsigned ti;
if (MI.isRegTiedToDefOperand(i, &ti)) {
DstReg = MI.getOperand(ti).getReg();
return true;
}
}
return false;
}
/// findOnlyInterestingUse - Given a register, if it has a single in-basic-block
/// use, return the use instruction if it's a copy or a two-address use.
static
MachineInstr *findOnlyInterestingUse(unsigned Reg, MachineBasicBlock *MBB,
MachineRegisterInfo *MRI,
const TargetInstrInfo *TII,
bool &IsCopy,
unsigned &DstReg, bool &IsDstPhys) {
if (!MRI->hasOneNonDBGUse(Reg))
// None or more than one use.
return nullptr;
MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(Reg);
if (UseMI.getParent() != MBB)
return nullptr;
unsigned SrcReg;
bool IsSrcPhys;
if (isCopyToReg(UseMI, TII, SrcReg, DstReg, IsSrcPhys, IsDstPhys)) {
IsCopy = true;
return &UseMI;
}
IsDstPhys = false;
if (isTwoAddrUse(UseMI, Reg, DstReg)) {
IsDstPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
return &UseMI;
}
return nullptr;
}
/// getMappedReg - Return the physical register the specified virtual register
/// might be mapped to.
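/// e.g. with a hypothetical map of { %v1 -> %v2, %v2 -> r0 }, a query for %v1
/// follows the chain and returns r0; an unmapped virtual register yields 0.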
static unsigned
getMappedReg(unsigned Reg, DenseMap<unsigned, unsigned> &RegMap) {
while (TargetRegisterInfo::isVirtualRegister(Reg)) {
DenseMap<unsigned, unsigned>::iterator SI = RegMap.find(Reg);
if (SI == RegMap.end())
return 0;
Reg = SI->second;
}
if (TargetRegisterInfo::isPhysicalRegister(Reg))
return Reg;
return 0;
}
/// regsAreCompatible - Return true if the two registers are equal or aliased.
///
static bool
regsAreCompatible(unsigned RegA, unsigned RegB, const TargetRegisterInfo *TRI) {
if (RegA == RegB)
return true;
if (!RegA || !RegB)
return false;
return TRI->regsOverlap(RegA, RegB);
}
/// isProfitableToCommute - Return true if it's potentially profitable to commute
/// the two-address instruction that's being processed.
bool
TwoAddressInstructionPass::
isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC,
MachineInstr *MI, unsigned Dist) {
if (OptLevel == CodeGenOpt::None)
return false;
// Determine if it's profitable to commute this two address instruction. In
// general, we want no uses between this instruction and the definition of
// the two-address register.
// e.g.
// %reg1028<def> = EXTRACT_SUBREG %reg1027<kill>, 1
// %reg1029<def> = MOV8rr %reg1028
// %reg1029<def> = SHR8ri %reg1029, 7, %EFLAGS<imp-def,dead>
// insert => %reg1030<def> = MOV8rr %reg1028
// %reg1030<def> = ADD8rr %reg1028<kill>, %reg1029<kill>, %EFLAGS<imp-def,dead>
// In this case, it might not be possible to coalesce the second MOV8rr
// instruction if the first one is coalesced. So it would be profitable to
// commute it:
// %reg1028<def> = EXTRACT_SUBREG %reg1027<kill>, 1
// %reg1029<def> = MOV8rr %reg1028
// %reg1029<def> = SHR8ri %reg1029, 7, %EFLAGS<imp-def,dead>
// insert => %reg1030<def> = MOV8rr %reg1029
// %reg1030<def> = ADD8rr %reg1029<kill>, %reg1028<kill>, %EFLAGS<imp-def,dead>
if (!isPlainlyKilled(MI, regC, LIS))
return false;
// Ok, we have something like:
// %reg1030<def> = ADD8rr %reg1028<kill>, %reg1029<kill>, %EFLAGS<imp-def,dead>
// let's see if it's worth commuting it.
// Look for situations like this:
// %reg1024<def> = MOV r1
// %reg1025<def> = MOV r0
// %reg1026<def> = ADD %reg1024, %reg1025
// r0 = MOV %reg1026
// Commute the ADD to hopefully eliminate an otherwise unavoidable copy.
unsigned ToRegA = getMappedReg(regA, DstRegMap);
if (ToRegA) {
unsigned FromRegB = getMappedReg(regB, SrcRegMap);
unsigned FromRegC = getMappedReg(regC, SrcRegMap);
bool CompB = FromRegB && regsAreCompatible(FromRegB, ToRegA, TRI);
bool CompC = FromRegC && regsAreCompatible(FromRegC, ToRegA, TRI);
// Compute if any of the following are true:
// -RegB is not tied to a register and RegC is compatible with RegA.
// -RegB is tied to the wrong physical register, but RegC is.
// -RegB is tied to the wrong physical register, and RegC isn't tied.
if ((!FromRegB && CompC) || (FromRegB && !CompB && (!FromRegC || CompC)))
return true;
// Don't compute if any of the following are true:
// -RegC is not tied to a register and RegB is compatible with RegA.
// -RegC is tied to the wrong physical register, but RegB is.
// -RegC is tied to the wrong physical register, and RegB isn't tied.
if ((!FromRegC && CompB) || (FromRegC && !CompC && (!FromRegB || CompB)))
return false;
}
// If there is a use of regC between its last def (could be livein) and this
// instruction, then bail.
unsigned LastDefC = 0;
if (!noUseAfterLastDef(regC, Dist, LastDefC))
return false;
// If there is a use of regB between its last def (could be livein) and this
// instruction, then go ahead and make this transformation.
unsigned LastDefB = 0;
if (!noUseAfterLastDef(regB, Dist, LastDefB))
return true;
// Look for situation like this:
// %reg101 = MOV %reg100
// %reg102 = ...
// %reg103 = ADD %reg102, %reg101
// ... = %reg103 ...
// %reg100 = MOV %reg103
// If there is a reversed copy chain from reg101 to reg103, commute the ADD
// to eliminate an otherwise unavoidable copy.
// FIXME:
// We can extend the logic further: If a pair of operands in an insn has
// been merged, the insn could be regarded as a virtual copy, and the virtual
// copy could also be used to construct a copy chain.
// To more generally minimize register copies, ideally the logic of two addr
// instruction pass should be integrated with register allocation pass where
// interference graph is available.
if (isRevCopyChain(regC, regA, 3))
return true;
if (isRevCopyChain(regB, regA, 3))
return false;
// Since there are no intervening uses of either register, commute if the
// def of regC is closer; its live interval is shorter.
return LastDefB && LastDefC && LastDefC > LastDefB;
}
/// commuteInstruction - Commute a two-address instruction and update the basic
/// block, distance map, and live variables if needed. Return true if it is
/// successful.
bool TwoAddressInstructionPass::
commuteInstruction(MachineBasicBlock::iterator &mi,
unsigned RegB, unsigned RegC, unsigned Dist) {
MachineInstr *MI = mi;
DEBUG(dbgs() << "2addr: COMMUTING : " << *MI);
MachineInstr *NewMI = TII->commuteInstruction(MI);
if (NewMI == nullptr) {
DEBUG(dbgs() << "2addr: COMMUTING FAILED!\n");
return false;
}
DEBUG(dbgs() << "2addr: COMMUTED TO: " << *NewMI);
assert(NewMI == MI &&
"TargetInstrInfo::commuteInstruction() should not return a new "
"instruction unless it was requested.");
// Update source register map.
unsigned FromRegC = getMappedReg(RegC, SrcRegMap);
if (FromRegC) {
unsigned RegA = MI->getOperand(0).getReg();
SrcRegMap[RegA] = FromRegC;
}
return true;
}
/// isProfitableToConv3Addr - Return true if it is profitable to convert the
/// given 2-address instruction to a 3-address one.
bool
TwoAddressInstructionPass::isProfitableToConv3Addr(unsigned RegA,unsigned RegB){
// Look for situations like this:
// %reg1024<def> = MOV r1
// %reg1025<def> = MOV r0
// %reg1026<def> = ADD %reg1024, %reg1025
// r2 = MOV %reg1026
// Turn ADD into a 3-address instruction to avoid a copy.
unsigned FromRegB = getMappedReg(RegB, SrcRegMap);
if (!FromRegB)
return false;
unsigned ToRegA = getMappedReg(RegA, DstRegMap);
return (ToRegA && !regsAreCompatible(FromRegB, ToRegA, TRI));
}
/// convertInstTo3Addr - Convert the specified two-address instruction into a
/// three address one. Return true if this transformation was successful.
bool
TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned RegA, unsigned RegB,
unsigned Dist) {
// FIXME: Why does convertToThreeAddress() need an iterator reference?
MachineFunction::iterator MFI = MBB;
MachineInstr *NewMI = TII->convertToThreeAddress(MFI, mi, LV);
assert(MBB == MFI && "convertToThreeAddress changed iterator reference");
if (!NewMI)
return false;
DEBUG(dbgs() << "2addr: CONVERTING 2-ADDR: " << *mi);
DEBUG(dbgs() << "2addr: TO 3-ADDR: " << *NewMI);
bool Sunk = false;
if (LIS)
LIS->ReplaceMachineInstrInMaps(mi, NewMI);
if (NewMI->findRegisterUseOperand(RegB, false, TRI))
// FIXME: Temporary workaround. If the new instruction doesn't
// use RegB, convertToThreeAddress must have created more
// than one instruction.
Sunk = sink3AddrInstruction(NewMI, RegB, mi);
MBB->erase(mi); // Nuke the old inst.
if (!Sunk) {
DistanceMap.insert(std::make_pair(NewMI, Dist));
mi = NewMI;
nmi = std::next(mi);
}
// Update source and destination register maps.
SrcRegMap.erase(RegA);
DstRegMap.erase(RegB);
return true;
}
/// scanUses - Scan forward recursively for only uses, update maps if the use
/// is a copy or a two-address instruction.
void
TwoAddressInstructionPass::scanUses(unsigned DstReg) {
SmallVector<unsigned, 4> VirtRegPairs;
bool IsDstPhys;
bool IsCopy = false;
unsigned NewReg = 0;
unsigned Reg = DstReg;
while (MachineInstr *UseMI = findOnlyInterestingUse(Reg, MBB, MRI, TII,IsCopy,
NewReg, IsDstPhys)) {
if (IsCopy && !Processed.insert(UseMI).second)
break;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UseMI);
if (DI != DistanceMap.end())
// Earlier in the same MBB. Reached via a back edge.
break;
if (IsDstPhys) {
VirtRegPairs.push_back(NewReg);
break;
}
bool isNew = SrcRegMap.insert(std::make_pair(NewReg, Reg)).second;
if (!isNew)
assert(SrcRegMap[NewReg] == Reg && "Can't map to two src registers!");
VirtRegPairs.push_back(NewReg);
Reg = NewReg;
}
if (!VirtRegPairs.empty()) {
unsigned ToReg = VirtRegPairs.back();
VirtRegPairs.pop_back();
while (!VirtRegPairs.empty()) {
unsigned FromReg = VirtRegPairs.back();
VirtRegPairs.pop_back();
bool isNew = DstRegMap.insert(std::make_pair(FromReg, ToReg)).second;
if (!isNew)
assert(DstRegMap[FromReg] == ToReg &&"Can't map to two dst registers!");
ToReg = FromReg;
}
bool isNew = DstRegMap.insert(std::make_pair(DstReg, ToReg)).second;
if (!isNew)
assert(DstRegMap[DstReg] == ToReg && "Can't map to two dst registers!");
}
}
/// processCopy - If the specified instruction is not yet processed, process it
/// if it's a copy. For a copy instruction, we find the physical registers the
/// source and destination registers might be mapped to. These are kept in
/// point-to maps used to determine future optimizations. e.g.
/// v1024 = mov r0
/// v1025 = mov r1
/// v1026 = add v1024, v1025
/// r1 = mov v1026
/// If 'add' is a two-address instruction, v1024, v1026 are both potentially
/// coalesced to r0 (from the input side). v1025 is mapped to r1. v1026 is
/// potentially joined with r1 on the output side. It's worthwhile to commute
/// 'add' to eliminate a copy.
void TwoAddressInstructionPass::processCopy(MachineInstr *MI) {
if (Processed.count(MI))
return;
bool IsSrcPhys, IsDstPhys;
unsigned SrcReg, DstReg;
if (!isCopyToReg(*MI, TII, SrcReg, DstReg, IsSrcPhys, IsDstPhys))
return;
if (IsDstPhys && !IsSrcPhys)
DstRegMap.insert(std::make_pair(SrcReg, DstReg));
else if (!IsDstPhys && IsSrcPhys) {
bool isNew = SrcRegMap.insert(std::make_pair(DstReg, SrcReg)).second;
if (!isNew)
assert(SrcRegMap[DstReg] == SrcReg &&
"Can't map to two src physical registers!");
scanUses(DstReg);
}
Processed.insert(MI);
return;
}
/// rescheduleMIBelowKill - If there is one more local instruction that reads
/// 'Reg' and it kills 'Reg', consider moving the instruction below the kill
/// instruction in order to eliminate the need for the copy.
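/// e.g. given a hypothetical sequence
/// %v1 = ADD %v0<tied>, ... <- MI (two-address use of %v0)
/// ... = SUB %v0<kill>, ... <- KillMI (last use of %v0)
/// sinking the ADD below the SUB makes it the final use of %v0, so the tied
/// copy into %v1 can be coalesced away.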
bool TwoAddressInstructionPass::
rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned Reg) {
// Bail immediately if we don't have LV or LIS available. We use them to find
// kills efficiently.
if (!LV && !LIS)
return false;
MachineInstr *MI = &*mi;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
if (DI == DistanceMap.end())
// Must be created from unfolded load. Don't waste time trying this.
return false;
MachineInstr *KillMI = nullptr;
if (LIS) {
LiveInterval &LI = LIS->getInterval(Reg);
assert(LI.end() != LI.begin() &&
"Reg should not have empty live interval.");
SlotIndex MBBEndIdx = LIS->getMBBEndIdx(MBB).getPrevSlot();
LiveInterval::const_iterator I = LI.find(MBBEndIdx);
if (I != LI.end() && I->start < MBBEndIdx)
return false;
--I;
KillMI = LIS->getInstructionFromIndex(I->end);
} else {
KillMI = LV->getVarInfo(Reg).findKill(MBB);
}
if (!KillMI || MI == KillMI || KillMI->isCopy() || KillMI->isCopyLike())
// Don't mess with copies, they may be coalesced later.
return false;
if (KillMI->hasUnmodeledSideEffects() || KillMI->isCall() ||
KillMI->isBranch() || KillMI->isTerminator())
// Don't move past calls, etc.
return false;
unsigned DstReg;
if (isTwoAddrUse(*KillMI, Reg, DstReg))
return false;
bool SeenStore = true;
if (!MI->isSafeToMove(AA, SeenStore))
return false;
if (TII->getInstrLatency(InstrItins, MI) > 1)
// FIXME: Needs more sophisticated heuristics.
return false;
SmallSet<unsigned, 2> Uses;
SmallSet<unsigned, 2> Kills;
SmallSet<unsigned, 2> Defs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (MO.isDef())
Defs.insert(MOReg);
else {
Uses.insert(MOReg);
if (MOReg != Reg && (MO.isKill() ||
(LIS && isPlainlyKilled(MI, MOReg, LIS))))
Kills.insert(MOReg);
}
}
// Move the copies connected to MI down as well.
MachineBasicBlock::iterator Begin = MI;
MachineBasicBlock::iterator AfterMI = std::next(Begin);
MachineBasicBlock::iterator End = AfterMI;
while (End->isCopy() && Defs.count(End->getOperand(1).getReg())) {
Defs.insert(End->getOperand(0).getReg());
++End;
}
// Check that the reschedule will not break dependencies.
unsigned NumVisited = 0;
MachineBasicBlock::iterator KillPos = KillMI;
++KillPos;
for (MachineBasicBlock::iterator I = End; I != KillPos; ++I) {
MachineInstr *OtherMI = I;
// DBG_VALUE cannot be counted against the limit.
if (OtherMI->isDebugValue())
continue;
if (NumVisited > 10) // FIXME: Arbitrary limit to reduce compile time cost.
return false;
++NumVisited;
if (OtherMI->hasUnmodeledSideEffects() || OtherMI->isCall() ||
OtherMI->isBranch() || OtherMI->isTerminator())
// Don't move past calls, etc.
return false;
for (unsigned i = 0, e = OtherMI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = OtherMI->getOperand(i);
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (MO.isDef()) {
if (Uses.count(MOReg))
// Physical register use would be clobbered.
return false;
if (!MO.isDead() && Defs.count(MOReg))
// May clobber a physical register def.
// FIXME: This may be too conservative. It's ok if the instruction
// is sunk completely below the use.
return false;
} else {
if (Defs.count(MOReg))
return false;
bool isKill = MO.isKill() ||
(LIS && isPlainlyKilled(OtherMI, MOReg, LIS));
if (MOReg != Reg &&
((isKill && Uses.count(MOReg)) || Kills.count(MOReg)))
// Don't want to extend other live ranges and update kills.
return false;
if (MOReg == Reg && !isKill)
// We can't schedule across a use of the register in question.
return false;
// Ensure that if this is the register in question, it's the kill we expect.
assert((MOReg != Reg || OtherMI == KillMI) &&
"Found multiple kills of a register in a basic block");
}
}
}
// Move debug info as well.
while (Begin != MBB->begin() && std::prev(Begin)->isDebugValue())
--Begin;
nmi = End;
MachineBasicBlock::iterator InsertPos = KillPos;
if (LIS) {
// We have to move the copies first so that the MBB is still well-formed
// when calling handleMove().
for (MachineBasicBlock::iterator MBBI = AfterMI; MBBI != End;) {
MachineInstr *CopyMI = MBBI;
++MBBI;
MBB->splice(InsertPos, MBB, CopyMI);
LIS->handleMove(CopyMI);
InsertPos = CopyMI;
}
End = std::next(MachineBasicBlock::iterator(MI));
}
// Copies following MI may have been moved as well.
MBB->splice(InsertPos, MBB, Begin, End);
DistanceMap.erase(DI);
// Update live variables
if (LIS) {
LIS->handleMove(MI);
} else {
LV->removeVirtualRegisterKilled(Reg, KillMI);
LV->addVirtualRegisterKilled(Reg, MI);
}
DEBUG(dbgs() << "\trescheduled below kill: " << *KillMI);
return true;
}
/// isDefTooClose - Return true if the re-scheduling will put the given
/// instruction too close to the defs of its register dependencies.
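/// e.g. assuming DistanceMap places a def of Reg at 10 and MI at 14
/// (hypothetical positions), a def latency greater than 4 cycles makes the
/// def too close and the reschedule is rejected.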
bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist,
MachineInstr *MI) {
for (MachineInstr &DefMI : MRI->def_instructions(Reg)) {
if (DefMI.getParent() != MBB || DefMI.isCopy() || DefMI.isCopyLike())
continue;
if (&DefMI == MI)
return true; // MI is defining something KillMI uses
DenseMap<MachineInstr*, unsigned>::iterator DDI = DistanceMap.find(&DefMI);
if (DDI == DistanceMap.end())
return true; // Below MI
unsigned DefDist = DDI->second;
assert(Dist > DefDist && "Visited def already?");
if (TII->getInstrLatency(InstrItins, &DefMI) > (Dist - DefDist))
return true;
}
return false;
}
/// rescheduleKillAboveMI - If there is one more local instruction that reads
/// 'Reg' and it kills 'Reg', consider moving the kill instruction above the
/// current two-address instruction in order to eliminate the need for the
/// copy.
bool TwoAddressInstructionPass::
rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned Reg) {
// Bail immediately if we don't have LV or LIS available. We use them to find
// kills efficiently.
if (!LV && !LIS)
return false;
MachineInstr *MI = &*mi;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
if (DI == DistanceMap.end())
// Must be created from unfolded load. Don't waste time trying this.
return false;
MachineInstr *KillMI = nullptr;
if (LIS) {
LiveInterval &LI = LIS->getInterval(Reg);
assert(LI.end() != LI.begin() &&
"Reg should not have empty live interval.");
SlotIndex MBBEndIdx = LIS->getMBBEndIdx(MBB).getPrevSlot();
LiveInterval::const_iterator I = LI.find(MBBEndIdx);
if (I != LI.end() && I->start < MBBEndIdx)
return false;
--I;
KillMI = LIS->getInstructionFromIndex(I->end);
} else {
KillMI = LV->getVarInfo(Reg).findKill(MBB);
}
if (!KillMI || MI == KillMI || KillMI->isCopy() || KillMI->isCopyLike())
// Don't mess with copies, they may be coalesced later.
return false;
unsigned DstReg;
if (isTwoAddrUse(*KillMI, Reg, DstReg))
return false;
bool SeenStore = true;
if (!KillMI->isSafeToMove(AA, SeenStore))
return false;
SmallSet<unsigned, 2> Uses;
SmallSet<unsigned, 2> Kills;
SmallSet<unsigned, 2> Defs;
SmallSet<unsigned, 2> LiveDefs;
for (unsigned i = 0, e = KillMI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = KillMI->getOperand(i);
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
if (MO.isUse()) {
if (!MOReg)
continue;
if (isDefTooClose(MOReg, DI->second, MI))
return false;
bool isKill = MO.isKill() || (LIS && isPlainlyKilled(KillMI, MOReg, LIS));
if (MOReg == Reg && !isKill)
return false;
Uses.insert(MOReg);
if (isKill && MOReg != Reg)
Kills.insert(MOReg);
} else if (TargetRegisterInfo::isPhysicalRegister(MOReg)) {
Defs.insert(MOReg);
if (!MO.isDead())
LiveDefs.insert(MOReg);
}
}
// Check that the reschedule will not break dependencies.
unsigned NumVisited = 0;
MachineBasicBlock::iterator KillPos = KillMI;
for (MachineBasicBlock::iterator I = mi; I != KillPos; ++I) {
MachineInstr *OtherMI = I;
// DBG_VALUE cannot be counted against the limit.
if (OtherMI->isDebugValue())
continue;
if (NumVisited > 10) // FIXME: Arbitrary limit to reduce compile time cost.
return false;
++NumVisited;
if (OtherMI->hasUnmodeledSideEffects() || OtherMI->isCall() ||
OtherMI->isBranch() || OtherMI->isTerminator())
// Don't move past calls, etc.
return false;
SmallVector<unsigned, 2> OtherDefs;
for (unsigned i = 0, e = OtherMI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = OtherMI->getOperand(i);
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (MO.isUse()) {
if (Defs.count(MOReg))
// Moving KillMI can clobber the physical register if the def has
// not been seen.
return false;
if (Kills.count(MOReg))
// Don't want to extend other live ranges and update kills.
return false;
if (OtherMI != MI && MOReg == Reg &&
!(MO.isKill() || (LIS && isPlainlyKilled(OtherMI, MOReg, LIS))))
// We can't schedule across a use of the register in question.
return false;
} else {
OtherDefs.push_back(MOReg);
}
}
for (unsigned i = 0, e = OtherDefs.size(); i != e; ++i) {
unsigned MOReg = OtherDefs[i];
if (Uses.count(MOReg))
return false;
if (TargetRegisterInfo::isPhysicalRegister(MOReg) &&
LiveDefs.count(MOReg))
return false;
// Physical register def is seen.
Defs.erase(MOReg);
}
}
// Move the old kill above MI, don't forget to move debug info as well.
MachineBasicBlock::iterator InsertPos = mi;
while (InsertPos != MBB->begin() && std::prev(InsertPos)->isDebugValue())
--InsertPos;
MachineBasicBlock::iterator From = KillMI;
MachineBasicBlock::iterator To = std::next(From);
while (std::prev(From)->isDebugValue())
--From;
MBB->splice(InsertPos, MBB, From, To);
nmi = std::prev(InsertPos); // Backtrack so we process the moved instr.
DistanceMap.erase(DI);
// Update live variables
if (LIS) {
LIS->handleMove(KillMI);
} else {
LV->removeVirtualRegisterKilled(Reg, KillMI);
LV->addVirtualRegisterKilled(Reg, MI);
}
DEBUG(dbgs() << "\trescheduled kill: " << *KillMI);
return true;
}
/// tryInstructionTransform - For the case where an instruction has a single
/// pair of tied register operands, attempt some transformations that may
/// either eliminate the tied operands or improve the opportunities for
/// coalescing away the register copy. Returns true if no copy needs to be
/// inserted to untie mi's operands (either because they were untied, or
/// because mi was rescheduled, and will be visited again later). If the
/// shouldOnlyCommute flag is true, only instruction commutation is attempted.
bool TwoAddressInstructionPass::
tryInstructionTransform(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned SrcIdx, unsigned DstIdx,
unsigned Dist, bool shouldOnlyCommute) {
if (OptLevel == CodeGenOpt::None)
return false;
MachineInstr &MI = *mi;
unsigned regA = MI.getOperand(DstIdx).getReg();
unsigned regB = MI.getOperand(SrcIdx).getReg();
assert(TargetRegisterInfo::isVirtualRegister(regB) &&
"cannot make instruction into two-address form");
bool regBKilled = isKilled(MI, regB, MRI, TII, LIS, true);
if (TargetRegisterInfo::isVirtualRegister(regA))
scanUses(regA);
// Check if it is profitable to commute the operands.
unsigned SrcOp1, SrcOp2;
unsigned regC = 0;
unsigned regCIdx = ~0U;
bool TryCommute = false;
bool AggressiveCommute = false;
if (MI.isCommutable() && MI.getNumOperands() >= 3 &&
TII->findCommutedOpIndices(&MI, SrcOp1, SrcOp2)) {
if (SrcIdx == SrcOp1)
regCIdx = SrcOp2;
else if (SrcIdx == SrcOp2)
regCIdx = SrcOp1;
if (regCIdx != ~0U) {
regC = MI.getOperand(regCIdx).getReg();
if (!regBKilled && isKilled(MI, regC, MRI, TII, LIS, false))
// If C dies but B does not, swap the B and C operands.
// This makes the live ranges of A and C joinable.
TryCommute = true;
else if (isProfitableToCommute(regA, regB, regC, &MI, Dist)) {
TryCommute = true;
AggressiveCommute = true;
}
}
}
// If the instruction is convertible to 3-addr form, don't return after a
// successful commute; instead try the 3-addr transformation aggressively
// and use this variable to check the result later, because it might be
// better.
// For example, we can just use `leal (%rsi,%rdi), %eax` and `ret`
// instead of the following code.
// addl %esi, %edi
// movl %edi, %eax
// ret
bool Commuted = false;
// If it's profitable to commute, try to do so.
if (TryCommute && commuteInstruction(mi, regB, regC, Dist)) {
Commuted = true;
++NumCommuted;
if (AggressiveCommute)
++NumAggrCommuted;
if (!MI.isConvertibleTo3Addr())
return false;
}
if (shouldOnlyCommute)
return false;
// If there is one more use of regB later in the same MBB, consider
// re-schedule this MI below it.
if (!Commuted && EnableRescheduling && rescheduleMIBelowKill(mi, nmi, regB)) {
++NumReSchedDowns;
return true;
}
if (MI.isConvertibleTo3Addr()) {
// This instruction is potentially convertible to a true
// three-address instruction. Check if it is profitable.
if (!regBKilled || isProfitableToConv3Addr(regA, regB)) {
// Try to convert it.
if (convertInstTo3Addr(mi, nmi, regA, regB, Dist)) {
++NumConvertedTo3Addr;
return true; // Done with this instruction.
}
}
}
// Return if it was commuted but the 3-addr conversion failed.
if (Commuted)
return false;
// If there is one more use of regB later in the same MBB, consider
// re-schedule it before this MI if it's legal.
if (EnableRescheduling && rescheduleKillAboveMI(mi, nmi, regB)) {
++NumReSchedUps;
return true;
}
// If this is an instruction with a load folded into it, try unfolding
// the load, e.g. avoid this:
// movq %rdx, %rcx
// addq (%rax), %rcx
// in favor of this:
// movq (%rax), %rcx
// addq %rdx, %rcx
// because it's preferable to schedule a load than a register copy.
if (MI.mayLoad() && !regBKilled) {
// Determine if a load can be unfolded.
unsigned LoadRegIndex;
unsigned NewOpc =
TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
/*UnfoldLoad=*/true,
/*UnfoldStore=*/false,
&LoadRegIndex);
if (NewOpc != 0) {
const MCInstrDesc &UnfoldMCID = TII->get(NewOpc);
if (UnfoldMCID.getNumDefs() == 1) {
// Unfold the load.
DEBUG(dbgs() << "2addr: UNFOLDING: " << MI);
const TargetRegisterClass *RC =
TRI->getAllocatableClass(
TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI, *MF));
unsigned Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
if (!TII->unfoldMemoryOperand(*MF, &MI, Reg,
/*UnfoldLoad=*/true,/*UnfoldStore=*/false,
NewMIs)) {
DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
return false;
}
assert(NewMIs.size() == 2 &&
"Unfolded a load into multiple instructions!");
// The load was previously folded, so this is the only use.
NewMIs[1]->addRegisterKilled(Reg, TRI);
// Tentatively insert the instructions into the block so that they
// look "normal" to the transformation logic.
MBB->insert(mi, NewMIs[0]);
MBB->insert(mi, NewMIs[1]);
DEBUG(dbgs() << "2addr: NEW LOAD: " << *NewMIs[0]
<< "2addr: NEW INST: " << *NewMIs[1]);
// Transform the instruction, now that it no longer has a load.
unsigned NewDstIdx = NewMIs[1]->findRegisterDefOperandIdx(regA);
unsigned NewSrcIdx = NewMIs[1]->findRegisterUseOperandIdx(regB);
MachineBasicBlock::iterator NewMI = NewMIs[1];
bool TransformResult =
tryInstructionTransform(NewMI, mi, NewSrcIdx, NewDstIdx, Dist, true);
(void)TransformResult;
assert(!TransformResult &&
"tryInstructionTransform() should return false.");
if (NewMIs[1]->getOperand(NewSrcIdx).isKill()) {
// Success, or at least we made an improvement. Keep the unfolded
// instructions and discard the original.
if (LV) {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (MO.isUse()) {
if (MO.isKill()) {
if (NewMIs[0]->killsRegister(MO.getReg()))
LV->replaceKillInstruction(MO.getReg(), &MI, NewMIs[0]);
else {
assert(NewMIs[1]->killsRegister(MO.getReg()) &&
"Kill missing after load unfold!");
LV->replaceKillInstruction(MO.getReg(), &MI, NewMIs[1]);
}
}
} else if (LV->removeVirtualRegisterDead(MO.getReg(), &MI)) {
if (NewMIs[1]->registerDefIsDead(MO.getReg()))
LV->addVirtualRegisterDead(MO.getReg(), NewMIs[1]);
else {
assert(NewMIs[0]->registerDefIsDead(MO.getReg()) &&
"Dead flag missing after load unfold!");
LV->addVirtualRegisterDead(MO.getReg(), NewMIs[0]);
}
}
}
}
LV->addVirtualRegisterKilled(Reg, NewMIs[1]);
}
SmallVector<unsigned, 4> OrigRegs;
if (LIS) {
for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
MOE = MI.operands_end(); MOI != MOE; ++MOI) {
if (MOI->isReg())
OrigRegs.push_back(MOI->getReg());
}
}
MI.eraseFromParent();
// Update LiveIntervals.
if (LIS) {
MachineBasicBlock::iterator Begin(NewMIs[0]);
MachineBasicBlock::iterator End(NewMIs[1]);
LIS->repairIntervalsInRange(MBB, Begin, End, OrigRegs);
}
mi = NewMIs[1];
} else {
// Transforming didn't eliminate the tie and didn't lead to an
// improvement. Clean up the unfolded instructions and keep the
// original.
DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
NewMIs[0]->eraseFromParent();
NewMIs[1]->eraseFromParent();
}
}
}
}
return false;
}
// Collect tied operands of MI that need to be handled.
// Rewrite trivial cases immediately.
// Return true if any tied operands were found, including the trivial ones.
bool TwoAddressInstructionPass::
collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) {
const MCInstrDesc &MCID = MI->getDesc();
bool AnyOps = false;
unsigned NumOps = MI->getNumOperands();
for (unsigned SrcIdx = 0; SrcIdx < NumOps; ++SrcIdx) {
unsigned DstIdx = 0;
if (!MI->isRegTiedToDefOperand(SrcIdx, &DstIdx))
continue;
AnyOps = true;
MachineOperand &SrcMO = MI->getOperand(SrcIdx);
MachineOperand &DstMO = MI->getOperand(DstIdx);
unsigned SrcReg = SrcMO.getReg();
unsigned DstReg = DstMO.getReg();
// Tied constraint already satisfied?
if (SrcReg == DstReg)
continue;
assert(SrcReg && SrcMO.isUse() && "two address instruction invalid");
// Deal with <undef> uses immediately - simply rewrite the src operand.
if (SrcMO.isUndef() && !DstMO.getSubReg()) {
// Constrain the DstReg register class if required.
if (TargetRegisterInfo::isVirtualRegister(DstReg))
if (const TargetRegisterClass *RC = TII->getRegClass(MCID, SrcIdx,
TRI, *MF))
MRI->constrainRegClass(DstReg, RC);
SrcMO.setReg(DstReg);
SrcMO.setSubReg(0);
DEBUG(dbgs() << "\t\trewrite undef:\t" << *MI);
continue;
}
TiedOperands[SrcReg].push_back(std::make_pair(SrcIdx, DstIdx));
}
return AnyOps;
}
// Process a list of tied MI operands that all use the same source register.
// The tied pairs are of the form (SrcIdx, DstIdx).
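// e.g. for a hypothetical "%a = ADD %b<tied>, %c" where use operand 1 (%b)
// is tied to def operand 0 (%a), the list holds the single pair (1, 0) and
// a "%a = COPY %b" is emitted just before the instruction.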
void
TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
TiedPairList &TiedPairs,
unsigned &Dist) {
bool IsEarlyClobber = false;
for (unsigned tpi = 0, tpe = TiedPairs.size(); tpi != tpe; ++tpi) {
const MachineOperand &DstMO = MI->getOperand(TiedPairs[tpi].second);
IsEarlyClobber |= DstMO.isEarlyClobber();
}
bool RemovedKillFlag = false;
bool AllUsesCopied = true;
unsigned LastCopiedReg = 0;
SlotIndex LastCopyIdx;
unsigned RegB = 0;
unsigned SubRegB = 0;
for (unsigned tpi = 0, tpe = TiedPairs.size(); tpi != tpe; ++tpi) {
unsigned SrcIdx = TiedPairs[tpi].first;
unsigned DstIdx = TiedPairs[tpi].second;
const MachineOperand &DstMO = MI->getOperand(DstIdx);
unsigned RegA = DstMO.getReg();
// Grab RegB from the instruction because it may have changed if the
// instruction was commuted.
RegB = MI->getOperand(SrcIdx).getReg();
SubRegB = MI->getOperand(SrcIdx).getSubReg();
if (RegA == RegB) {
// The register is tied to multiple destinations (or else we would
// not have continued this far), but this use of the register
// already matches the tied destination. Leave it.
AllUsesCopied = false;
continue;
}
LastCopiedReg = RegA;
assert(TargetRegisterInfo::isVirtualRegister(RegB) &&
"cannot make instruction into two-address form");
#ifndef NDEBUG
// First, verify that we don't have a use of "a" in the instruction
// (a = b + a for example) because our transformation will not
// work. This should never occur because we are in SSA form.
for (unsigned i = 0; i != MI->getNumOperands(); ++i)
assert(i == DstIdx ||
!MI->getOperand(i).isReg() ||
MI->getOperand(i).getReg() != RegA);
#endif
// Emit a copy.
MachineInstrBuilder MIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(TargetOpcode::COPY), RegA);
// If this operand is folding a truncation, the truncation now moves to the
// copy so that the register classes remain valid for the operands.
MIB.addReg(RegB, 0, SubRegB);
const TargetRegisterClass *RC = MRI->getRegClass(RegB);
if (SubRegB) {
if (TargetRegisterInfo::isVirtualRegister(RegA)) {
assert(TRI->getMatchingSuperRegClass(RC, MRI->getRegClass(RegA),
SubRegB) &&
"tied subregister must be a truncation");
// The superreg class will not be used to constrain the subreg class.
RC = nullptr;
}
else {
assert(TRI->getMatchingSuperReg(RegA, SubRegB, MRI->getRegClass(RegB))
&& "tied subregister must be a truncation");
}
}
// Update DistanceMap.
MachineBasicBlock::iterator PrevMI = MI;
--PrevMI;
DistanceMap.insert(std::make_pair(PrevMI, Dist));
DistanceMap[MI] = ++Dist;
if (LIS) {
LastCopyIdx = LIS->InsertMachineInstrInMaps(PrevMI).getRegSlot();
if (TargetRegisterInfo::isVirtualRegister(RegA)) {
LiveInterval &LI = LIS->getInterval(RegA);
VNInfo *VNI = LI.getNextValue(LastCopyIdx, LIS->getVNInfoAllocator());
SlotIndex endIdx =
LIS->getInstructionIndex(MI).getRegSlot(IsEarlyClobber);
LI.addSegment(LiveInterval::Segment(LastCopyIdx, endIdx, VNI));
}
}
DEBUG(dbgs() << "\t\tprepend:\t" << *MIB);
MachineOperand &MO = MI->getOperand(SrcIdx);
assert(MO.isReg() && MO.getReg() == RegB && MO.isUse() &&
"inconsistent operand info for 2-reg pass");
if (MO.isKill()) {
MO.setIsKill(false);
RemovedKillFlag = true;
}
// Make sure regA is a legal regclass for the SrcIdx operand.
if (TargetRegisterInfo::isVirtualRegister(RegA) &&
TargetRegisterInfo::isVirtualRegister(RegB))
MRI->constrainRegClass(RegA, RC);
MO.setReg(RegA);
// The getMatchingSuper asserts guarantee that the register class projected
// by SubRegB is compatible with RegA with no subregister. So regardless of
// whether the dest operand writes a subreg, the source operand should not.
MO.setSubReg(0);
// Propagate SrcRegMap.
SrcRegMap[RegA] = RegB;
}
if (AllUsesCopied) {
if (!IsEarlyClobber) {
// Replace other (un-tied) uses of regB with LastCopiedReg.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() == RegB && MO.getSubReg() == SubRegB &&
MO.isUse()) {
if (MO.isKill()) {
MO.setIsKill(false);
RemovedKillFlag = true;
}
MO.setReg(LastCopiedReg);
MO.setSubReg(0);
}
}
}
// Update live variables for regB.
if (RemovedKillFlag && LV && LV->getVarInfo(RegB).removeKill(MI)) {
MachineBasicBlock::iterator PrevMI = MI;
--PrevMI;
LV->addVirtualRegisterKilled(RegB, PrevMI);
}
// Update LiveIntervals.
if (LIS) {
LiveInterval &LI = LIS->getInterval(RegB);
SlotIndex MIIdx = LIS->getInstructionIndex(MI);
LiveInterval::const_iterator I = LI.find(MIIdx);
assert(I != LI.end() && "RegB must be live-in to use.");
SlotIndex UseIdx = MIIdx.getRegSlot(IsEarlyClobber);
if (I->end == UseIdx)
LI.removeSegment(LastCopyIdx, UseIdx);
}
} else if (RemovedKillFlag) {
// Some tied uses of regB matched their destination registers, so
// regB is still used in this instruction, but a kill flag was
// removed from a different tied use of regB, so now we need to add
// a kill flag to one of the remaining uses of regB.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() == RegB && MO.isUse()) {
MO.setIsKill(true);
break;
}
}
}
}
/// runOnMachineFunction - Reduce two-address instructions to two operands.
///
bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
MF = &Func;
const TargetMachine &TM = MF->getTarget();
MRI = &MF->getRegInfo();
TII = MF->getSubtarget().getInstrInfo();
TRI = MF->getSubtarget().getRegisterInfo();
InstrItins = MF->getSubtarget().getInstrItineraryData();
LV = getAnalysisIfAvailable<LiveVariables>();
LIS = getAnalysisIfAvailable<LiveIntervals>();
AA = &getAnalysis<AliasAnalysis>();
OptLevel = TM.getOptLevel();
bool MadeChange = false;
DEBUG(dbgs() << "********** REWRITING TWO-ADDR INSTRS **********\n");
DEBUG(dbgs() << "********** Function: "
<< MF->getName() << '\n');
// This pass takes the function out of SSA form.
MRI->leaveSSA();
TiedOperandMap TiedOperands;
for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
MBBI != MBBE; ++MBBI) {
MBB = MBBI;
unsigned Dist = 0;
DistanceMap.clear();
SrcRegMap.clear();
DstRegMap.clear();
Processed.clear();
for (MachineBasicBlock::iterator mi = MBB->begin(), me = MBB->end();
mi != me; ) {
MachineBasicBlock::iterator nmi = std::next(mi);
if (mi->isDebugValue()) {
mi = nmi;
continue;
}
// Expand REG_SEQUENCE instructions. This will position mi at the first
// expanded instruction.
if (mi->isRegSequence())
eliminateRegSequence(mi);
DistanceMap.insert(std::make_pair(mi, ++Dist));
processCopy(&*mi);
// First scan through all the tied register uses in this instruction
// and record a list of pairs of tied operands for each register.
if (!collectTiedOperands(mi, TiedOperands)) {
mi = nmi;
continue;
}
++NumTwoAddressInstrs;
MadeChange = true;
DEBUG(dbgs() << '\t' << *mi);
// If the instruction has a single pair of tied operands, try some
// transformations that may either eliminate the tied operands or
// improve the opportunities for coalescing away the register copy.
if (TiedOperands.size() == 1) {
SmallVectorImpl<std::pair<unsigned, unsigned> > &TiedPairs
= TiedOperands.begin()->second;
if (TiedPairs.size() == 1) {
unsigned SrcIdx = TiedPairs[0].first;
unsigned DstIdx = TiedPairs[0].second;
unsigned SrcReg = mi->getOperand(SrcIdx).getReg();
unsigned DstReg = mi->getOperand(DstIdx).getReg();
if (SrcReg != DstReg &&
tryInstructionTransform(mi, nmi, SrcIdx, DstIdx, Dist, false)) {
// The tied operands have been eliminated or shifted further down the
// block to ease elimination. Continue processing with 'nmi'.
TiedOperands.clear();
mi = nmi;
continue;
}
}
}
// Now iterate over the information collected above.
for (TiedOperandMap::iterator OI = TiedOperands.begin(),
OE = TiedOperands.end(); OI != OE; ++OI) {
processTiedPairs(mi, OI->second, Dist);
DEBUG(dbgs() << "\t\trewrite to:\t" << *mi);
}
// Rewrite INSERT_SUBREG as COPY now that we no longer need SSA form.
if (mi->isInsertSubreg()) {
// From %reg = INSERT_SUBREG %reg, %subreg, subidx
// To %reg:subidx = COPY %subreg
unsigned SubIdx = mi->getOperand(3).getImm();
mi->RemoveOperand(3);
assert(mi->getOperand(0).getSubReg() == 0 && "Unexpected subreg idx");
mi->getOperand(0).setSubReg(SubIdx);
mi->getOperand(0).setIsUndef(mi->getOperand(1).isUndef());
mi->RemoveOperand(1);
mi->setDesc(TII->get(TargetOpcode::COPY));
DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
}
// Clear TiedOperands here instead of at the top of the loop
// since most instructions do not have tied operands.
TiedOperands.clear();
mi = nmi;
}
}
if (LIS)
MF->verify(this, "After two-address instruction pass");
return MadeChange;
}
/// Eliminate a REG_SEQUENCE instruction as part of the de-ssa process.
///
/// The instruction is turned into a sequence of sub-register copies:
///
/// %dst = REG_SEQUENCE %v1, ssub0, %v2, ssub1
///
/// Becomes:
///
/// %dst:ssub0<def,undef> = COPY %v1
/// %dst:ssub1<def> = COPY %v2
///
void TwoAddressInstructionPass::
eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
MachineInstr *MI = MBBI;
unsigned DstReg = MI->getOperand(0).getReg();
if (MI->getOperand(0).getSubReg() ||
TargetRegisterInfo::isPhysicalRegister(DstReg) ||
!(MI->getNumOperands() & 1)) {
DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << *MI);
llvm_unreachable(nullptr);
}
SmallVector<unsigned, 4> OrigRegs;
if (LIS) {
OrigRegs.push_back(MI->getOperand(0).getReg());
for (unsigned i = 1, e = MI->getNumOperands(); i < e; i += 2)
OrigRegs.push_back(MI->getOperand(i).getReg());
}
bool DefEmitted = false;
for (unsigned i = 1, e = MI->getNumOperands(); i < e; i += 2) {
MachineOperand &UseMO = MI->getOperand(i);
unsigned SrcReg = UseMO.getReg();
unsigned SubIdx = MI->getOperand(i+1).getImm();
// Nothing needs to be inserted for <undef> operands.
if (UseMO.isUndef())
continue;
// Defer any kill flag to the last operand using SrcReg. Otherwise, we
// might insert a COPY that uses SrcReg after it was killed.
bool isKill = UseMO.isKill();
if (isKill)
for (unsigned j = i + 2; j < e; j += 2)
if (MI->getOperand(j).getReg() == SrcReg) {
MI->getOperand(j).setIsKill();
UseMO.setIsKill(false);
isKill = false;
break;
}
// Insert the sub-register copy.
MachineInstr *CopyMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(TargetOpcode::COPY))
.addReg(DstReg, RegState::Define, SubIdx)
.addOperand(UseMO);
// The first def needs an <undef> flag because there is no live register
// before it.
if (!DefEmitted) {
CopyMI->getOperand(0).setIsUndef(true);
// Return an iterator pointing to the first inserted instr.
MBBI = CopyMI;
}
DefEmitted = true;
// Update LiveVariables' kill info.
if (LV && isKill && !TargetRegisterInfo::isPhysicalRegister(SrcReg))
LV->replaceKillInstruction(SrcReg, MI, CopyMI);
DEBUG(dbgs() << "Inserted: " << *CopyMI);
}
MachineBasicBlock::iterator EndMBBI =
std::next(MachineBasicBlock::iterator(MI));
if (!DefEmitted) {
DEBUG(dbgs() << "Turned: " << *MI << " into an IMPLICIT_DEF");
MI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
MI->RemoveOperand(j);
} else {
DEBUG(dbgs() << "Eliminated: " << *MI);
MI->eraseFromParent();
}
// Update LiveIntervals.
if (LIS)
LIS->repairIntervalsInRange(MBB, MBBI, EndMBBI, OrigRegs);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/PseudoSourceValue.cpp | //===-- llvm/CodeGen/PseudoSourceValue.cpp ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PseudoSourceValue class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
using namespace llvm;
namespace {
struct PSVGlobalsTy {
// PseudoSourceValues are immutable so don't need locking.
const PseudoSourceValue PSVs[4];
sys::Mutex Lock; // Guards FSValues, but not the values inside it.
std::map<int, const PseudoSourceValue *> FSValues;
PSVGlobalsTy() : PSVs() {}
~PSVGlobalsTy() {
for (std::map<int, const PseudoSourceValue *>::iterator
I = FSValues.begin(), E = FSValues.end(); I != E; ++I) {
delete I->second;
}
}
};
static ManagedStatic<PSVGlobalsTy> PSVGlobals;
} // anonymous namespace
const PseudoSourceValue *PseudoSourceValue::getStack()
{ return &PSVGlobals->PSVs[0]; }
const PseudoSourceValue *PseudoSourceValue::getGOT()
{ return &PSVGlobals->PSVs[1]; }
const PseudoSourceValue *PseudoSourceValue::getJumpTable()
{ return &PSVGlobals->PSVs[2]; }
const PseudoSourceValue *PseudoSourceValue::getConstantPool()
{ return &PSVGlobals->PSVs[3]; }
static const char *const PSVNames[] = {
"Stack",
"GOT",
"JumpTable",
"ConstantPool"
};
PseudoSourceValue::PseudoSourceValue(bool isFixed) : isFixed(isFixed) {}
PseudoSourceValue::~PseudoSourceValue() {}
void PseudoSourceValue::printCustom(raw_ostream &O) const {
O << PSVNames[this - PSVGlobals->PSVs];
}
const PseudoSourceValue *PseudoSourceValue::getFixedStack(int FI) {
PSVGlobalsTy &PG = *PSVGlobals;
sys::ScopedLock locked(PG.Lock);
const PseudoSourceValue *&V = PG.FSValues[FI];
if (!V)
V = new FixedStackPseudoSourceValue(FI);
return V;
}
bool PseudoSourceValue::isConstant(const MachineFrameInfo *) const {
if (this == getStack())
return false;
if (this == getGOT() ||
this == getConstantPool() ||
this == getJumpTable())
return true;
llvm_unreachable("Unknown PseudoSourceValue!");
}
bool PseudoSourceValue::isAliased(const MachineFrameInfo *MFI) const {
if (this == getStack() ||
this == getGOT() ||
this == getConstantPool() ||
this == getJumpTable())
return false;
llvm_unreachable("Unknown PseudoSourceValue!");
}
bool PseudoSourceValue::mayAlias(const MachineFrameInfo *MFI) const {
if (this == getGOT() ||
this == getConstantPool() ||
this == getJumpTable())
return false;
return true;
}
bool FixedStackPseudoSourceValue::isConstant(const MachineFrameInfo *MFI) const{
return MFI && MFI->isImmutableObjectIndex(FI);
}
bool FixedStackPseudoSourceValue::isAliased(const MachineFrameInfo *MFI) const {
if (!MFI)
return true;
return MFI->isAliasedObjectIndex(FI);
}
bool FixedStackPseudoSourceValue::mayAlias(const MachineFrameInfo *MFI) const {
if (!MFI)
return true;
// Spill slots will not alias any LLVM IR value.
return !MFI->isSpillSlotObjectIndex(FI);
}
void FixedStackPseudoSourceValue::printCustom(raw_ostream &OS) const {
OS << "FixedStack" << FI;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineModuleInfo.cpp | //===-- llvm/CodeGen/MachineModuleInfo.cpp ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Analysis/LibCallSemantics.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
using namespace llvm::dwarf;
// Register the MachineModuleInfo pass with the PassRegistry.
INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo",
"Machine Module Information", false, false)
char MachineModuleInfo::ID = 0;
// Out of line virtual method.
MachineModuleInfoImpl::~MachineModuleInfoImpl() {}
namespace llvm {
class MMIAddrLabelMapCallbackPtr : CallbackVH {
MMIAddrLabelMap *Map;
public:
MMIAddrLabelMapCallbackPtr() : Map(nullptr) {}
MMIAddrLabelMapCallbackPtr(Value *V) : CallbackVH(V), Map(nullptr) {}
void setPtr(BasicBlock *BB) {
ValueHandleBase::operator=(BB);
}
void setMap(MMIAddrLabelMap *map) { Map = map; }
void deleted() override;
void allUsesReplacedWith(Value *V2) override;
};
class MMIAddrLabelMap {
MCContext &Context;
struct AddrLabelSymEntry {
/// Symbols - The symbols for the label.
TinyPtrVector<MCSymbol *> Symbols;
Function *Fn; // The containing function of the BasicBlock.
unsigned Index; // The index in BBCallbacks for the BasicBlock.
};
DenseMap<AssertingVH<BasicBlock>, AddrLabelSymEntry> AddrLabelSymbols;
/// BBCallbacks - Callbacks for the BasicBlocks that we have entries for. We
/// use this so we get notified if a block is deleted or RAUWd.
std::vector<MMIAddrLabelMapCallbackPtr> BBCallbacks;
/// DeletedAddrLabelsNeedingEmission - This is a per-function list of symbols
/// whose corresponding BasicBlock got deleted. These symbols need to be
/// emitted at some point in the file, so AsmPrinter emits them after the
/// function body.
DenseMap<AssertingVH<Function>, std::vector<MCSymbol*> >
DeletedAddrLabelsNeedingEmission;
public:
MMIAddrLabelMap(MCContext &context) : Context(context) {}
~MMIAddrLabelMap() {
assert(DeletedAddrLabelsNeedingEmission.empty() &&
"Some labels for deleted blocks never got emitted");
}
ArrayRef<MCSymbol *> getAddrLabelSymbolToEmit(BasicBlock *BB);
void takeDeletedSymbolsForFunction(Function *F,
std::vector<MCSymbol*> &Result);
void UpdateForDeletedBlock(BasicBlock *BB);
void UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New);
};
}
ArrayRef<MCSymbol *> MMIAddrLabelMap::getAddrLabelSymbolToEmit(BasicBlock *BB) {
assert(BB->hasAddressTaken() &&
"Shouldn't get label for block without address taken");
AddrLabelSymEntry &Entry = AddrLabelSymbols[BB];
// If we already had an entry for this block, just return it.
if (!Entry.Symbols.empty()) {
assert(BB->getParent() == Entry.Fn && "Parent changed");
return Entry.Symbols;
}
// Otherwise, this is a new entry, create a new symbol for it and add an
// entry to BBCallbacks so we can be notified if the BB is deleted or RAUWd.
BBCallbacks.emplace_back(BB);
BBCallbacks.back().setMap(this);
Entry.Index = BBCallbacks.size() - 1;
Entry.Fn = BB->getParent();
Entry.Symbols.push_back(Context.createTempSymbol());
return Entry.Symbols;
}
/// takeDeletedSymbolsForFunction - If we have any deleted symbols for F, return
/// them.
void MMIAddrLabelMap::
takeDeletedSymbolsForFunction(Function *F, std::vector<MCSymbol*> &Result) {
DenseMap<AssertingVH<Function>, std::vector<MCSymbol*> >::iterator I =
DeletedAddrLabelsNeedingEmission.find(F);
// If there are no entries for the function, just return.
if (I == DeletedAddrLabelsNeedingEmission.end()) return;
// Otherwise, take the list.
std::swap(Result, I->second);
DeletedAddrLabelsNeedingEmission.erase(I);
}
void MMIAddrLabelMap::UpdateForDeletedBlock(BasicBlock *BB) {
// If the block got deleted, there is no need for the symbol. If the symbol
// was already emitted, we can just forget about it, otherwise we need to
// queue it up for later emission when the function is output.
AddrLabelSymEntry Entry = std::move(AddrLabelSymbols[BB]);
AddrLabelSymbols.erase(BB);
assert(!Entry.Symbols.empty() && "Didn't have a symbol, why a callback?");
BBCallbacks[Entry.Index] = nullptr; // Clear the callback.
assert((BB->getParent() == nullptr || BB->getParent() == Entry.Fn) &&
"Block/parent mismatch");
for (MCSymbol *Sym : Entry.Symbols) {
if (Sym->isDefined())
return;
// If the block is not yet defined, we need to emit it at the end of the
// function. Add the symbol to the DeletedAddrLabelsNeedingEmission list
// for the containing Function. Since the block is being deleted, its
// parent may already be removed, we have to get the function from 'Entry'.
DeletedAddrLabelsNeedingEmission[Entry.Fn].push_back(Sym);
}
}
void MMIAddrLabelMap::UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New) {
// Get the entry for the RAUW'd block and remove it from our map.
AddrLabelSymEntry OldEntry = std::move(AddrLabelSymbols[Old]);
AddrLabelSymbols.erase(Old);
assert(!OldEntry.Symbols.empty() && "Didn't have a symbol, why a callback?");
AddrLabelSymEntry &NewEntry = AddrLabelSymbols[New];
// If New is not address taken, just move our symbol over to it.
if (NewEntry.Symbols.empty()) {
BBCallbacks[OldEntry.Index].setPtr(New); // Update the callback.
NewEntry = std::move(OldEntry); // Set New's entry.
return;
}
BBCallbacks[OldEntry.Index] = nullptr; // Update the callback.
// Otherwise, we need to add the old symbols to the new block's set.
NewEntry.Symbols.insert(NewEntry.Symbols.end(), OldEntry.Symbols.begin(),
OldEntry.Symbols.end());
}
void MMIAddrLabelMapCallbackPtr::deleted() {
Map->UpdateForDeletedBlock(cast<BasicBlock>(getValPtr()));
}
void MMIAddrLabelMapCallbackPtr::allUsesReplacedWith(Value *V2) {
Map->UpdateForRAUWBlock(cast<BasicBlock>(getValPtr()), cast<BasicBlock>(V2));
}
//===----------------------------------------------------------------------===//
MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI,
const MCRegisterInfo &MRI,
const MCObjectFileInfo *MOFI)
: ImmutablePass(ID), Context(&MAI, &MRI, MOFI, nullptr, false) {
initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
}
MachineModuleInfo::MachineModuleInfo()
: ImmutablePass(ID), Context(nullptr, nullptr, nullptr) {
llvm_unreachable("This MachineModuleInfo constructor should never be called, "
"MMI should always be explicitly constructed by "
"LLVMTargetMachine");
}
MachineModuleInfo::~MachineModuleInfo() {
}
bool MachineModuleInfo::doInitialization(Module &M) {
ObjFileMMI = nullptr;
CurCallSite = 0;
CallsEHReturn = false;
CallsUnwindInit = false;
DbgInfoAvailable = UsesVAFloatArgument = UsesMorestackAddr = false;
// Always emit some info; by default, "no personality" info.
Personalities.push_back(nullptr);
PersonalityTypeCache = EHPersonality::Unknown;
AddrLabelSymbols = nullptr;
TheModule = nullptr;
return false;
}
bool MachineModuleInfo::doFinalization(Module &M) {
Personalities.clear();
delete AddrLabelSymbols;
AddrLabelSymbols = nullptr;
Context.reset();
delete ObjFileMMI;
ObjFileMMI = nullptr;
return false;
}
/// EndFunction - Discard function meta information.
///
void MachineModuleInfo::EndFunction() {
// Clean up frame info.
FrameInstructions.clear();
// Clean up exception info.
LandingPads.clear();
PersonalityTypeCache = EHPersonality::Unknown;
CallSiteMap.clear();
TypeInfos.clear();
FilterIds.clear();
FilterEnds.clear();
CallsEHReturn = false;
CallsUnwindInit = false;
VariableDbgInfos.clear();
}
//===- Address of Block Management ----------------------------------------===//
/// getAddrLabelSymbolToEmit - Return the symbol to be used for the specified
/// basic block when its address is taken. If other blocks were RAUW'd to
/// this one, we may have to emit them as well, so return the whole set.
ArrayRef<MCSymbol *>
MachineModuleInfo::getAddrLabelSymbolToEmit(const BasicBlock *BB) {
// Lazily create AddrLabelSymbols.
if (!AddrLabelSymbols)
AddrLabelSymbols = new MMIAddrLabelMap(Context);
return AddrLabelSymbols->getAddrLabelSymbolToEmit(const_cast<BasicBlock*>(BB));
}
/// takeDeletedSymbolsForFunction - If the specified function has had any
/// references to address-taken blocks generated, but the block got deleted,
/// return the symbol now so we can emit it. This prevents emitting a
/// reference to a symbol that has no definition.
void MachineModuleInfo::
takeDeletedSymbolsForFunction(const Function *F,
std::vector<MCSymbol*> &Result) {
// If no blocks have had their addresses taken, we're done.
if (!AddrLabelSymbols) return;
return AddrLabelSymbols->
takeDeletedSymbolsForFunction(const_cast<Function*>(F), Result);
}
//===- EH -----------------------------------------------------------------===//
/// getOrCreateLandingPadInfo - Find or create a LandingPadInfo for the
/// specified MachineBasicBlock.
LandingPadInfo &MachineModuleInfo::getOrCreateLandingPadInfo
(MachineBasicBlock *LandingPad) {
unsigned N = LandingPads.size();
for (unsigned i = 0; i < N; ++i) {
LandingPadInfo &LP = LandingPads[i];
if (LP.LandingPadBlock == LandingPad)
return LP;
}
LandingPads.push_back(LandingPadInfo(LandingPad));
return LandingPads[N];
}
/// addInvoke - Provide the begin and end labels of an invoke style call and
/// associate it with a try landing pad block.
void MachineModuleInfo::addInvoke(MachineBasicBlock *LandingPad,
MCSymbol *BeginLabel, MCSymbol *EndLabel) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.BeginLabels.push_back(BeginLabel);
LP.EndLabels.push_back(EndLabel);
}
/// addLandingPad - Provide the label of a try LandingPad block.
///
MCSymbol *MachineModuleInfo::addLandingPad(MachineBasicBlock *LandingPad) {
MCSymbol *LandingPadLabel = Context.createTempSymbol();
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.LandingPadLabel = LandingPadLabel;
return LandingPadLabel;
}
/// addPersonality - Provide the personality function for the exception
/// information.
void MachineModuleInfo::addPersonality(MachineBasicBlock *LandingPad,
const Function *Personality) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.Personality = Personality;
addPersonality(Personality);
}
void MachineModuleInfo::addPersonality(const Function *Personality) {
for (unsigned i = 0; i < Personalities.size(); ++i)
if (Personalities[i] == Personality)
return;
// If this is the first personality we're adding, go
// ahead and add it at the beginning.
if (!Personalities[0])
Personalities[0] = Personality;
else
Personalities.push_back(Personality);
}
void MachineModuleInfo::addWinEHState(MachineBasicBlock *LandingPad,
int State) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.WinEHState = State;
}
/// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
///
void MachineModuleInfo::
addCatchTypeInfo(MachineBasicBlock *LandingPad,
ArrayRef<const GlobalValue *> TyInfo) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
for (unsigned N = TyInfo.size(); N; --N)
LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
}
/// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
///
void MachineModuleInfo::
addFilterTypeInfo(MachineBasicBlock *LandingPad,
ArrayRef<const GlobalValue *> TyInfo) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
std::vector<unsigned> IdsInFilter(TyInfo.size());
for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
IdsInFilter[I] = getTypeIDFor(TyInfo[I]);
LP.TypeIds.push_back(getFilterIDFor(IdsInFilter));
}
/// addCleanup - Add a cleanup action for a landing pad.
///
void MachineModuleInfo::addCleanup(MachineBasicBlock *LandingPad) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.TypeIds.push_back(0);
}
void MachineModuleInfo::addSEHCatchHandler(MachineBasicBlock *LandingPad,
const Function *Filter,
const BlockAddress *RecoverBA) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
SEHHandler Handler;
Handler.FilterOrFinally = Filter;
Handler.RecoverBA = RecoverBA;
LP.SEHHandlers.push_back(Handler);
}
void MachineModuleInfo::addSEHCleanupHandler(MachineBasicBlock *LandingPad,
const Function *Cleanup) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
SEHHandler Handler;
Handler.FilterOrFinally = Cleanup;
Handler.RecoverBA = nullptr;
LP.SEHHandlers.push_back(Handler);
}
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
/// pads.
void MachineModuleInfo::TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap) {
for (unsigned i = 0; i != LandingPads.size(); ) {
LandingPadInfo &LandingPad = LandingPads[i];
if (LandingPad.LandingPadLabel &&
!LandingPad.LandingPadLabel->isDefined() &&
(!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
LandingPad.LandingPadLabel = nullptr;
// Special case: we *should* keep landing pads whose LandingPadBlock is null;
// those represent the "nounwind" case. Only erase pads that lost their label
// but still have a block.
if (!LandingPad.LandingPadLabel && LandingPad.LandingPadBlock) {
LandingPads.erase(LandingPads.begin() + i);
continue;
}
for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
MCSymbol *EndLabel = LandingPad.EndLabels[j];
if ((BeginLabel->isDefined() ||
(LPMap && (*LPMap)[BeginLabel] != 0)) &&
(EndLabel->isDefined() ||
(LPMap && (*LPMap)[EndLabel] != 0))) continue;
LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
--j, --e;
}
// Remove landing pads with no try-ranges.
if (LandingPads[i].BeginLabels.empty()) {
LandingPads.erase(LandingPads.begin() + i);
continue;
}
// If there is no landing pad, ensure that the list of typeids is empty.
// If the only typeid is a cleanup, this is the same as having no typeids.
if (!LandingPad.LandingPadBlock ||
(LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
LandingPad.TypeIds.clear();
++i;
}
}
/// setCallSiteLandingPad - Map the landing pad's EH symbol to the call site
/// indexes.
void MachineModuleInfo::setCallSiteLandingPad(MCSymbol *Sym,
ArrayRef<unsigned> Sites) {
LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
/// function wide.
unsigned MachineModuleInfo::getTypeIDFor(const GlobalValue *TI) {
for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
if (TypeInfos[i] == TI) return i + 1;
TypeInfos.push_back(TI);
return TypeInfos.size();
}
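// Illustrative trace (types A and B here are hypothetical, not from this
// file): starting from an empty TypeInfos, getTypeIDFor(A) appends A and
// returns 1, getTypeIDFor(B) returns 2, and a second getTypeIDFor(A) finds
// the existing entry and returns 1 again. Type ids are 1-based because id 0
// is reserved to mean "cleanup" (see addCleanup above).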
/// getFilterIDFor - Return the filter id for the specified typeinfos. This is
/// function wide.
int MachineModuleInfo::getFilterIDFor(std::vector<unsigned> &TyIds) {
// If the new filter coincides with the tail of an existing filter, then
// re-use the existing filter. Folding filters more than this requires
// re-ordering filters and/or their elements - probably not worth it.
for (std::vector<unsigned>::iterator I = FilterEnds.begin(),
E = FilterEnds.end(); I != E; ++I) {
unsigned i = *I, j = TyIds.size();
while (i && j)
if (FilterIds[--i] != TyIds[--j])
goto try_next;
if (!j)
// The new filter coincides with range [i, end) of the existing filter.
return -(1 + i);
try_next:;
}
// Add the new filter.
int FilterID = -(1 + FilterIds.size());
FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
FilterIds.insert(FilterIds.end(), TyIds.begin(), TyIds.end());
FilterEnds.push_back(FilterIds.size());
FilterIds.push_back(0); // terminator
return FilterID;
}
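// Worked example of the encoding (hypothetical type ids): starting from
// empty tables, getFilterIDFor({1, 2}) finds no tail match, returns
// -(1 + 0) = -1, and leaves FilterIds = [1, 2, 0] with FilterEnds = [2].
// A later getFilterIDFor({2}) matches the tail of that filter at i == 1 and
// returns -(1 + 1) = -2 without growing the tables. Filter ids are negative
// so they cannot collide with the positive type ids above.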
/// getPersonality - Return the personality function for the current function.
const Function *MachineModuleInfo::getPersonality() const {
for (const LandingPadInfo &LPI : LandingPads)
if (LPI.Personality)
return LPI.Personality;
return nullptr;
}
EHPersonality MachineModuleInfo::getPersonalityType() {
if (PersonalityTypeCache == EHPersonality::Unknown) {
if (const Function *F = getPersonality())
PersonalityTypeCache = classifyEHPersonality(F);
}
return PersonalityTypeCache;
}
/// getPersonalityIndex - Return the unique index for the current personality
/// function. The NULL/first personality function should always get index zero.
unsigned MachineModuleInfo::getPersonalityIndex() const {
const Function* Personality = nullptr;
// Scan landing pads. If there is at least one non-NULL personality - use it.
for (unsigned i = 0, e = LandingPads.size(); i != e; ++i)
if (LandingPads[i].Personality) {
Personality = LandingPads[i].Personality;
break;
}
for (unsigned i = 0, e = Personalities.size(); i < e; ++i) {
if (Personalities[i] == Personality)
return i;
}
// This will happen if the current personality function is
// at the zero index.
return 0;
}
const Function *MachineModuleInfo::getWinEHParent(const Function *F) const {
StringRef WinEHParentName =
F->getFnAttribute("wineh-parent").getValueAsString();
if (WinEHParentName.empty() || WinEHParentName == F->getName())
return F;
return F->getParent()->getFunction(WinEHParentName);
}
WinEHFuncInfo &MachineModuleInfo::getWinEHFuncInfo(const Function *F) {
auto &Ptr = FuncInfoMap[getWinEHParent(F)];
if (!Ptr)
Ptr.reset(new WinEHFuncInfo);
return *Ptr;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachinePassRegistry.cpp | //===-- CodeGen/MachinePassRegistry.cpp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the machine function pass registry for register allocators
// and instruction schedulers.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachinePassRegistry.h"
using namespace llvm;
void MachinePassRegistryListener::anchor() { }
/// setDefault - Set the default constructor by name.
void MachinePassRegistry::setDefault(StringRef Name) {
MachinePassCtor Ctor = nullptr;
for(MachinePassRegistryNode *R = getList(); R; R = R->getNext()) {
if (R->getName() == Name) {
Ctor = R->getCtor();
break;
}
}
assert(Ctor && "Unregistered pass name");
setDefault(Ctor);
}
/// Add - Adds a function pass to the registration list.
///
void MachinePassRegistry::Add(MachinePassRegistryNode *Node) {
Node->setNext(List);
List = Node;
if (Listener) Listener->NotifyAdd(Node->getName(),
Node->getCtor(),
Node->getDescription());
}
/// Remove - Removes a function pass from the registration list.
///
void MachinePassRegistry::Remove(MachinePassRegistryNode *Node) {
for (MachinePassRegistryNode **I = &List; *I; I = (*I)->getNextAddress()) {
if (*I == Node) {
if (Listener) Listener->NotifyRemove(Node->getName());
*I = (*I)->getNext();
break;
}
}
}
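// Targets normally reach Add/Remove through a wrapper such as
// RegisterRegAlloc, whose constructor registers the node and whose
// destructor unregisters it. A minimal sketch (the "myalloc" name and
// factory function are made up for illustration):
//
// static RegisterRegAlloc myRegAlloc("myalloc", "my register allocator",
// createMyRegisterAllocator);
//
// The corresponding command line option (e.g. -regalloc=myalloc) then
// selects this constructor via the registry.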
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineRegisterInfo.cpp | //===-- lib/CodeGen/MachineRegisterInfo.cpp -------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the MachineRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
// Pin the vtable to this file.
void MachineRegisterInfo::Delegate::anchor() {}
MachineRegisterInfo::MachineRegisterInfo(const MachineFunction *MF)
: MF(MF), TheDelegate(nullptr), IsSSA(true), TracksLiveness(true),
TracksSubRegLiveness(false) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
UsedRegUnits.resize(getTargetRegisterInfo()->getNumRegUnits());
UsedPhysRegMask.resize(getTargetRegisterInfo()->getNumRegs());
// Create the physreg use/def lists.
PhysRegUseDefLists.resize(getTargetRegisterInfo()->getNumRegs(), nullptr);
}
/// setRegClass - Set the register class of the specified virtual register.
///
void
MachineRegisterInfo::setRegClass(unsigned Reg, const TargetRegisterClass *RC) {
assert(RC && RC->isAllocatable() && "Invalid RC for virtual register");
VRegInfo[Reg].first = RC;
}
const TargetRegisterClass *
MachineRegisterInfo::constrainRegClass(unsigned Reg,
const TargetRegisterClass *RC,
unsigned MinNumRegs) {
const TargetRegisterClass *OldRC = getRegClass(Reg);
if (OldRC == RC)
return RC;
const TargetRegisterClass *NewRC =
getTargetRegisterInfo()->getCommonSubClass(OldRC, RC);
if (!NewRC || NewRC == OldRC)
return NewRC;
if (NewRC->getNumRegs() < MinNumRegs)
return nullptr;
setRegClass(Reg, NewRC);
return NewRC;
}
bool
MachineRegisterInfo::recomputeRegClass(unsigned Reg) {
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
const TargetRegisterClass *OldRC = getRegClass(Reg);
const TargetRegisterClass *NewRC =
getTargetRegisterInfo()->getLargestLegalSuperClass(OldRC, *MF);
// Stop early if there is no room to grow.
if (NewRC == OldRC)
return false;
// Accumulate constraints from all uses.
for (MachineOperand &MO : reg_nodbg_operands(Reg)) {
// Apply the effect of the given operand to NewRC.
MachineInstr *MI = MO.getParent();
unsigned OpNo = &MO - &MI->getOperand(0);
NewRC = MI->getRegClassConstraintEffect(OpNo, NewRC, TII,
getTargetRegisterInfo());
if (!NewRC || NewRC == OldRC)
return false;
}
setRegClass(Reg, NewRC);
return true;
}
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
///
unsigned
MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){
assert(RegClass && "Cannot create register without RegClass!");
assert(RegClass->isAllocatable() &&
"Virtual register RegClass must be allocatable.");
// New virtual register number.
unsigned Reg = TargetRegisterInfo::index2VirtReg(getNumVirtRegs());
VRegInfo.grow(Reg);
VRegInfo[Reg].first = RegClass;
RegAllocHints.grow(Reg);
if (TheDelegate)
TheDelegate->MRI_NoteNewVirtualRegister(Reg);
return Reg;
}
/// clearVirtRegs - Remove all virtual registers (after physreg assignment).
void MachineRegisterInfo::clearVirtRegs() {
#ifndef NDEBUG
for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (!VRegInfo[Reg].second)
continue;
verifyUseList(Reg);
llvm_unreachable("Remaining virtual register operands");
}
#endif
VRegInfo.clear();
}
void MachineRegisterInfo::verifyUseList(unsigned Reg) const {
#ifndef NDEBUG
bool Valid = true;
for (MachineOperand &M : reg_operands(Reg)) {
MachineOperand *MO = &M;
MachineInstr *MI = MO->getParent();
if (!MI) {
errs() << PrintReg(Reg, getTargetRegisterInfo())
<< " use list MachineOperand " << MO
<< " has no parent instruction.\n";
Valid = false;
continue;
}
MachineOperand *MO0 = &MI->getOperand(0);
unsigned NumOps = MI->getNumOperands();
if (!(MO >= MO0 && MO < MO0+NumOps)) {
errs() << PrintReg(Reg, getTargetRegisterInfo())
<< " use list MachineOperand " << MO
<< " doesn't belong to parent MI: " << *MI;
Valid = false;
}
if (!MO->isReg()) {
errs() << PrintReg(Reg, getTargetRegisterInfo())
<< " MachineOperand " << MO << ": " << *MO
<< " is not a register\n";
Valid = false;
}
if (MO->getReg() != Reg) {
errs() << PrintReg(Reg, getTargetRegisterInfo())
<< " use-list MachineOperand " << MO << ": "
<< *MO << " is the wrong register\n";
Valid = false;
}
}
assert(Valid && "Invalid use list");
#endif
}
void MachineRegisterInfo::verifyUseLists() const {
#ifndef NDEBUG
for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i)
verifyUseList(TargetRegisterInfo::index2VirtReg(i));
for (unsigned i = 1, e = getTargetRegisterInfo()->getNumRegs(); i != e; ++i)
verifyUseList(i);
#endif
}
/// Add MO to the linked list of operands for its register.
void MachineRegisterInfo::addRegOperandToUseList(MachineOperand *MO) {
assert(!MO->isOnRegUseList() && "Already on list");
MachineOperand *&HeadRef = getRegUseDefListHead(MO->getReg());
MachineOperand *const Head = HeadRef;
// Head points to the first list element.
// Next is NULL on the last list element.
// Prev pointers are circular, so Head->Prev == Last.
// Head is NULL for an empty list.
if (!Head) {
MO->Contents.Reg.Prev = MO;
MO->Contents.Reg.Next = nullptr;
HeadRef = MO;
return;
}
assert(MO->getReg() == Head->getReg() && "Different regs on the same list!");
// Insert MO between Last and Head in the circular Prev chain.
MachineOperand *Last = Head->Contents.Reg.Prev;
assert(Last && "Inconsistent use list");
assert(MO->getReg() == Last->getReg() && "Different regs on the same list!");
Head->Contents.Reg.Prev = MO;
MO->Contents.Reg.Prev = Last;
// Def operands always precede uses. This allows def_iterator to stop early.
// Insert def operands at the front, and use operands at the back.
if (MO->isDef()) {
// Insert def at the front.
MO->Contents.Reg.Next = Head;
HeadRef = MO;
} else {
// Insert use at the end.
MO->Contents.Reg.Next = nullptr;
Last->Contents.Reg.Next = MO;
}
}
/// Remove MO from its use-def list.
void MachineRegisterInfo::removeRegOperandFromUseList(MachineOperand *MO) {
assert(MO->isOnRegUseList() && "Operand not on use list");
MachineOperand *&HeadRef = getRegUseDefListHead(MO->getReg());
MachineOperand *const Head = HeadRef;
assert(Head && "List already empty");
// Unlink this from the doubly linked list of operands.
MachineOperand *Next = MO->Contents.Reg.Next;
MachineOperand *Prev = MO->Contents.Reg.Prev;
// Prev links are circular, next link is NULL instead of looping back to Head.
if (MO == Head)
HeadRef = Next;
else
Prev->Contents.Reg.Next = Next;
(Next ? Next : Head)->Contents.Reg.Prev = Prev;
MO->Contents.Reg.Prev = nullptr;
MO->Contents.Reg.Next = nullptr;
}
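// Shape of the list after adding Def, Use1, Use2 in that order (a sketch,
// not a trace of any particular function):
//
// Next links: Head -> Def -> Use1 -> Use2 -> null
// Prev links: Def.Prev = Use2, Use1.Prev = Def, Use2.Prev = Use1
//
// Head->Prev therefore reaches the last element in O(1), while the Next
// chain stays null-terminated for cheap forward iteration.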
/// Move NumOps operands from Src to Dst, updating use-def lists as needed.
///
/// The Dst range is assumed to be uninitialized memory. (Or it may contain
/// operands that won't be destroyed, which is OK because the MO destructor is
/// trivial anyway).
///
/// The Src and Dst ranges may overlap.
void MachineRegisterInfo::moveOperands(MachineOperand *Dst,
MachineOperand *Src,
unsigned NumOps) {
assert(Src != Dst && NumOps && "Noop moveOperands");
// Copy backwards if Dst is within the Src range.
int Stride = 1;
if (Dst >= Src && Dst < Src + NumOps) {
Stride = -1;
Dst += NumOps - 1;
Src += NumOps - 1;
}
// Copy one operand at a time.
do {
new (Dst) MachineOperand(*Src);
// Dst takes Src's place in the use-def chain.
if (Src->isReg()) {
MachineOperand *&Head = getRegUseDefListHead(Src->getReg());
MachineOperand *Prev = Src->Contents.Reg.Prev;
MachineOperand *Next = Src->Contents.Reg.Next;
assert(Head && "List empty, but operand is chained");
assert(Prev && "Operand was not on use-def list");
// Prev links are circular, next link is NULL instead of looping back to
// Head.
if (Src == Head)
Head = Dst;
else
Prev->Contents.Reg.Next = Dst;
// Update Prev pointer. This also works when Src was pointing to itself
// in a 1-element list. In that case Head == Dst.
(Next ? Next : Head)->Contents.Reg.Prev = Dst;
}
Dst += Stride;
Src += Stride;
} while (--NumOps);
}
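// Overlap example (hypothetical operand array): moving NumOps = 3 operands
// to Dst = Src + 1 would clobber Src[1] and Src[2] if copied forwards, so
// the loop above starts at the last operand and walks down with
// Stride = -1; non-overlapping or downward moves keep Stride = 1.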
/// replaceRegWith - Replace all instances of FromReg with ToReg in the
/// machine function. This is like llvm-level X->replaceAllUsesWith(Y),
/// except that it also changes any definitions of the register as well.
/// If ToReg is a physical register we apply the sub register to obtain the
/// final/proper physical register.
void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
assert(FromReg != ToReg && "Cannot replace a reg with itself");
const TargetRegisterInfo *TRI = getTargetRegisterInfo();
// TODO: This could be more efficient by bulk changing the operands.
for (reg_iterator I = reg_begin(FromReg), E = reg_end(); I != E; ) {
MachineOperand &O = *I;
++I;
if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
O.substPhysReg(ToReg, *TRI);
} else {
O.setReg(ToReg);
}
}
}
/// getVRegDef - Return the machine instr that defines the specified virtual
/// register or null if none is found. This assumes that the code is in SSA
/// form, so there should only be one definition.
MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const {
// Since we are in SSA form, we can use the first definition.
def_instr_iterator I = def_instr_begin(Reg);
assert((I.atEnd() || std::next(I) == def_instr_end()) &&
"getVRegDef assumes a single definition or no definition");
return !I.atEnd() ? &*I : nullptr;
}
/// getUniqueVRegDef - Return the unique machine instr that defines the
/// specified virtual register or null if none is found. If there are
/// multiple definitions or no definition, return null.
MachineInstr *MachineRegisterInfo::getUniqueVRegDef(unsigned Reg) const {
if (def_empty(Reg)) return nullptr;
def_instr_iterator I = def_instr_begin(Reg);
if (std::next(I) != def_instr_end())
return nullptr;
return &*I;
}
bool MachineRegisterInfo::hasOneNonDBGUse(unsigned RegNo) const {
use_nodbg_iterator UI = use_nodbg_begin(RegNo);
if (UI == use_nodbg_end())
return false;
return ++UI == use_nodbg_end();
}
/// clearKillFlags - Iterate over all the uses of the given register and
/// clear the kill flag from the MachineOperand. This function is used by
/// optimization passes which extend register lifetimes and need only
/// preserve conservative kill flag information.
void MachineRegisterInfo::clearKillFlags(unsigned Reg) const {
for (MachineOperand &MO : use_operands(Reg))
MO.setIsKill(false);
}
bool MachineRegisterInfo::isLiveIn(unsigned Reg) const {
for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I)
if (I->first == Reg || I->second == Reg)
return true;
return false;
}
/// getLiveInPhysReg - If VReg is a live-in virtual register, return the
/// corresponding live-in physical register.
unsigned MachineRegisterInfo::getLiveInPhysReg(unsigned VReg) const {
for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I)
if (I->second == VReg)
return I->first;
return 0;
}
/// getLiveInVirtReg - If PReg is a live-in physical register, return the
/// corresponding live-in virtual register.
unsigned MachineRegisterInfo::getLiveInVirtReg(unsigned PReg) const {
for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I)
if (I->first == PReg)
return I->second;
return 0;
}
/// EmitLiveInCopies - Emit copies to initialize livein virtual registers
/// into the given entry block.
void
MachineRegisterInfo::EmitLiveInCopies(MachineBasicBlock *EntryMBB,
const TargetRegisterInfo &TRI,
const TargetInstrInfo &TII) {
// Emit the copies into the top of the block.
for (unsigned i = 0, e = LiveIns.size(); i != e; ++i)
if (LiveIns[i].second) {
if (use_empty(LiveIns[i].second)) {
// The livein has no uses. Drop it.
//
// It would be preferable to have isel avoid creating live-in
// records for unused arguments in the first place, but it's
// complicated by the debug info code for arguments.
LiveIns.erase(LiveIns.begin() + i);
--i; --e;
} else {
// Emit a copy.
BuildMI(*EntryMBB, EntryMBB->begin(), DebugLoc(),
TII.get(TargetOpcode::COPY), LiveIns[i].second)
.addReg(LiveIns[i].first);
// Add the register to the entry block live-in set.
EntryMBB->addLiveIn(LiveIns[i].first);
}
} else {
// Add the register to the entry block live-in set.
EntryMBB->addLiveIn(LiveIns[i].first);
}
}
unsigned MachineRegisterInfo::getMaxLaneMaskForVReg(unsigned Reg) const
{
// Lane masks are only defined for vregs.
assert(TargetRegisterInfo::isVirtualRegister(Reg));
const TargetRegisterClass &TRC = *getRegClass(Reg);
return TRC.getLaneMask();
}
#ifndef NDEBUG
void MachineRegisterInfo::dumpUses(unsigned Reg) const {
for (MachineInstr &I : use_instructions(Reg))
I.dump();
}
#endif
void MachineRegisterInfo::freezeReservedRegs(const MachineFunction &MF) {
ReservedRegs = getTargetRegisterInfo()->getReservedRegs(MF);
assert(ReservedRegs.size() == getTargetRegisterInfo()->getNumRegs() &&
"Invalid ReservedRegs vector from target");
}
bool MachineRegisterInfo::isConstantPhysReg(unsigned PhysReg,
const MachineFunction &MF) const {
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
// Check if any overlapping register is modified, or allocatable so it may be
// used later.
for (MCRegAliasIterator AI(PhysReg, getTargetRegisterInfo(), true);
AI.isValid(); ++AI)
if (!def_empty(*AI) || isAllocatable(*AI))
return false;
return true;
}
/// markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the
/// specified register as undefined which causes the DBG_VALUE to be
/// deleted during LiveDebugVariables analysis.
void MachineRegisterInfo::markUsesInDebugValueAsUndef(unsigned Reg) const {
// Mark any DBG_VALUE that uses Reg as undef (but don't delete it).
MachineRegisterInfo::use_instr_iterator nextI;
for (use_instr_iterator I = use_instr_begin(Reg), E = use_instr_end();
I != E; I = nextI) {
nextI = std::next(I); // I is invalidated by the setReg
MachineInstr *UseMI = &*I;
if (UseMI->isDebugValue())
UseMI->getOperand(0).setReg(0U);
}
}
static const Function *getCalledFunction(const MachineInstr &MI) {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isGlobal())
continue;
const Function *Func = dyn_cast<Function>(MO.getGlobal());
if (Func != nullptr)
return Func;
}
return nullptr;
}
static bool isNoReturnDef(const MachineOperand &MO) {
// Anything which is not a noreturn function is a real def.
const MachineInstr &MI = *MO.getParent();
if (!MI.isCall())
return false;
const MachineBasicBlock &MBB = *MI.getParent();
if (!MBB.succ_empty())
return false;
const MachineFunction &MF = *MBB.getParent();
// We need to keep correct unwind information even if the function will
// not return, since the runtime may need it.
if (MF.getFunction()->hasFnAttribute(Attribute::UWTable))
return false;
const Function *Called = getCalledFunction(MI);
if (Called == nullptr || !Called->hasFnAttribute(Attribute::NoReturn)
|| !Called->hasFnAttribute(Attribute::NoUnwind))
return false;
return true;
}
bool MachineRegisterInfo::isPhysRegModified(unsigned PhysReg) const {
if (UsedPhysRegMask.test(PhysReg))
return true;
const TargetRegisterInfo *TRI = getTargetRegisterInfo();
for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
for (const MachineOperand &MO : make_range(def_begin(*AI), def_end())) {
if (isNoReturnDef(MO))
continue;
return true;
}
}
return false;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/TargetInstrInfo.cpp | //===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
"disable-sched-hazard", cl::Hidden, cl::init(false),
cl::desc("Disable hazard detection during preRA scheduling"));
TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF) const {
if (OpNum >= MCID.getNumOperands())
return nullptr;
short RegClass = MCID.OpInfo[OpNum].RegClass;
if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
return TRI->getPointerRegClass(MF, RegClass);
// Instructions like INSERT_SUBREG do not have fixed register classes.
if (RegClass < 0)
return nullptr;
// Otherwise just look it up normally.
return TRI->getRegClass(RegClass);
}
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
llvm_unreachable("Target didn't implement insertNoop!");
}
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const {
// Count the number of instructions in the asm.
bool atInsnStart = true;
unsigned Length = 0;
for (; *Str; ++Str) {
if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
strlen(MAI.getSeparatorString())) == 0)
atInsnStart = true;
// Check for a comment before charging any length: comment text must not be
// counted as an instruction, so clear atInsnStart first.
if (atInsnStart && strncmp(Str, MAI.getCommentString(),
strlen(MAI.getCommentString())) == 0)
atInsnStart = false;
if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
Length += MAI.getMaxInstLength();
atInsnStart = false;
}
}
return Length;
}
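// Example (assuming MAI.getMaxInstLength() == 4 and newline separators):
// "mov r0, r1\nadd r2, r3" starts two instructions, giving an estimate of
// 8 bytes. The result is deliberately conservative: every instruction is
// charged the maximum length regardless of its actual encoding.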
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
MachineBasicBlock *NewDest) const {
MachineBasicBlock *MBB = Tail->getParent();
// Remove all the old successors of MBB from the CFG.
while (!MBB->succ_empty())
MBB->removeSuccessor(MBB->succ_begin());
// Remove all the dead instructions from the end of MBB.
MBB->erase(Tail, MBB->end());
// If NewDest isn't the layout successor of MBB, insert a branch to it.
if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
Tail->getDebugLoc());
MBB->addSuccessor(NewDest);
}
// commuteInstruction - The default implementation of this method just exchanges
// the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
bool NewMI) const {
const MCInstrDesc &MCID = MI->getDesc();
bool HasDef = MCID.getNumDefs();
if (HasDef && !MI->getOperand(0).isReg())
// No idea how to commute this instruction. Target should implement its own.
return nullptr;
unsigned Idx1, Idx2;
if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
assert(MI->isCommutable() && "Precondition violation: MI must be commutable.");
return nullptr;
}
assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
"This only knows how to commute register operands so far");
unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
unsigned Reg1 = MI->getOperand(Idx1).getReg();
unsigned Reg2 = MI->getOperand(Idx2).getReg();
unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
bool Reg1IsKill = MI->getOperand(Idx1).isKill();
bool Reg2IsKill = MI->getOperand(Idx2).isKill();
bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
// If destination is tied to either of the commuted source register, then
// it must be updated.
if (HasDef && Reg0 == Reg1 &&
MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
Reg2IsKill = false;
Reg0 = Reg2;
SubReg0 = SubReg2;
} else if (HasDef && Reg0 == Reg2 &&
MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
Reg1IsKill = false;
Reg0 = Reg1;
SubReg0 = SubReg1;
}
if (NewMI) {
// Create a new instruction.
MachineFunction &MF = *MI->getParent()->getParent();
MI = MF.CloneMachineInstr(MI);
}
if (HasDef) {
MI->getOperand(0).setReg(Reg0);
MI->getOperand(0).setSubReg(SubReg0);
}
MI->getOperand(Idx2).setReg(Reg1);
MI->getOperand(Idx1).setReg(Reg2);
MI->getOperand(Idx2).setSubReg(SubReg1);
MI->getOperand(Idx1).setSubReg(SubReg2);
MI->getOperand(Idx2).setIsKill(Reg1IsKill);
MI->getOperand(Idx1).setIsKill(Reg2IsKill);
MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
return MI;
}
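// Example of the tied-def fixup above (%v0 and %v1 are hypothetical virtual
// registers): commuting "%v0 = add %v0, %v1", where operand Idx1 is tied to
// the def, rewrites the def too, yielding "%v1 = add %v1, %v0", and clears
// the kill flag on the %v1 use since that register now also carries the
// result.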
/// findCommutedOpIndices - If specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
assert(!MI->isBundle() &&
"TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isCommutable())
return false;
// This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
// is not true, then the target must implement this.
SrcOpIdx1 = MCID.getNumDefs();
SrcOpIdx2 = SrcOpIdx1 + 1;
if (!MI->getOperand(SrcOpIdx1).isReg() ||
!MI->getOperand(SrcOpIdx2).isReg())
// No idea.
return false;
return true;
}
bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
if (!MI->isTerminator()) return false;
// Conditional branch is a special case.
if (MI->isBranch() && !MI->isBarrier())
return true;
if (!MI->isPredicable())
return true;
return !isPredicated(MI);
}
bool TargetInstrInfo::PredicateInstruction(
MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
bool MadeChange = false;
assert(!MI->isBundle() &&
"TargetInstrInfo::PredicateInstruction() can't handle bundles");
const MCInstrDesc &MCID = MI->getDesc();
if (!MI->isPredicable())
return false;
for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (MCID.OpInfo[i].isPredicate()) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg()) {
MO.setReg(Pred[j].getReg());
MadeChange = true;
} else if (MO.isImm()) {
MO.setImm(Pred[j].getImm());
MadeChange = true;
} else if (MO.isMBB()) {
MO.setMBB(Pred[j].getMBB());
MadeChange = true;
}
++j;
}
}
return MadeChange;
}
bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const {
for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
oe = MI->memoperands_end();
o != oe;
++o) {
if ((*o)->isLoad()) {
if (const FixedStackPseudoSourceValue *Value =
dyn_cast_or_null<FixedStackPseudoSourceValue>(
(*o)->getPseudoValue())) {
FrameIndex = Value->getFrameIndex();
MMO = *o;
return true;
}
}
}
return false;
}
bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const {
for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
oe = MI->memoperands_end();
o != oe;
++o) {
if ((*o)->isStore()) {
if (const FixedStackPseudoSourceValue *Value =
dyn_cast_or_null<FixedStackPseudoSourceValue>(
(*o)->getPseudoValue())) {
FrameIndex = Value->getFrameIndex();
MMO = *o;
return true;
}
}
}
return false;
}
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
unsigned SubIdx, unsigned &Size,
unsigned &Offset,
const MachineFunction &MF) const {
if (!SubIdx) {
Size = RC->getSize();
Offset = 0;
return true;
}
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
// Convert bit size to byte size to be consistent with
// MCRegisterClass::getSize().
if (BitSize % 8)
return false;
int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
if (BitOffset < 0 || BitOffset % 8)
return false;
Size = BitSize /= 8;
Offset = (unsigned)BitOffset / 8;
assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
if (!MF.getTarget().getDataLayout()->isLittleEndian()) {
Offset = RC->getSize() - (Offset + Size);
}
return true;
}
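// Worked example (assuming an 8-byte register class and a sub-register
// index covering bits [32, 64)): BitSize = 32 and BitOffset = 32 yield
// Size = 4, Offset = 4 on a little-endian target; a big-endian target
// flips this to Offset = 8 - (4 + 4) = 0, since the high part of the
// register lives at the low address of the slot.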
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,
unsigned SubIdx,
const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
}
bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1,
const MachineRegisterInfo *MRI) const {
return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
MachineFunction &MF) const {
assert(!Orig->isNotDuplicable() &&
"Instruction cannot be duplicated");
return MF.CloneMachineInstr(Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
unsigned FoldIdx) {
assert(MI->isCopy() && "MI must be a COPY instruction");
if (MI->getNumOperands() != 2)
return nullptr;
assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
if (FoldOp.getSubReg() || LiveOp.getSubReg())
return nullptr;
unsigned FoldReg = FoldOp.getReg();
unsigned LiveReg = LiveOp.getReg();
assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
"Cannot fold physregs");
const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
return RC->contains(LiveOp.getReg()) ? RC : nullptr;
if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
return RC;
// FIXME: Allow folding when register classes are memory compatible.
return nullptr;
}
void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
llvm_unreachable("Not a MachO target");
}
bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
ArrayRef<unsigned> Ops) const {
return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, int FrameIndex,
const TargetInstrInfo &TII) {
unsigned StartIdx = 0;
switch (MI->getOpcode()) {
case TargetOpcode::STACKMAP:
StartIdx = 2; // Skip ID, nShadowBytes.
break;
case TargetOpcode::PATCHPOINT: {
// For PatchPoint, the call args are not foldable.
PatchPointOpers opers(MI);
StartIdx = opers.getVarIdx();
break;
}
default:
llvm_unreachable("unexpected stackmap opcode");
}
// Bail out (return nullptr) if any operands requested for folding are not
// foldable (not part of the stackmap's live values).
for (unsigned Op : Ops) {
if (Op < StartIdx)
return nullptr;
}
MachineInstr *NewMI =
MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
// No need to fold the return value, metadata, or function arguments.
for (unsigned i = 0; i < StartIdx; ++i)
MIB.addOperand(MI->getOperand(i));
for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
MachineOperand &MO = MI->getOperand(i);
if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
unsigned SpillSize;
unsigned SpillOffset;
// Compute the spill slot size and offset.
const TargetRegisterClass *RC =
MF.getRegInfo().getRegClass(MO.getReg());
bool Valid =
TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
if (!Valid)
report_fatal_error("cannot spill patchpoint subregister operand");
MIB.addImm(StackMaps::IndirectMemRefOp);
MIB.addImm(SpillSize);
MIB.addFrameIndex(FrameIndex);
MIB.addImm(SpillOffset);
}
else
MIB.addOperand(MO);
}
return NewMI;
}
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
ArrayRef<unsigned> Ops,
int FI) const {
unsigned Flags = 0;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (MI->getOperand(Ops[i]).isDef())
Flags |= MachineMemOperand::MOStore;
else
Flags |= MachineMemOperand::MOLoad;
MachineBasicBlock *MBB = MI->getParent();
assert(MBB && "foldMemoryOperand needs an inserted instruction");
MachineFunction &MF = *MBB->getParent();
MachineInstr *NewMI = nullptr;
if (MI->getOpcode() == TargetOpcode::STACKMAP ||
MI->getOpcode() == TargetOpcode::PATCHPOINT) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
if (NewMI)
MBB->insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
}
if (NewMI) {
NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
NewMI->mayStore()) &&
"Folded a def to a non-store!");
assert((!(Flags & MachineMemOperand::MOLoad) ||
NewMI->mayLoad()) &&
"Folded a use to a non-load!");
const MachineFrameInfo &MFI = *MF.getFrameInfo();
assert(MFI.getObjectOffset(FI) != -1);
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
Flags, MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
NewMI->addMemOperand(MF, MMO);
return NewMI;
}
// Straight COPY may fold as load/store.
if (!MI->isCopy() || Ops.size() != 1)
return nullptr;
const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
if (!RC)
return nullptr;
const MachineOperand &MO = MI->getOperand(1-Ops[0]);
MachineBasicBlock::iterator Pos = MI;
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (Flags == MachineMemOperand::MOStore)
storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
else
loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
return --Pos;
}
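// Example of the COPY fall-through above (hypothetical vregs): folding the
// def operand of "%v0 = COPY %v1" into frame index FI emits a store of %v1
// to FI, while folding the use operand emits a load from FI into %v0; the
// returned iterator points at the newly inserted memory instruction.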
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load or store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
ArrayRef<unsigned> Ops,
MachineInstr *LoadMI) const {
assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
MachineBasicBlock &MBB = *MI->getParent();
MachineFunction &MF = *MBB.getParent();
// Ask the target to do the actual folding.
MachineInstr *NewMI = nullptr;
int FrameIndex = 0;
if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
isLoadFromStackSlot(LoadMI, FrameIndex)) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
if (NewMI)
NewMI = MBB.insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
}
if (!NewMI) return nullptr;
// Copy the memoperands from the load to the folded instruction.
if (MI->memoperands_empty()) {
NewMI->setMemRefs(LoadMI->memoperands_begin(),
LoadMI->memoperands_end());
}
else {
// Handle the rare case of folding multiple loads.
NewMI->setMemRefs(MI->memoperands_begin(),
MI->memoperands_end());
for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
E = LoadMI->memoperands_end(); I != E; ++I) {
NewMI->addMemOperand(MF, *I);
}
}
return NewMI;
}
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
AliasAnalysis *AA) const {
const MachineFunction &MF = *MI->getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Remat clients assume operand 0 is the defined register.
if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
return false;
unsigned DefReg = MI->getOperand(0).getReg();
// A sub-register definition can only be rematerialized if the instruction
// doesn't read the other parts of the register. Otherwise it is really a
// read-modify-write operation on the full virtual register which cannot be
// moved safely.
if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
return false;
// A load from a fixed stack slot can be rematerialized. This may be
// redundant with subsequent checks, but it's target-independent,
// simple, and a common case.
int FrameIdx = 0;
if (isLoadFromStackSlot(MI, FrameIdx) &&
MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
return true;
// Avoid instructions obviously unsafe for remat.
if (MI->isNotDuplicable() || MI->mayStore() ||
MI->hasUnmodeledSideEffects())
return false;
// Don't remat inline asm. We have no idea how expensive it is
// even if it's side effect free.
if (MI->isInlineAsm())
return false;
// Avoid instructions which load from potentially varying memory.
if (MI->mayLoad() && !MI->isInvariantLoad(AA))
return false;
// If any of the registers accessed are non-constant, conservatively assume
// the instruction is not rematerializable.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0)
continue;
// Check for a well-behaved physical register.
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
if (!MRI.isConstantPhysReg(Reg, MF))
return false;
} else {
// A physreg def. We can't remat it.
return false;
}
continue;
}
// Only allow one virtual-register def. There may be multiple defs of the
// same virtual register, though.
if (MO.isDef() && Reg != DefReg)
return false;
// Don't allow any virtual-register uses. Rematting an instruction with
// virtual register uses would lengthen the live ranges of the uses, which
// is not necessarily a good idea, certainly not "trivial".
if (MO.isUse())
return false;
}
// Everything checked out.
return true;
}
int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
const MachineFunction *MF = MI->getParent()->getParent();
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
bool StackGrowsDown =
TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
if (MI->getOpcode() != FrameSetupOpcode &&
MI->getOpcode() != FrameDestroyOpcode)
return 0;
int SPAdj = MI->getOperand(0).getImm();
if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
(StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
SPAdj = -SPAdj;
return SPAdj;
}
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const {
// Terminators and labels can't be scheduled around.
if (MI->isTerminator() || MI->isPosition())
return true;
// Don't attempt to schedule around any instruction that defines
// a stack-oriented pointer, as it's unlikely to be profitable. This
// saves compile time, because it doesn't require every single
// stack slot reference to depend on the instruction that does the
// modification.
const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
return true;
return false;
}
// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
return !DisableHazardRecognizer;
}
// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
const ScheduleDAG *DAG) const {
// Dummy hazard recognizer allows all instructions to issue.
return new ScheduleHazardRecognizer();
}
// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG) const {
return (ScheduleHazardRecognizer *)
new ScoreboardHazardRecognizer(II, DAG, "misched");
}
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG) const {
return (ScheduleHazardRecognizer *)
new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//
int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
SDNode *UseNode, unsigned UseIdx) const {
if (!ItinData || ItinData->isEmpty())
return -1;
if (!DefNode->isMachineOpcode())
return -1;
unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
if (!UseNode->isMachineOpcode())
return ItinData->getOperandCycle(DefClass, DefIdx);
unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
SDNode *N) const {
if (!ItinData || ItinData->isEmpty())
return 1;
if (!N->isMachineOpcode())
return 1;
return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}
//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//
unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
const MachineInstr *MI) const {
if (!ItinData || ItinData->isEmpty())
return 1;
unsigned Class = MI->getDesc().getSchedClass();
int UOps = ItinData->Itineraries[Class].NumMicroOps;
if (UOps >= 0)
return UOps;
// The # of u-ops is dynamically determined. The specific target should
// override this function to return the right number.
return 1;
}
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
const MachineInstr *DefMI) const {
if (DefMI->isTransient())
return 0;
if (DefMI->mayLoad())
return SchedModel.LoadLatency;
if (isHighLatencyDef(DefMI->getOpcode()))
return SchedModel.HighLatency;
return 1;
}
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
return 0;
}
unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
// Default to one cycle for no itinerary. However, an "empty" itinerary may
// still have a MinLatency property, which getStageLatency checks.
if (!ItinData)
return MI->mayLoad() ? 2 : 1;
return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI,
unsigned DefIdx) const {
const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
if (!ItinData || ItinData->isEmpty())
return false;
unsigned DefClass = DefMI->getDesc().getSchedClass();
int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
return (DefCycle != -1 && DefCycle <= 1);
}
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const {
unsigned DefClass = DefMI->getDesc().getSchedClass();
unsigned UseClass = UseMI->getDesc().getSchedClass();
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
const InstrItineraryData *ItinData,
const MachineInstr *DefMI) const {
// Let the target hook getInstrLatency handle missing itineraries.
if (!ItinData)
return getInstrLatency(ItinData, DefMI);
if(ItinData->isEmpty())
return defaultDefLatency(ItinData->SchedModel, DefMI);
// ...operand lookup required
return -1;
}
/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// Minimum latency is used for scheduling groups, while expected latency is
/// for instruction cost and critical path.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const {
int DefLatency = computeDefOperandLatency(ItinData, DefMI);
if (DefLatency >= 0)
return DefLatency;
assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
int OperLatency = 0;
if (UseMI)
OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
else {
unsigned DefClass = DefMI->getDesc().getSchedClass();
OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
}
if (OperLatency >= 0)
return OperLatency;
// No operand latency was found.
unsigned InstrLatency = getInstrLatency(ItinData, DefMI);
// Expected latency is the max of the stage latency and itinerary props.
InstrLatency = std::max(InstrLatency,
defaultDefLatency(ItinData->SchedModel, DefMI));
return InstrLatency;
}
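// Summary of the lookup order above (no new behavior, just the control
// flow): missing or empty itineraries are answered by
// computeDefOperandLatency; otherwise a per-operand itinerary entry wins if
// one exists; otherwise the result is the larger of the stage latency and
// defaultDefLatency.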
bool TargetInstrInfo::getRegSequenceInputs(
const MachineInstr &MI, unsigned DefIdx,
SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
assert((MI.isRegSequence() ||
MI.isRegSequenceLike()) && "Instruction does not have the proper type");
if (!MI.isRegSequence())
return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
// We are looking at:
// Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
OpIdx += 2) {
const MachineOperand &MOReg = MI.getOperand(OpIdx);
const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
assert(MOSubIdx.isImm() &&
"One of the subindex of the reg_sequence is not an immediate");
// Record Reg:SubReg, SubIdx.
InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
(unsigned)MOSubIdx.getImm()));
}
return true;
}
bool TargetInstrInfo::getExtractSubregInputs(
const MachineInstr &MI, unsigned DefIdx,
RegSubRegPairAndIdx &InputReg) const {
assert((MI.isExtractSubreg() ||
MI.isExtractSubregLike()) && "Instruction does not have the proper type");
if (!MI.isExtractSubreg())
return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
// We are looking at:
// Def = EXTRACT_SUBREG v0.sub1, sub0.
assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
const MachineOperand &MOReg = MI.getOperand(1);
const MachineOperand &MOSubIdx = MI.getOperand(2);
assert(MOSubIdx.isImm() &&
"The subindex of the extract_subreg is not an immediate");
InputReg.Reg = MOReg.getReg();
InputReg.SubReg = MOReg.getSubReg();
InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
return true;
}
bool TargetInstrInfo::getInsertSubregInputs(
const MachineInstr &MI, unsigned DefIdx,
RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
assert((MI.isInsertSubreg() ||
MI.isInsertSubregLike()) && "Instruction does not have the proper type");
if (!MI.isInsertSubreg())
return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
// We are looking at:
// Def = INSERT_SUBREG v0, v1, sub0.
assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
const MachineOperand &MOBaseReg = MI.getOperand(1);
const MachineOperand &MOInsertedReg = MI.getOperand(2);
const MachineOperand &MOSubIdx = MI.getOperand(3);
assert(MOSubIdx.isImm() &&
"One of the subindex of the reg_sequence is not an immediate");
BaseReg.Reg = MOBaseReg.getReg();
BaseReg.SubReg = MOBaseReg.getSubReg();
InsertedReg.Reg = MOInsertedReg.getReg();
InsertedReg.SubReg = MOInsertedReg.getSubReg();
InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
return true;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/TargetLoweringObjectFileImpl.cpp | //===-- llvm/CodeGen/TargetLoweringObjectFileImpl.cpp - Object File Info --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements classes used to handle lowerings specific to common
// object file formats.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
using namespace dwarf;
//===----------------------------------------------------------------------===//
// ELF
//===----------------------------------------------------------------------===//
MCSymbol *TargetLoweringObjectFileELF::getCFIPersonalitySymbol(
const GlobalValue *GV, Mangler &Mang, const TargetMachine &TM,
MachineModuleInfo *MMI) const {
unsigned Encoding = getPersonalityEncoding();
if ((Encoding & 0x80) == dwarf::DW_EH_PE_indirect)
return getContext().getOrCreateSymbol(StringRef("DW.ref.") +
TM.getSymbol(GV, Mang)->getName());
if ((Encoding & 0x70) == dwarf::DW_EH_PE_absptr)
return TM.getSymbol(GV, Mang);
report_fatal_error("We do not support this DWARF encoding yet!");
}
void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer,
const TargetMachine &TM,
const MCSymbol *Sym) const {
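// Emit a weak, hidden "DW.ref.<personality>" pointer in its own grouped
// .data.DW.ref.<name> section so duplicate references from different
// translation units fold into a single copy at link time.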
SmallString<64> NameData("DW.ref.");
NameData += Sym->getName();
MCSymbolELF *Label =
cast<MCSymbolELF>(getContext().getOrCreateSymbol(NameData));
Streamer.EmitSymbolAttribute(Label, MCSA_Hidden);
Streamer.EmitSymbolAttribute(Label, MCSA_Weak);
StringRef Prefix = ".data.";
NameData.insert(NameData.begin(), Prefix.begin(), Prefix.end());
unsigned Flags = ELF::SHF_ALLOC | ELF::SHF_WRITE | ELF::SHF_GROUP;
MCSection *Sec = getContext().getELFSection(NameData, ELF::SHT_PROGBITS,
Flags, 0, Label->getName());
unsigned Size = TM.getDataLayout()->getPointerSize();
Streamer.SwitchSection(Sec);
Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment());
Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject);
const MCExpr *E = MCConstantExpr::create(Size, getContext());
Streamer.emitELFSize(Label, E);
Streamer.EmitLabel(Label);
Streamer.EmitSymbolValue(Sym, Size);
}
const MCExpr *TargetLoweringObjectFileELF::getTTypeGlobalReference(
const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
const TargetMachine &TM, MachineModuleInfo *MMI,
MCStreamer &Streamer) const {
if (Encoding & dwarf::DW_EH_PE_indirect) {
MachineModuleInfoELF &ELFMMI = MMI->getObjFileInfo<MachineModuleInfoELF>();
MCSymbol *SSym = getSymbolWithGlobalValueBase(GV, ".DW.stub", Mang, TM);
// Add information about the stub reference to ELFMMI so that the stub
// gets emitted by the asmprinter.
MachineModuleInfoImpl::StubValueTy &StubSym = ELFMMI.getGVStubEntry(SSym);
if (!StubSym.getPointer()) {
MCSymbol *Sym = TM.getSymbol(GV, Mang);
StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
return TargetLoweringObjectFile::
getTTypeReference(MCSymbolRefExpr::create(SSym, getContext()),
Encoding & ~dwarf::DW_EH_PE_indirect, Streamer);
}
return TargetLoweringObjectFile::
getTTypeGlobalReference(GV, Encoding, Mang, TM, MMI, Streamer);
}
static SectionKind
getELFKindForNamedSection(StringRef Name, SectionKind K) {
// N.B.: The defaults used in here are not the same ones used in MC.
// We follow gcc, MC follows gas. For example, given ".section .eh_frame",
// both gas and MC will produce a section with no flags. Given
// section(".eh_frame") gcc will produce:
//
// .section .eh_frame,"a",@progbits
if (Name.empty() || Name[0] != '.') return K;
// Some lame default implementation based on some magic section names.
if (Name == ".bss" ||
Name.startswith(".bss.") ||
Name.startswith(".gnu.linkonce.b.") ||
Name.startswith(".llvm.linkonce.b.") ||
Name == ".sbss" ||
Name.startswith(".sbss.") ||
Name.startswith(".gnu.linkonce.sb.") ||
Name.startswith(".llvm.linkonce.sb."))
return SectionKind::getBSS();
if (Name == ".tdata" ||
Name.startswith(".tdata.") ||
Name.startswith(".gnu.linkonce.td.") ||
Name.startswith(".llvm.linkonce.td."))
return SectionKind::getThreadData();
if (Name == ".tbss" ||
Name.startswith(".tbss.") ||
Name.startswith(".gnu.linkonce.tb.") ||
Name.startswith(".llvm.linkonce.tb."))
return SectionKind::getThreadBSS();
return K;
}
static unsigned getELFSectionType(StringRef Name, SectionKind K) {
if (Name == ".init_array")
return ELF::SHT_INIT_ARRAY;
if (Name == ".fini_array")
return ELF::SHT_FINI_ARRAY;
if (Name == ".preinit_array")
return ELF::SHT_PREINIT_ARRAY;
if (K.isBSS() || K.isThreadBSS())
return ELF::SHT_NOBITS;
return ELF::SHT_PROGBITS;
}
static unsigned getELFSectionFlags(SectionKind K) {
unsigned Flags = 0;
if (!K.isMetadata())
Flags |= ELF::SHF_ALLOC;
if (K.isText())
Flags |= ELF::SHF_EXECINSTR;
if (K.isWriteable())
Flags |= ELF::SHF_WRITE;
if (K.isThreadLocal())
Flags |= ELF::SHF_TLS;
if (K.isMergeableCString() || K.isMergeableConst())
Flags |= ELF::SHF_MERGE;
if (K.isMergeableCString())
Flags |= ELF::SHF_STRINGS;
return Flags;
}
static const Comdat *getELFComdat(const GlobalValue *GV) {
const Comdat *C = GV->getComdat();
if (!C)
return nullptr;
if (C->getSelectionKind() != Comdat::Any)
report_fatal_error("ELF COMDATs only support SelectionKind::Any, '" +
C->getName() + "' cannot be lowered.");
return C;
}
MCSection *TargetLoweringObjectFileELF::getExplicitSectionGlobal(
const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const {
StringRef SectionName = GV->getSection();
// Infer section flags from the section name if we can.
Kind = getELFKindForNamedSection(SectionName, Kind);
StringRef Group = "";
unsigned Flags = getELFSectionFlags(Kind);
if (const Comdat *C = getELFComdat(GV)) {
Group = C->getName();
Flags |= ELF::SHF_GROUP;
}
return getContext().getELFSection(SectionName,
getELFSectionType(SectionName, Kind), Flags,
/*EntrySize=*/0, Group);
}
/// Return the section prefix name used by options FunctionsSections and
/// DataSections.
static StringRef getSectionPrefixForGlobal(SectionKind Kind) {
if (Kind.isText())
return ".text";
if (Kind.isReadOnly())
return ".rodata";
if (Kind.isBSS())
return ".bss";
if (Kind.isThreadData())
return ".tdata";
if (Kind.isThreadBSS())
return ".tbss";
if (Kind.isDataNoRel())
return ".data";
if (Kind.isDataRelLocal())
return ".data.rel.local";
if (Kind.isDataRel())
return ".data.rel";
if (Kind.isReadOnlyWithRelLocal())
return ".data.rel.ro.local";
assert(Kind.isReadOnlyWithRel() && "Unknown section kind");
return ".data.rel.ro";
}
static MCSectionELF *
selectELFSectionForGlobal(MCContext &Ctx, const GlobalValue *GV,
SectionKind Kind, Mangler &Mang,
const TargetMachine &TM, bool EmitUniqueSection,
unsigned Flags, unsigned *NextUniqueID) {
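// SHF_MERGE sections carry an entry size (sh_entsize) so the linker can
// deduplicate their fixed-size entries.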
unsigned EntrySize = 0;
if (Kind.isMergeableCString()) {
if (Kind.isMergeable2ByteCString()) {
EntrySize = 2;
} else if (Kind.isMergeable4ByteCString()) {
EntrySize = 4;
} else {
EntrySize = 1;
assert(Kind.isMergeable1ByteCString() && "unknown string width");
}
} else if (Kind.isMergeableConst()) {
if (Kind.isMergeableConst4()) {
EntrySize = 4;
} else if (Kind.isMergeableConst8()) {
EntrySize = 8;
} else {
assert(Kind.isMergeableConst16() && "unknown data width");
EntrySize = 16;
}
}
StringRef Group = "";
if (const Comdat *C = getELFComdat(GV)) {
Flags |= ELF::SHF_GROUP;
Group = C->getName();
}
bool UniqueSectionNames = TM.getUniqueSectionNames();
SmallString<128> Name;
if (Kind.isMergeableCString()) {
// We also need alignment here.
// FIXME: this is getting the alignment of the character, not the
// alignment of the global!
unsigned Align =
TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV));
std::string SizeSpec = ".rodata.str" + utostr(EntrySize) + ".";
Name = SizeSpec + utostr(Align);
} else if (Kind.isMergeableConst()) {
Name = ".rodata.cst";
Name += utostr(EntrySize);
} else {
Name = getSectionPrefixForGlobal(Kind);
}
if (EmitUniqueSection && UniqueSectionNames) {
Name.push_back('.');
TM.getNameWithPrefix(Name, GV, Mang, true);
}
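// An ID of ~0 means the section is identified by its name alone.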
unsigned UniqueID = ~0;
if (EmitUniqueSection && !UniqueSectionNames) {
UniqueID = *NextUniqueID;
(*NextUniqueID)++;
}
return Ctx.getELFSection(Name, getELFSectionType(Name, Kind), Flags,
EntrySize, Group, UniqueID);
}
MCSection *TargetLoweringObjectFileELF::SelectSectionForGlobal(
const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const {
unsigned Flags = getELFSectionFlags(Kind);
// If we have -ffunction-sections or -fdata-sections then we should emit the
// global value to a uniqued section specifically for it.
bool EmitUniqueSection = false;
if (!(Flags & ELF::SHF_MERGE) && !Kind.isCommon()) {
if (Kind.isText())
EmitUniqueSection = TM.getFunctionSections();
else
EmitUniqueSection = TM.getDataSections();
}
EmitUniqueSection |= GV->hasComdat();
return selectELFSectionForGlobal(getContext(), GV, Kind, Mang, TM,
EmitUniqueSection, Flags, &NextUniqueID);
}
MCSection *TargetLoweringObjectFileELF::getSectionForJumpTable(
const Function &F, Mangler &Mang, const TargetMachine &TM) const {
// If the function can be removed, produce a unique section so that
// the table doesn't prevent the removal.
const Comdat *C = F.getComdat();
bool EmitUniqueSection = TM.getFunctionSections() || C;
if (!EmitUniqueSection)
return ReadOnlySection;
return selectELFSectionForGlobal(getContext(), &F, SectionKind::getReadOnly(),
Mang, TM, EmitUniqueSection, ELF::SHF_ALLOC,
&NextUniqueID);
}
bool TargetLoweringObjectFileELF::shouldPutJumpTableInFunctionSection(
bool UsesLabelDifference, const Function &F) const {
// We can always create relative relocations, so use another section
// that can be marked non-executable.
return false;
}
/// Given a mergeable constant with the specified size and relocation
/// information, return a section that it should be placed in.
MCSection *
TargetLoweringObjectFileELF::getSectionForConstant(SectionKind Kind,
const Constant *C) const {
if (Kind.isMergeableConst4() && MergeableConst4Section)
return MergeableConst4Section;
if (Kind.isMergeableConst8() && MergeableConst8Section)
return MergeableConst8Section;
if (Kind.isMergeableConst16() && MergeableConst16Section)
return MergeableConst16Section;
if (Kind.isReadOnly())
return ReadOnlySection;
if (Kind.isReadOnlyWithRelLocal()) return DataRelROLocalSection;
assert(Kind.isReadOnlyWithRel() && "Unknown section kind");
return DataRelROSection;
}
static MCSectionELF *getStaticStructorSection(MCContext &Ctx, bool UseInitArray,
bool IsCtor, unsigned Priority,
const MCSymbol *KeySym) {
std::string Name;
unsigned Type;
unsigned Flags = ELF::SHF_ALLOC | ELF::SHF_WRITE;
StringRef COMDAT = KeySym ? KeySym->getName() : "";
if (KeySym)
Flags |= ELF::SHF_GROUP;
if (UseInitArray) {
if (IsCtor) {
Type = ELF::SHT_INIT_ARRAY;
Name = ".init_array";
} else {
Type = ELF::SHT_FINI_ARRAY;
Name = ".fini_array";
}
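// 65535 is the default priority and gets no numeric suffix. For example, a
// constructor with priority 101 lands in .init_array.101; the legacy .ctors
// scheme below inverts the numbering, giving .ctors.65434.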
if (Priority != 65535) {
Name += '.';
Name += utostr(Priority);
}
} else {
// The default scheme is .ctor / .dtor, so we have to invert the priority
// numbering.
if (IsCtor)
Name = ".ctors";
else
Name = ".dtors";
if (Priority != 65535) {
Name += '.';
Name += utostr(65535 - Priority);
}
Type = ELF::SHT_PROGBITS;
}
return Ctx.getELFSection(Name, Type, Flags, 0, COMDAT);
}
MCSection *TargetLoweringObjectFileELF::getStaticCtorSection(
unsigned Priority, const MCSymbol *KeySym) const {
return getStaticStructorSection(getContext(), UseInitArray, true, Priority,
KeySym);
}
MCSection *TargetLoweringObjectFileELF::getStaticDtorSection(
unsigned Priority, const MCSymbol *KeySym) const {
return getStaticStructorSection(getContext(), UseInitArray, false, Priority,
KeySym);
}
void
TargetLoweringObjectFileELF::InitializeELF(bool UseInitArray_) {
UseInitArray = UseInitArray_;
if (!UseInitArray)
return;
StaticCtorSection = getContext().getELFSection(
".init_array", ELF::SHT_INIT_ARRAY, ELF::SHF_WRITE | ELF::SHF_ALLOC);
StaticDtorSection = getContext().getELFSection(
".fini_array", ELF::SHT_FINI_ARRAY, ELF::SHF_WRITE | ELF::SHF_ALLOC);
}
//===----------------------------------------------------------------------===//
// MachO
//===----------------------------------------------------------------------===//
TargetLoweringObjectFileMachO::TargetLoweringObjectFileMachO()
: TargetLoweringObjectFile() {
SupportIndirectSymViaGOTPCRel = true;
}
/// emitModuleFlags - Perform code emission for module flags.
void TargetLoweringObjectFileMachO::
emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
Mangler &Mang, const TargetMachine &TM) const {
unsigned VersionVal = 0;
unsigned ImageInfoFlags = 0;
MDNode *LinkerOptions = nullptr;
StringRef SectionVal;
for (ArrayRef<Module::ModuleFlagEntry>::iterator
i = ModuleFlags.begin(), e = ModuleFlags.end(); i != e; ++i) {
const Module::ModuleFlagEntry &MFE = *i;
// Ignore flags with 'Require' behavior.
if (MFE.Behavior == Module::Require)
continue;
StringRef Key = MFE.Key->getString();
Metadata *Val = MFE.Val;
if (Key == "Objective-C Image Info Version") {
VersionVal = mdconst::extract<ConstantInt>(Val)->getZExtValue();
} else if (Key == "Objective-C Garbage Collection" ||
Key == "Objective-C GC Only" ||
Key == "Objective-C Is Simulated" ||
Key == "Objective-C Image Swift Version") {
ImageInfoFlags |= mdconst::extract<ConstantInt>(Val)->getZExtValue();
} else if (Key == "Objective-C Image Info Section") {
SectionVal = cast<MDString>(Val)->getString();
} else if (Key == "Linker Options") {
LinkerOptions = cast<MDNode>(Val);
}
}
// Emit the linker options if present.
if (LinkerOptions) {
for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) {
MDNode *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i));
SmallVector<std::string, 4> StrOptions;
// Convert to strings.
for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) {
MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii));
StrOptions.push_back(MDOption->getString());
}
Streamer.EmitLinkerOptions(StrOptions);
}
}
// The section is mandatory. If we don't have it, then we don't have GC info.
if (SectionVal.empty()) return;
StringRef Segment, Section;
unsigned TAA = 0, StubSize = 0;
bool TAAParsed;
std::string ErrorCode =
MCSectionMachO::ParseSectionSpecifier(SectionVal, Segment, Section,
TAA, TAAParsed, StubSize);
if (!ErrorCode.empty())
// If invalid, report the error with report_fatal_error.
report_fatal_error("Invalid section specifier '" + Section + "': " +
ErrorCode + ".");
// Get the section.
MCSectionMachO *S = getContext().getMachOSection(
Segment, Section, TAA, StubSize, SectionKind::getDataNoRel());
Streamer.SwitchSection(S);
Streamer.EmitLabel(getContext().
getOrCreateSymbol(StringRef("L_OBJC_IMAGE_INFO")));
Streamer.EmitIntValue(VersionVal, 4);
Streamer.EmitIntValue(ImageInfoFlags, 4);
Streamer.AddBlankLine();
}
static void checkMachOComdat(const GlobalValue *GV) {
const Comdat *C = GV->getComdat();
if (!C)
return;
report_fatal_error("MachO doesn't support COMDATs, '" + C->getName() +
"' cannot be lowered.");
}
MCSection *TargetLoweringObjectFileMachO::getExplicitSectionGlobal(
const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const {
// Parse the section specifier and create it if valid.
StringRef Segment, Section;
unsigned TAA = 0, StubSize = 0;
bool TAAParsed;
checkMachOComdat(GV);
std::string ErrorCode =
MCSectionMachO::ParseSectionSpecifier(GV->getSection(), Segment, Section,
TAA, TAAParsed, StubSize);
if (!ErrorCode.empty()) {
// If invalid, report the error with report_fatal_error.
report_fatal_error("Global variable '" + GV->getName() +
"' has an invalid section specifier '" +
GV->getSection() + "': " + ErrorCode + ".");
}
// Get the section.
MCSectionMachO *S =
getContext().getMachOSection(Segment, Section, TAA, StubSize, Kind);
// If TAA wasn't set by ParseSectionSpecifier() above,
// use the value returned by getMachOSection() as a default.
if (!TAAParsed)
TAA = S->getTypeAndAttributes();
// Okay, now that we got the section, verify that the TAA & StubSize agree.
// If the user declared multiple globals with different section flags, we need
// to reject it here.
if (S->getTypeAndAttributes() != TAA || S->getStubSize() != StubSize) {
// If invalid, report the error with report_fatal_error.
report_fatal_error("Global variable '" + GV->getName() +
"' section type or attributes does not match previous"
" section specifier");
}
return S;
}
MCSection *TargetLoweringObjectFileMachO::SelectSectionForGlobal(
const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const {
checkMachOComdat(GV);
// Handle thread local data.
if (Kind.isThreadBSS()) return TLSBSSSection;
if (Kind.isThreadData()) return TLSDataSection;
if (Kind.isText())
return GV->isWeakForLinker() ? TextCoalSection : TextSection;
// If this is weak/linkonce, put this in a coalescable section, either in text
// or data depending on if it is writable.
if (GV->isWeakForLinker()) {
if (Kind.isReadOnly())
return ConstTextCoalSection;
return DataCoalSection;
}
// FIXME: Alignment check should be handled by section classifier.
if (Kind.isMergeable1ByteCString() &&
TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
return CStringSection;
// Do not put 16-bit arrays in the UString section if they have an
// externally visible label; this runs into issues with certain linker
// versions.
if (Kind.isMergeable2ByteCString() && !GV->hasExternalLinkage() &&
TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
return UStringSection;
// With MachO, only variables whose corresponding symbol starts with 'l' or
// 'L' can be merged, so we only try merging GVs with private linkage.
if (GV->hasPrivateLinkage() && Kind.isMergeableConst()) {
if (Kind.isMergeableConst4())
return FourByteConstantSection;
if (Kind.isMergeableConst8())
return EightByteConstantSection;
if (Kind.isMergeableConst16())
return SixteenByteConstantSection;
}
// Otherwise, if it is readonly, but not something we can specially optimize,
// just drop it in .const.
if (Kind.isReadOnly())
return ReadOnlySection;
// If this is marked const, put it into a const section. But if the dynamic
// linker needs to write to it, put it in the data segment.
if (Kind.isReadOnlyWithRel())
return ConstDataSection;
// Put zero initialized globals with strong external linkage in the
// DATA, __common section with the .zerofill directive.
if (Kind.isBSSExtern())
return DataCommonSection;
// Put zero initialized globals with local linkage in __DATA,__bss directive
// with the .zerofill directive (aka .lcomm).
if (Kind.isBSSLocal())
return DataBSSSection;
// Otherwise, just drop the variable in the normal data section.
return DataSection;
}
MCSection *
TargetLoweringObjectFileMachO::getSectionForConstant(SectionKind Kind,
const Constant *C) const {
// If this constant requires a relocation, we have to put it in the data
// segment, not in the text segment.
if (Kind.isDataRel() || Kind.isReadOnlyWithRel())
return ConstDataSection;
if (Kind.isMergeableConst4())
return FourByteConstantSection;
if (Kind.isMergeableConst8())
return EightByteConstantSection;
if (Kind.isMergeableConst16())
return SixteenByteConstantSection;
return ReadOnlySection; // .const
}
const MCExpr *TargetLoweringObjectFileMachO::getTTypeGlobalReference(
const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
const TargetMachine &TM, MachineModuleInfo *MMI,
MCStreamer &Streamer) const {
// The mach-o version of this method defaults to returning a stub reference.
if (Encoding & DW_EH_PE_indirect) {
MachineModuleInfoMachO &MachOMMI =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
MCSymbol *SSym =
getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr", Mang, TM);
// Add information about the stub reference to MachOMMI so that the stub
// gets emitted by the asmprinter.
MachineModuleInfoImpl::StubValueTy &StubSym =
GV->hasHiddenVisibility() ? MachOMMI.getHiddenGVStubEntry(SSym) :
MachOMMI.getGVStubEntry(SSym);
if (!StubSym.getPointer()) {
MCSymbol *Sym = TM.getSymbol(GV, Mang);
StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
return TargetLoweringObjectFile::
getTTypeReference(MCSymbolRefExpr::create(SSym, getContext()),
Encoding & ~dwarf::DW_EH_PE_indirect, Streamer);
}
return TargetLoweringObjectFile::getTTypeGlobalReference(GV, Encoding, Mang,
TM, MMI, Streamer);
}
MCSymbol *TargetLoweringObjectFileMachO::getCFIPersonalitySymbol(
const GlobalValue *GV, Mangler &Mang, const TargetMachine &TM,
MachineModuleInfo *MMI) const {
// The mach-o version of this method defaults to returning a stub reference.
MachineModuleInfoMachO &MachOMMI =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
MCSymbol *SSym = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr", Mang, TM);
// Add information about the stub reference to MachOMMI so that the stub
// gets emitted by the asmprinter.
MachineModuleInfoImpl::StubValueTy &StubSym = MachOMMI.getGVStubEntry(SSym);
if (!StubSym.getPointer()) {
MCSymbol *Sym = TM.getSymbol(GV, Mang);
StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
return SSym;
}
const MCExpr *TargetLoweringObjectFileMachO::getIndirectSymViaGOTPCRel(
const MCSymbol *Sym, const MCValue &MV, int64_t Offset,
MachineModuleInfo *MMI, MCStreamer &Streamer) const {
// Although MachO 32-bit targets do not explicitly have a GOTPCREL relocation
// as 64-bit do, we replace the GOT equivalent by accessing the final symbol
// through a non_lazy_ptr stub instead. One advantage is that it allows the
// computation of deltas to final external symbols. Example:
//
// _extgotequiv:
// .long _extfoo
//
// _delta:
// .long _extgotequiv-_delta
//
// is transformed to:
//
// _delta:
// .long L_extfoo$non_lazy_ptr-(_delta+0)
//
// .section __IMPORT,__pointers,non_lazy_symbol_pointers
// L_extfoo$non_lazy_ptr:
// .indirect_symbol _extfoo
// .long 0
//
MachineModuleInfoMachO &MachOMMI =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
MCContext &Ctx = getContext();
// The offset must consider the original displacement from the base symbol
// since 32-bit targets don't have a GOTPCREL to fold the PC displacement.
Offset = -MV.getConstant();
const MCSymbol *BaseSym = &MV.getSymB()->getSymbol();
// Access the final symbol via sym$non_lazy_ptr and generate the appropriate
// non_lazy_ptr stubs.
SmallString<128> Name;
StringRef Suffix = "$non_lazy_ptr";
Name += DL->getPrivateGlobalPrefix();
Name += Sym->getName();
Name += Suffix;
MCSymbol *Stub = Ctx.getOrCreateSymbol(Name);
MachineModuleInfoImpl::StubValueTy &StubSym = MachOMMI.getGVStubEntry(Stub);
if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::
StubValueTy(const_cast<MCSymbol *>(Sym), true /* access indirectly */);
const MCExpr *BSymExpr =
MCSymbolRefExpr::create(BaseSym, MCSymbolRefExpr::VK_None, Ctx);
const MCExpr *LHS =
MCSymbolRefExpr::create(Stub, MCSymbolRefExpr::VK_None, Ctx);
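// Emit "Stub - BaseSym", adding the original constant displacement to the
// base when one is present.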
if (!Offset)
return MCBinaryExpr::createSub(LHS, BSymExpr, Ctx);
const MCExpr *RHS =
MCBinaryExpr::createAdd(BSymExpr, MCConstantExpr::create(Offset, Ctx), Ctx);
return MCBinaryExpr::createSub(LHS, RHS, Ctx);
}
//===----------------------------------------------------------------------===//
// COFF
//===----------------------------------------------------------------------===//
static unsigned
getCOFFSectionFlags(SectionKind K) {
unsigned Flags = 0;
if (K.isMetadata())
Flags |=
COFF::IMAGE_SCN_MEM_DISCARDABLE;
else if (K.isText())
Flags |=
COFF::IMAGE_SCN_MEM_EXECUTE |
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_CNT_CODE;
else if (K.isBSS())
Flags |=
COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_MEM_WRITE;
else if (K.isThreadLocal())
Flags |=
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_MEM_WRITE;
else if (K.isReadOnly() || K.isReadOnlyWithRel())
Flags |=
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ;
else if (K.isWriteable())
Flags |=
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_MEM_WRITE;
return Flags;
}
static const GlobalValue *getComdatGVForCOFF(const GlobalValue *GV) {
const Comdat *C = GV->getComdat();
assert(C && "expected GV to have a Comdat!");
StringRef ComdatGVName = C->getName();
const GlobalValue *ComdatGV = GV->getParent()->getNamedValue(ComdatGVName);
if (!ComdatGV)
report_fatal_error("Associative COMDAT symbol '" + ComdatGVName +
"' does not exist.");
if (ComdatGV->getComdat() != C)
report_fatal_error("Associative COMDAT symbol '" + ComdatGVName +
"' is not a key for its COMDAT.");
return ComdatGV;
}
static int getSelectionForCOFF(const GlobalValue *GV) {
if (const Comdat *C = GV->getComdat()) {
const GlobalValue *ComdatKey = getComdatGVForCOFF(GV);
if (const auto *GA = dyn_cast<GlobalAlias>(ComdatKey))
ComdatKey = GA->getBaseObject();
if (ComdatKey == GV) {
switch (C->getSelectionKind()) {
case Comdat::Any:
return COFF::IMAGE_COMDAT_SELECT_ANY;
case Comdat::ExactMatch:
return COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH;
case Comdat::Largest:
return COFF::IMAGE_COMDAT_SELECT_LARGEST;
case Comdat::NoDuplicates:
return COFF::IMAGE_COMDAT_SELECT_NODUPLICATES;
case Comdat::SameSize:
return COFF::IMAGE_COMDAT_SELECT_SAME_SIZE;
}
} else {
return COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE;
}
}
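// Not in a COMDAT: a selection of 0 means none applies.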
return 0;
}
MCSection *TargetLoweringObjectFileCOFF::getExplicitSectionGlobal(
const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const {
int Selection = 0;
unsigned Characteristics = getCOFFSectionFlags(Kind);
StringRef Name = GV->getSection();
StringRef COMDATSymName = "";
if (GV->hasComdat()) {
Selection = getSelectionForCOFF(GV);
const GlobalValue *ComdatGV;
if (Selection == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE)
ComdatGV = getComdatGVForCOFF(GV);
else
ComdatGV = GV;
if (!ComdatGV->hasPrivateLinkage()) {
MCSymbol *Sym = TM.getSymbol(ComdatGV, Mang);
COMDATSymName = Sym->getName();
Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
} else {
Selection = 0;
}
}
return getContext().getCOFFSection(Name,
Characteristics,
Kind,
COMDATSymName,
Selection);
}
static const char *getCOFFSectionNameForUniqueGlobal(SectionKind Kind) {
if (Kind.isText())
return ".text";
if (Kind.isBSS())
return ".bss";
if (Kind.isThreadLocal())
return ".tls$";
if (Kind.isReadOnly() || Kind.isReadOnlyWithRel())
return ".rdata";
return ".data";
}
MCSection *TargetLoweringObjectFileCOFF::SelectSectionForGlobal(
const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const {
// If we have -ffunction-sections then we should emit the global value to a
// uniqued section specifically for it.
bool EmitUniquedSection;
if (Kind.isText())
EmitUniquedSection = TM.getFunctionSections();
else
EmitUniquedSection = TM.getDataSections();
if ((EmitUniquedSection && !Kind.isCommon()) || GV->hasComdat()) {
const char *Name = getCOFFSectionNameForUniqueGlobal(Kind);
unsigned Characteristics = getCOFFSectionFlags(Kind);
Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
int Selection = getSelectionForCOFF(GV);
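// A uniqued global without an explicit COMDAT must still not be merged
// with any other definition.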
if (!Selection)
Selection = COFF::IMAGE_COMDAT_SELECT_NODUPLICATES;
const GlobalValue *ComdatGV;
if (GV->hasComdat())
ComdatGV = getComdatGVForCOFF(GV);
else
ComdatGV = GV;
if (!ComdatGV->hasPrivateLinkage()) {
MCSymbol *Sym = TM.getSymbol(ComdatGV, Mang);
StringRef COMDATSymName = Sym->getName();
return getContext().getCOFFSection(Name, Characteristics, Kind,
COMDATSymName, Selection);
} else {
SmallString<256> TmpData;
getNameWithPrefix(TmpData, GV, /*CannotUsePrivateLabel=*/true, Mang, TM);
return getContext().getCOFFSection(Name, Characteristics, Kind, TmpData,
Selection);
}
}
if (Kind.isText())
return TextSection;
if (Kind.isThreadLocal())
return TLSDataSection;
if (Kind.isReadOnly() || Kind.isReadOnlyWithRel())
return ReadOnlySection;
// Note: we claim that common symbols are put in BSSSection, but they are
// really emitted with the magic .comm directive, which creates a symbol table
// entry but not a section.
if (Kind.isBSS() || Kind.isCommon())
return BSSSection;
return DataSection;
}
void TargetLoweringObjectFileCOFF::getNameWithPrefix(
SmallVectorImpl<char> &OutName, const GlobalValue *GV,
bool CannotUsePrivateLabel, Mangler &Mang, const TargetMachine &TM) const {
if (GV->hasPrivateLinkage() &&
((isa<Function>(GV) && TM.getFunctionSections()) ||
(isa<GlobalVariable>(GV) && TM.getDataSections())))
CannotUsePrivateLabel = true;
Mang.getNameWithPrefix(OutName, GV, CannotUsePrivateLabel);
}
MCSection *TargetLoweringObjectFileCOFF::getSectionForJumpTable(
const Function &F, Mangler &Mang, const TargetMachine &TM) const {
// If the function can be removed, produce a unique section so that
// the table doesn't prevent the removal.
const Comdat *C = F.getComdat();
bool EmitUniqueSection = TM.getFunctionSections() || C;
if (!EmitUniqueSection)
return ReadOnlySection;
// FIXME: we should produce a symbol for F instead.
if (F.hasPrivateLinkage())
return ReadOnlySection;
MCSymbol *Sym = TM.getSymbol(&F, Mang);
StringRef COMDATSymName = Sym->getName();
SectionKind Kind = SectionKind::getReadOnly();
const char *Name = getCOFFSectionNameForUniqueGlobal(Kind);
unsigned Characteristics = getCOFFSectionFlags(Kind);
Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
return getContext().getCOFFSection(Name, Characteristics, Kind, COMDATSymName,
COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE);
}
void TargetLoweringObjectFileCOFF::
emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
Mangler &Mang, const TargetMachine &TM) const {
MDNode *LinkerOptions = nullptr;
// Look for the "Linker Options" flag, since it's the only one we support.
for (ArrayRef<Module::ModuleFlagEntry>::iterator
i = ModuleFlags.begin(), e = ModuleFlags.end(); i != e; ++i) {
const Module::ModuleFlagEntry &MFE = *i;
StringRef Key = MFE.Key->getString();
Metadata *Val = MFE.Val;
if (Key == "Linker Options") {
LinkerOptions = cast<MDNode>(Val);
break;
}
}
if (!LinkerOptions)
return;
// Emit the linker options to the linker .drectve section. According to the
// spec, this section is a space-separated string containing flags for the linker.
MCSection *Sec = getDrectveSection();
Streamer.SwitchSection(Sec);
for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) {
MDNode *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i));
for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) {
MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii));
// Lead with a space for consistency with our dllexport implementation.
std::string Directive(" ");
Directive.append(MDOption->getString());
Streamer.EmitBytes(Directive);
}
}
}
MCSection *TargetLoweringObjectFileCOFF::getStaticCtorSection(
unsigned Priority, const MCSymbol *KeySym) const {
return getContext().getAssociativeCOFFSection(
cast<MCSectionCOFF>(StaticCtorSection), KeySym);
}
MCSection *TargetLoweringObjectFileCOFF::getStaticDtorSection(
unsigned Priority, const MCSymbol *KeySym) const {
return getContext().getAssociativeCOFFSection(
cast<MCSectionCOFF>(StaticDtorSection), KeySym);
}
void TargetLoweringObjectFileCOFF::emitLinkerFlagsForGlobal(
raw_ostream &OS, const GlobalValue *GV, const Mangler &Mang) const {
if (!GV->hasDLLExportStorageClass() || GV->isDeclaration())
return;
const Triple &TT = getTargetTriple();
if (TT.isKnownWindowsMSVCEnvironment())
OS << " /EXPORT:";
else
OS << " -export:";
if (TT.isWindowsGNUEnvironment() || TT.isWindowsCygwinEnvironment()) {
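// Strip the leading global prefix (e.g. '_' on 32-bit x86) for GNU-style
// linkers.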
std::string Flag;
raw_string_ostream FlagOS(Flag);
Mang.getNameWithPrefix(FlagOS, GV, false);
FlagOS.flush();
if (Flag[0] == DL->getGlobalPrefix())
OS << Flag.substr(1);
else
OS << Flag;
} else {
Mang.getNameWithPrefix(OS, GV, false);
}
if (!GV->getValueType()->isFunctionTy()) {
if (TT.isKnownWindowsMSVCEnvironment())
OS << ",DATA";
else
OS << ",data";
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ShadowStackGC.cpp | //===-- ShadowStackGC.cpp - GC support for uncooperative targets ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering for the llvm.gc* intrinsics for targets that do
// not natively support them (which includes the C backend). Note that the code
// generated is not quite as efficient as algorithms which generate stack maps
// to identify roots.
//
// This pass implements the code transformation described in this paper:
// "Accurate Garbage Collection in an Uncooperative Environment"
// Fergus Henderson, ISMM, 2002
//
// In runtime/GC/SemiSpace.cpp is a prototype runtime which is compatible with
// ShadowStackGC.
//
// In order to support this particular transformation, all stack roots are
// coallocated in the stack. This allows a fully target-independent stack map
// while introducing only minor runtime overhead.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCs.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
using namespace llvm;
#define DEBUG_TYPE "shadowstackgc"
namespace {
class ShadowStackGC : public GCStrategy {
public:
ShadowStackGC();
};
}
static GCRegistry::Add<ShadowStackGC>
X("shadow-stack", "Very portable GC for uncooperative code generators");
void llvm::linkShadowStackGC() {}
ShadowStackGC::ShadowStackGC() {
InitRoots = true;
CustomRoots = true;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LLVMBuild.txt | ;===- ./lib/CodeGen/LLVMBuild.txt ------------------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[common]
subdirectories = AsmPrinter SelectionDAG MIRParser
[component_0]
type = Library
name = CodeGen
parent = Libraries
required_libraries = Analysis Core Scalar Support Target TransformUtils
; MC Instrumentation - HLSL Change
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineFunctionAnalysis.cpp | //===-- MachineFunctionAnalysis.cpp ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the definitions of the MachineFunctionAnalysis members.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineFunctionInitializer.h"
using namespace llvm;
char MachineFunctionAnalysis::ID = 0;
MachineFunctionAnalysis::MachineFunctionAnalysis(
const TargetMachine &tm, MachineFunctionInitializer *MFInitializer)
: FunctionPass(ID), TM(tm), MF(nullptr), MFInitializer(MFInitializer) {
initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
}
MachineFunctionAnalysis::~MachineFunctionAnalysis() {
releaseMemory();
assert(!MF && "MachineFunctionAnalysis left initialized!");
}
void MachineFunctionAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MachineModuleInfo>();
}
bool MachineFunctionAnalysis::doInitialization(Module &M) {
MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>();
assert(MMI && "MMI not around yet??");
MMI->setModule(&M);
NextFnNum = 0;
return false;
}
bool MachineFunctionAnalysis::runOnFunction(Function &F) {
assert(!MF && "MachineFunctionAnalysis already initialized!");
MF = new MachineFunction(&F, TM, NextFnNum++,
getAnalysis<MachineModuleInfo>());
if (MFInitializer)
MFInitializer->initializeMachineFunction(*MF);
return false;
}
void MachineFunctionAnalysis::releaseMemory() {
delete MF;
MF = nullptr;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/SlotIndexes.cpp | //===-- SlotIndexes.cpp - Slot Indexes Pass ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
#define DEBUG_TYPE "slotindexes"
char SlotIndexes::ID = 0;
INITIALIZE_PASS(SlotIndexes, "slotindexes",
"Slot index numbering", false, false)
STATISTIC(NumLocalRenum, "Number of local renumberings");
STATISTIC(NumGlobalRenum, "Number of global renumberings");
void SlotIndexes::getAnalysisUsage(AnalysisUsage &au) const {
au.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(au);
}
void SlotIndexes::releaseMemory() {
mi2iMap.clear();
MBBRanges.clear();
idx2MBBMap.clear();
indexList.clear();
ileAllocator.Reset();
}
bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
// Compute numbering as follows:
// Grab an iterator to the start of the index list.
// Iterate over all MBBs, and within each MBB all MIs, keeping the MI
// iterator in lock-step (though skipping it over indexes which have
// null pointers in the instruction field).
// At each iteration assert that the instruction pointed to in the index
// is the same one pointed to by the MI iterator.
// FIXME: This can be simplified. The mi2iMap_, Idx2MBBMap, etc. should
// only need to be set up once after the first numbering is computed.
mf = &fn;
// Check that the list contains only the sentinel.
assert(indexList.empty() && "Index list non-empty at initial numbering?");
assert(idx2MBBMap.empty() &&
"Index -> MBB mapping non-empty at initial numbering?");
assert(MBBRanges.empty() &&
"MBB -> Index mapping non-empty at initial numbering?");
assert(mi2iMap.empty() &&
"MachineInstr -> Index mapping non-empty at initial numbering?");
unsigned index = 0;
MBBRanges.resize(mf->getNumBlockIDs());
idx2MBBMap.reserve(mf->size());
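// Push an initial null entry at index 0; real instructions are numbered
// after it.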
indexList.push_back(createEntry(nullptr, index));
// Iterate over the function.
for (MachineFunction::iterator mbbItr = mf->begin(), mbbEnd = mf->end();
mbbItr != mbbEnd; ++mbbItr) {
MachineBasicBlock *mbb = &*mbbItr;
// Insert an index for the MBB start.
SlotIndex blockStartIndex(&indexList.back(), SlotIndex::Slot_Block);
for (MachineBasicBlock::iterator miItr = mbb->begin(), miEnd = mbb->end();
miItr != miEnd; ++miItr) {
MachineInstr *mi = miItr;
if (mi->isDebugValue())
continue;
// Insert a store index for the instr.
indexList.push_back(createEntry(mi, index += SlotIndex::InstrDist));
// Save this base index in the maps.
mi2iMap.insert(std::make_pair(mi, SlotIndex(&indexList.back(),
SlotIndex::Slot_Block)));
}
// We insert one blank instruction between basic blocks.
indexList.push_back(createEntry(nullptr, index += SlotIndex::InstrDist));
MBBRanges[mbb->getNumber()].first = blockStartIndex;
MBBRanges[mbb->getNumber()].second = SlotIndex(&indexList.back(),
SlotIndex::Slot_Block);
idx2MBBMap.push_back(IdxMBBPair(blockStartIndex, mbb));
}
// Sort the Idx2MBBMap
std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
DEBUG(mf->print(dbgs(), this));
// And we're done!
return false;
}
void SlotIndexes::renumberIndexes() {
// Renumber updates the index of every element of the index list.
DEBUG(dbgs() << "\n*** Renumbering SlotIndexes ***\n");
++NumGlobalRenum;
unsigned index = 0;
for (IndexList::iterator I = indexList.begin(), E = indexList.end();
I != E; ++I) {
I->setIndex(index);
index += SlotIndex::InstrDist;
}
}
// Renumber indexes locally after curItr was inserted, but failed to get a new
// index.
void SlotIndexes::renumberIndexes(IndexList::iterator curItr) {
// Number indexes with half the default spacing so we can catch up quickly.
const unsigned Space = SlotIndex::InstrDist/2;
static_assert((Space & 3) == 0, "InstrDist must be a multiple of 2*NUM");
IndexList::iterator startItr = std::prev(curItr);
unsigned index = startItr->getIndex();
do {
curItr->setIndex(index += Space);
++curItr;
// If the next index is bigger, we have caught up.
} while (curItr != indexList.end() && curItr->getIndex() <= index);
DEBUG(dbgs() << "\n*** Renumbered SlotIndexes " << startItr->getIndex() << '-'
<< index << " ***\n");
++NumLocalRenum;
}
// Repair indexes after adding and removing instructions.
void SlotIndexes::repairIndexesInRange(MachineBasicBlock *MBB,
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End) {
// FIXME: Is this really necessary? The only caller repairIntervalsForRange()
// does the same thing.
// Find anchor points, which are at the beginning/end of blocks or at
// instructions that already have indexes.
while (Begin != MBB->begin() && !hasIndex(Begin))
--Begin;
while (End != MBB->end() && !hasIndex(End))
++End;
bool includeStart = (Begin == MBB->begin());
SlotIndex startIdx;
if (includeStart)
startIdx = getMBBStartIdx(MBB);
else
startIdx = getInstructionIndex(Begin);
SlotIndex endIdx;
if (End == MBB->end())
endIdx = getMBBEndIdx(MBB);
else
endIdx = getInstructionIndex(End);
// FIXME: Conceptually, this code is implementing an iterator on MBB that
// optionally includes an additional position prior to MBB->begin(), indicated
// by the includeStart flag. This is done so that we can iterate MIs in a MBB
// in parallel with SlotIndexes, but there should be a better way to do this.
IndexList::iterator ListB = startIdx.listEntry();
IndexList::iterator ListI = endIdx.listEntry();
MachineBasicBlock::iterator MBBI = End;
bool pastStart = false;
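// Walk the index list and the block backwards in lock-step, removing index
// entries whose instructions are no longer in the block.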
while (ListI != ListB || MBBI != Begin || (includeStart && !pastStart)) {
assert(ListI->getIndex() >= startIdx.getIndex() &&
(includeStart || !pastStart) &&
"Decremented past the beginning of region to repair.");
MachineInstr *SlotMI = ListI->getInstr();
MachineInstr *MI = (MBBI != MBB->end() && !pastStart) ? MBBI : nullptr;
bool MBBIAtBegin = MBBI == Begin && (!includeStart || pastStart);
if (SlotMI == MI && !MBBIAtBegin) {
--ListI;
if (MBBI != Begin)
--MBBI;
else
pastStart = true;
} else if (MI && mi2iMap.find(MI) == mi2iMap.end()) {
if (MBBI != Begin)
--MBBI;
else
pastStart = true;
} else {
--ListI;
if (SlotMI)
removeMachineInstrFromMaps(SlotMI);
}
}
// In theory this could be combined with the previous loop, but it is tricky
// to update the IndexList while we are iterating it.
for (MachineBasicBlock::iterator I = End; I != Begin;) {
--I;
MachineInstr *MI = I;
if (!MI->isDebugValue() && mi2iMap.find(MI) == mi2iMap.end())
insertMachineInstrInMaps(MI);
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SlotIndexes::dump() const {
for (IndexList::const_iterator itr = indexList.begin();
itr != indexList.end(); ++itr) {
dbgs() << itr->getIndex() << " ";
if (itr->getInstr()) {
dbgs() << *itr->getInstr();
} else {
dbgs() << "\n";
}
}
for (unsigned i = 0, e = MBBRanges.size(); i != e; ++i)
dbgs() << "BB#" << i << "\t[" << MBBRanges[i].first << ';'
<< MBBRanges[i].second << ")\n";
}
#endif
// Print a SlotIndex to a raw_ostream.
void SlotIndex::print(raw_ostream &os) const {
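// The slot kind prints as 'B' (block), 'e' (early-clobber), 'r' (register),
// or 'd' (dead), indexed by getSlot() into "Berd".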
if (isValid())
os << listEntry()->getIndex() << "Berd"[getSlot()];
else
os << "invalid";
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Dump a SlotIndex to stderr.
void SlotIndex::dump() const {
print(dbgs());
dbgs() << "\n";
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/InterferenceCache.cpp | //===-- InterferenceCache.cpp - Caching per-block interference ---------*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InterferenceCache remembers per-block interference in LiveIntervalUnions.
//
//===----------------------------------------------------------------------===//
#include "InterferenceCache.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
// Static member used for null interference cursors.
const InterferenceCache::BlockInterference
InterferenceCache::Cursor::NoInterference;
// Initializes PhysRegEntries (instead of a SmallVector, PhysRegEntries is a
// buffer of size NumPhysRegs to speed up alloc/clear for targets with large
// reg files). Zero-initialized memory is used for good form, and quiets tools
// like Valgrind too, but it is not required by the algorithm:
// this is because PhysRegEntries works like a SparseSet and its entries are
// only valid when there is a corresponding CacheEntries assignment. There is
// also support for when pass managers are reused for targets with different
// numbers of PhysRegs: in this case PhysRegEntries is freed and reinitialized.
void InterferenceCache::reinitPhysRegEntries() {
if (PhysRegEntriesCount == TRI->getNumRegs()) return;
delete[] PhysRegEntries; // HLSL Change: Use overridable operator delete
PhysRegEntriesCount = TRI->getNumRegs();
// HLSL Change Begin: Use overridable operator new
PhysRegEntries = new unsigned char[PhysRegEntriesCount];
std::memset(PhysRegEntries, 0, PhysRegEntriesCount);
// HLSL Change End
}
void InterferenceCache::init(MachineFunction *mf,
LiveIntervalUnion *liuarray,
SlotIndexes *indexes,
LiveIntervals *lis,
const TargetRegisterInfo *tri) {
MF = mf;
LIUArray = liuarray;
TRI = tri;
reinitPhysRegEntries();
for (unsigned i = 0; i != CacheEntries; ++i)
Entries[i].clear(mf, indexes, lis);
}
InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
unsigned E = PhysRegEntries[PhysReg];
if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
if (!Entries[E].valid(LIUArray, TRI))
Entries[E].revalidate(LIUArray, TRI);
return &Entries[E];
}
// No valid entry exists, pick the next round-robin entry.
E = RoundRobin;
if (++RoundRobin == CacheEntries)
RoundRobin = 0;
for (unsigned i = 0; i != CacheEntries; ++i) {
// Skip entries that are in use.
if (Entries[E].hasRefs()) {
if (++E == CacheEntries)
E = 0;
continue;
}
Entries[E].reset(PhysReg, LIUArray, TRI, MF);
PhysRegEntries[PhysReg] = E;
return &Entries[E];
}
llvm_unreachable("Ran out of interference cache entries.");
}
/// revalidate - LIU contents have changed, update tags.
void InterferenceCache::Entry::revalidate(LiveIntervalUnion *LIUArray,
const TargetRegisterInfo *TRI) {
// Invalidate all block entries.
++Tag;
// Invalidate all iterators.
PrevPos = SlotIndex();
unsigned i = 0;
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units, ++i)
RegUnits[i].VirtTag = LIUArray[*Units].getTag();
}
void InterferenceCache::Entry::reset(unsigned physReg,
LiveIntervalUnion *LIUArray,
const TargetRegisterInfo *TRI,
const MachineFunction *MF) {
assert(!hasRefs() && "Cannot reset cache entry with references");
// LIU's changed, invalidate cache.
++Tag;
PhysReg = physReg;
Blocks.resize(MF->getNumBlockIDs());
// Reset iterators.
PrevPos = SlotIndex();
RegUnits.clear();
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
RegUnits.push_back(LIUArray[*Units]);
RegUnits.back().Fixed = &LIS->getRegUnit(*Units);
}
}
bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
const TargetRegisterInfo *TRI) {
unsigned i = 0, e = RegUnits.size();
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units, ++i) {
if (i == e)
return false;
if (LIUArray[*Units].changedSince(RegUnits[i].VirtTag))
return false;
}
return i == e;
}
void InterferenceCache::Entry::update(unsigned MBBNum) {
SlotIndex Start, Stop;
std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
// Use advanceTo only when possible.
if (PrevPos != Start) {
if (!PrevPos.isValid() || Start < PrevPos) {
for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
RegUnitInfo &RUI = RegUnits[i];
RUI.VirtI.find(Start);
RUI.FixedI = RUI.Fixed->find(Start);
}
} else {
for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
RegUnitInfo &RUI = RegUnits[i];
RUI.VirtI.advanceTo(Start);
if (RUI.FixedI != RUI.Fixed->end())
RUI.FixedI = RUI.Fixed->advanceTo(RUI.FixedI, Start);
}
}
PrevPos = Start;
}
MachineFunction::const_iterator MFI = MF->getBlockNumbered(MBBNum);
BlockInterference *BI = &Blocks[MBBNum];
ArrayRef<SlotIndex> RegMaskSlots;
ArrayRef<const uint32_t*> RegMaskBits;
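// Scan forward block by block; blocks found to be interference-free along
// the way are cached too, stopping at the first real interference.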
for (;;) {
BI->Tag = Tag;
BI->First = BI->Last = SlotIndex();
// Check for first interference from virtregs.
for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
LiveIntervalUnion::SegmentIter &I = RegUnits[i].VirtI;
if (!I.valid())
continue;
SlotIndex StartI = I.start();
if (StartI >= Stop)
continue;
if (!BI->First.isValid() || StartI < BI->First)
BI->First = StartI;
}
// Same thing for fixed interference.
for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
LiveInterval::const_iterator I = RegUnits[i].FixedI;
LiveInterval::const_iterator E = RegUnits[i].Fixed->end();
if (I == E)
continue;
SlotIndex StartI = I->start;
if (StartI >= Stop)
continue;
if (!BI->First.isValid() || StartI < BI->First)
BI->First = StartI;
}
// Also check for register mask interference.
RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
for (unsigned i = 0, e = RegMaskSlots.size();
i != e && RegMaskSlots[i] < Limit; ++i)
if (MachineOperand::clobbersPhysReg(RegMaskBits[i], PhysReg)) {
// Register mask i clobbers PhysReg before the LIU interference.
BI->First = RegMaskSlots[i];
break;
}
PrevPos = Stop;
if (BI->First.isValid())
break;
// No interference in this block? Go ahead and precompute the next block.
if (++MFI == MF->end())
return;
MBBNum = MFI->getNumber();
BI = &Blocks[MBBNum];
if (BI->Tag == Tag)
return;
std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
}
// Check for last interference in block.
for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
LiveIntervalUnion::SegmentIter &I = RegUnits[i].VirtI;
if (!I.valid() || I.start() >= Stop)
continue;
I.advanceTo(Stop);
bool Backup = !I.valid() || I.start() >= Stop;
if (Backup)
--I;
SlotIndex StopI = I.stop();
if (!BI->Last.isValid() || StopI > BI->Last)
BI->Last = StopI;
if (Backup)
++I;
}
// Fixed interference.
for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
LiveInterval::iterator &I = RegUnits[i].FixedI;
LiveRange *LR = RegUnits[i].Fixed;
if (I == LR->end() || I->start >= Stop)
continue;
I = LR->advanceTo(I, Stop);
bool Backup = I == LR->end() || I->start >= Stop;
if (Backup)
--I;
SlotIndex StopI = I->end;
if (!BI->Last.isValid() || StopI > BI->Last)
BI->Last = StopI;
if (Backup)
++I;
}
// Also check for register mask interference.
SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
for (unsigned i = RegMaskSlots.size();
i && RegMaskSlots[i-1].getDeadSlot() > Limit; --i)
if (MachineOperand::clobbersPhysReg(RegMaskBits[i-1], PhysReg)) {
// Register mask i-1 clobbers PhysReg after the LIU interference.
// Model the regmask clobber as a dead def.
BI->Last = RegMaskSlots[i-1].getDeadSlot();
break;
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegisterPressure.cpp | //===-- RegisterPressure.cpp - Dynamic Register Pressure ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the RegisterPressure class which can be used to track
// MachineInstr level register pressure.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
/// Increase pressure for each pressure set provided by TargetRegisterInfo.
static void increaseSetPressure(std::vector<unsigned> &CurrSetPressure,
PSetIterator PSetI) {
unsigned Weight = PSetI.getWeight();
for (; PSetI.isValid(); ++PSetI)
CurrSetPressure[*PSetI] += Weight;
}
/// Decrease pressure for each pressure set provided by TargetRegisterInfo.
static void decreaseSetPressure(std::vector<unsigned> &CurrSetPressure,
PSetIterator PSetI) {
unsigned Weight = PSetI.getWeight();
for (; PSetI.isValid(); ++PSetI) {
assert(CurrSetPressure[*PSetI] >= Weight && "register pressure underflow");
CurrSetPressure[*PSetI] -= Weight;
}
}
LLVM_DUMP_METHOD
void llvm::dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
const TargetRegisterInfo *TRI) {
bool Empty = true;
for (unsigned i = 0, e = SetPressure.size(); i < e; ++i) {
if (SetPressure[i] != 0) {
dbgs() << TRI->getRegPressureSetName(i) << "=" << SetPressure[i] << '\n';
Empty = false;
}
}
if (Empty)
dbgs() << "\n";
}
LLVM_DUMP_METHOD
void RegisterPressure::dump(const TargetRegisterInfo *TRI) const {
dbgs() << "Max Pressure: ";
dumpRegSetPressure(MaxSetPressure, TRI);
dbgs() << "Live In: ";
for (unsigned i = 0, e = LiveInRegs.size(); i < e; ++i)
dbgs() << PrintVRegOrUnit(LiveInRegs[i], TRI) << " ";
dbgs() << '\n';
dbgs() << "Live Out: ";
for (unsigned i = 0, e = LiveOutRegs.size(); i < e; ++i)
dbgs() << PrintVRegOrUnit(LiveOutRegs[i], TRI) << " ";
dbgs() << '\n';
}
LLVM_DUMP_METHOD
void RegPressureTracker::dump() const {
if (!isTopClosed() || !isBottomClosed()) {
dbgs() << "Curr Pressure: ";
dumpRegSetPressure(CurrSetPressure, TRI);
}
P.dump(TRI);
}
void PressureDiff::dump(const TargetRegisterInfo &TRI) const {
for (const PressureChange &Change : *this) {
if (!Change.isValid() || Change.getUnitInc() == 0)
continue;
dbgs() << " " << TRI.getRegPressureSetName(Change.getPSet())
<< " " << Change.getUnitInc();
}
dbgs() << '\n';
}
/// Increase the current pressure as impacted by these registers and bump
/// the high water mark if needed.
void RegPressureTracker::increaseRegPressure(ArrayRef<unsigned> RegUnits) {
for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
PSetIterator PSetI = MRI->getPressureSets(RegUnits[i]);
unsigned Weight = PSetI.getWeight();
for (; PSetI.isValid(); ++PSetI) {
CurrSetPressure[*PSetI] += Weight;
if (CurrSetPressure[*PSetI] > P.MaxSetPressure[*PSetI]) {
P.MaxSetPressure[*PSetI] = CurrSetPressure[*PSetI];
}
}
}
}
/// Simply decrease the current pressure as impacted by these registers.
void RegPressureTracker::decreaseRegPressure(ArrayRef<unsigned> RegUnits) {
for (unsigned I = 0, E = RegUnits.size(); I != E; ++I)
decreaseSetPressure(CurrSetPressure, MRI->getPressureSets(RegUnits[I]));
}
/// Clear the result so it can be used for another round of pressure tracking.
void IntervalPressure::reset() {
TopIdx = BottomIdx = SlotIndex();
MaxSetPressure.clear();
LiveInRegs.clear();
LiveOutRegs.clear();
}
/// Clear the result so it can be used for another round of pressure tracking.
void RegionPressure::reset() {
TopPos = BottomPos = MachineBasicBlock::const_iterator();
MaxSetPressure.clear();
LiveInRegs.clear();
LiveOutRegs.clear();
}
/// If the current top is greater than the next index, open it.
/// We happen to need the SlotIndex for the next top for pressure update.
void IntervalPressure::openTop(SlotIndex NextTop) {
if (TopIdx <= NextTop)
return;
TopIdx = SlotIndex();
LiveInRegs.clear();
}
/// If the current top is the previous instruction (before receding), open it.
void RegionPressure::openTop(MachineBasicBlock::const_iterator PrevTop) {
if (TopPos != PrevTop)
return;
TopPos = MachineBasicBlock::const_iterator();
LiveInRegs.clear();
}
/// If the current bottom is less than or equal to the previous index, open it.
void IntervalPressure::openBottom(SlotIndex PrevBottom) {
if (BottomIdx > PrevBottom)
return;
BottomIdx = SlotIndex();
LiveInRegs.clear();
}
/// If the current bottom is the previous instr (before advancing), open it.
void RegionPressure::openBottom(MachineBasicBlock::const_iterator PrevBottom) {
if (BottomPos != PrevBottom)
return;
BottomPos = MachineBasicBlock::const_iterator();
LiveInRegs.clear();
}
const LiveRange *RegPressureTracker::getLiveRange(unsigned Reg) const {
if (TargetRegisterInfo::isVirtualRegister(Reg))
return &LIS->getInterval(Reg);
return LIS->getCachedRegUnit(Reg);
}
void RegPressureTracker::reset() {
MBB = nullptr;
LIS = nullptr;
CurrSetPressure.clear();
LiveThruPressure.clear();
P.MaxSetPressure.clear();
if (RequireIntervals)
static_cast<IntervalPressure&>(P).reset();
else
static_cast<RegionPressure&>(P).reset();
LiveRegs.PhysRegs.clear();
LiveRegs.VirtRegs.clear();
UntiedDefs.clear();
}
/// Setup the RegPressureTracker.
///
/// TODO: Add support for pressure without LiveIntervals.
void RegPressureTracker::init(const MachineFunction *mf,
const RegisterClassInfo *rci,
const LiveIntervals *lis,
const MachineBasicBlock *mbb,
MachineBasicBlock::const_iterator pos,
bool ShouldTrackUntiedDefs)
{
reset();
MF = mf;
TRI = MF->getSubtarget().getRegisterInfo();
RCI = rci;
MRI = &MF->getRegInfo();
MBB = mbb;
TrackUntiedDefs = ShouldTrackUntiedDefs;
if (RequireIntervals) {
assert(lis && "IntervalPressure requires LiveIntervals");
LIS = lis;
}
CurrPos = pos;
CurrSetPressure.assign(TRI->getNumRegPressureSets(), 0);
P.MaxSetPressure = CurrSetPressure;
LiveRegs.PhysRegs.setUniverse(TRI->getNumRegs());
LiveRegs.VirtRegs.setUniverse(MRI->getNumVirtRegs());
if (TrackUntiedDefs)
UntiedDefs.setUniverse(MRI->getNumVirtRegs());
}
/// Does this pressure result have a valid top position and live ins?
bool RegPressureTracker::isTopClosed() const {
if (RequireIntervals)
return static_cast<IntervalPressure&>(P).TopIdx.isValid();
return (static_cast<RegionPressure&>(P).TopPos ==
MachineBasicBlock::const_iterator());
}
/// Does this pressure result have a valid bottom position and live outs?
bool RegPressureTracker::isBottomClosed() const {
if (RequireIntervals)
return static_cast<IntervalPressure&>(P).BottomIdx.isValid();
return (static_cast<RegionPressure&>(P).BottomPos ==
MachineBasicBlock::const_iterator());
}
SlotIndex RegPressureTracker::getCurrSlot() const {
MachineBasicBlock::const_iterator IdxPos = CurrPos;
while (IdxPos != MBB->end() && IdxPos->isDebugValue())
++IdxPos;
if (IdxPos == MBB->end())
return LIS->getMBBEndIdx(MBB);
return LIS->getInstructionIndex(IdxPos).getRegSlot();
}
/// Set the boundary for the top of the region and summarize live ins.
void RegPressureTracker::closeTop() {
if (RequireIntervals)
static_cast<IntervalPressure&>(P).TopIdx = getCurrSlot();
else
static_cast<RegionPressure&>(P).TopPos = CurrPos;
assert(P.LiveInRegs.empty() && "inconsistent max pressure result");
P.LiveInRegs.reserve(LiveRegs.PhysRegs.size() + LiveRegs.VirtRegs.size());
P.LiveInRegs.append(LiveRegs.PhysRegs.begin(), LiveRegs.PhysRegs.end());
for (SparseSet<unsigned>::const_iterator I =
LiveRegs.VirtRegs.begin(), E = LiveRegs.VirtRegs.end(); I != E; ++I)
P.LiveInRegs.push_back(*I);
std::sort(P.LiveInRegs.begin(), P.LiveInRegs.end());
P.LiveInRegs.erase(std::unique(P.LiveInRegs.begin(), P.LiveInRegs.end()),
P.LiveInRegs.end());
}
/// Set the boundary for the bottom of the region and summarize live outs.
void RegPressureTracker::closeBottom() {
if (RequireIntervals)
static_cast<IntervalPressure&>(P).BottomIdx = getCurrSlot();
else
static_cast<RegionPressure&>(P).BottomPos = CurrPos;
assert(P.LiveOutRegs.empty() && "inconsistent max pressure result");
P.LiveOutRegs.reserve(LiveRegs.PhysRegs.size() + LiveRegs.VirtRegs.size());
P.LiveOutRegs.append(LiveRegs.PhysRegs.begin(), LiveRegs.PhysRegs.end());
for (SparseSet<unsigned>::const_iterator I =
LiveRegs.VirtRegs.begin(), E = LiveRegs.VirtRegs.end(); I != E; ++I)
P.LiveOutRegs.push_back(*I);
std::sort(P.LiveOutRegs.begin(), P.LiveOutRegs.end());
P.LiveOutRegs.erase(std::unique(P.LiveOutRegs.begin(), P.LiveOutRegs.end()),
P.LiveOutRegs.end());
}
/// Finalize the region boundaries and record live ins and live outs.
void RegPressureTracker::closeRegion() {
if (!isTopClosed() && !isBottomClosed()) {
assert(LiveRegs.PhysRegs.empty() && LiveRegs.VirtRegs.empty() &&
"no region boundary");
return;
}
if (!isBottomClosed())
closeBottom();
else if (!isTopClosed())
closeTop();
// If both top and bottom are closed, do nothing.
}
/// The register tracker is unaware of global liveness, so it ignores normal
/// live-thru ranges. However, two-address or coalesced chains can also lead
/// to live ranges with no holes. Count these to inform heuristics that we
/// can never drop below this pressure.
void RegPressureTracker::initLiveThru(const RegPressureTracker &RPTracker) {
LiveThruPressure.assign(TRI->getNumRegPressureSets(), 0);
assert(isBottomClosed() && "need bottom-up tracking to intialize.");
for (unsigned i = 0, e = P.LiveOutRegs.size(); i < e; ++i) {
unsigned Reg = P.LiveOutRegs[i];
if (TargetRegisterInfo::isVirtualRegister(Reg)
&& !RPTracker.hasUntiedDef(Reg)) {
increaseSetPressure(LiveThruPressure, MRI->getPressureSets(Reg));
}
}
}
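// A minimal usage sketch of initLiveThru (hypothetical caller code, not part
// of this file): the calling tracker must already have closed the bottom of
// its region so that LiveOutRegs is populated, and the argument is a tracker
// that recorded untied defs during a bottom-up walk.
//
//   BotTracker.closeRegion();               // bottom must be closed
//   BotTracker.initLiveThru(UntiedTracker); // seeds LiveThruPressure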
/// \brief Convenient wrapper for checking membership in RegisterOperands.
/// (std::count() doesn't have an early exit).
static bool containsReg(ArrayRef<unsigned> RegUnits, unsigned RegUnit) {
return std::find(RegUnits.begin(), RegUnits.end(), RegUnit) != RegUnits.end();
}
namespace {
/// Collect this instruction's unique uses and defs into SmallVectors for
/// processing defs and uses in order.
///
/// FIXME: always ignore tied opers
class RegisterOperands {
const TargetRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
bool IgnoreDead;
public:
SmallVector<unsigned, 8> Uses;
SmallVector<unsigned, 8> Defs;
SmallVector<unsigned, 8> DeadDefs;
RegisterOperands(const TargetRegisterInfo *tri,
const MachineRegisterInfo *mri, bool ID = false):
TRI(tri), MRI(mri), IgnoreDead(ID) {}
/// Push this operand's register onto the correct vector.
void collect(const MachineOperand &MO) {
if (!MO.isReg() || !MO.getReg())
return;
if (MO.readsReg())
pushRegUnits(MO.getReg(), Uses);
if (MO.isDef()) {
if (MO.isDead()) {
if (!IgnoreDead)
pushRegUnits(MO.getReg(), DeadDefs);
}
else
pushRegUnits(MO.getReg(), Defs);
}
}
protected:
void pushRegUnits(unsigned Reg, SmallVectorImpl<unsigned> &RegUnits) {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (containsReg(RegUnits, Reg))
return;
RegUnits.push_back(Reg);
}
else if (MRI->isAllocatable(Reg)) {
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
if (containsReg(RegUnits, *Units))
continue;
RegUnits.push_back(*Units);
}
}
}
};
} // namespace
/// Collect physical and virtual register operands.
static void collectOperands(const MachineInstr *MI,
RegisterOperands &RegOpers) {
for (ConstMIBundleOperands OperI(MI); OperI.isValid(); ++OperI)
RegOpers.collect(*OperI);
// Remove redundant physreg dead defs.
  SmallVectorImpl<unsigned>::iterator I =
      std::remove_if(RegOpers.DeadDefs.begin(), RegOpers.DeadDefs.end(),
                     [&](unsigned Reg) {
                       return containsReg(RegOpers.Defs, Reg);
                     });
RegOpers.DeadDefs.erase(I, RegOpers.DeadDefs.end());
}
/// Initialize an array of N PressureDiffs.
void PressureDiffs::init(unsigned N) {
Size = N;
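  // Zero-filling the trivially-copyable PressureChange entries leaves each
  // one in its default invalid state (collectPDiff asserts this invariant).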
if (N <= Max) {
memset(PDiffArray, 0, N * sizeof(PressureDiff));
return;
}
Max = Size;
// HLSL Change Begin: Use overridable operator new/delete
delete[] PDiffArray;
PDiffArray = new PressureDiff[N];
std::memset(PDiffArray, 0, N * sizeof(PressureDiff));
// HLSL Change End
}
/// Add a change in pressure to the pressure diff of a given instruction.
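///
/// Entries are kept sorted by pressure set ID in a small fixed-size array.
/// For example (illustrative values): inserting a +2 change for PSet 3 into
/// the diff [{1,+1}, {5,-1}] shifts the tail right to give
/// [{1,+1}, {3,+2}, {5,-1}], while a change for a PSet already present simply
/// accumulates into that entry's unit increment.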
void PressureDiff::addPressureChange(unsigned RegUnit, bool IsDec,
const MachineRegisterInfo *MRI) {
PSetIterator PSetI = MRI->getPressureSets(RegUnit);
int Weight = IsDec ? -PSetI.getWeight() : PSetI.getWeight();
for (; PSetI.isValid(); ++PSetI) {
// Find an existing entry in the pressure diff for this PSet.
PressureDiff::iterator I = begin(), E = end();
for (; I != E && I->isValid(); ++I) {
if (I->getPSet() >= *PSetI)
break;
}
// If all pressure sets are more constrained, skip the remaining PSets.
if (I == E)
break;
// Insert this PressureChange.
if (!I->isValid() || I->getPSet() != *PSetI) {
PressureChange PTmp = PressureChange(*PSetI);
for (PressureDiff::iterator J = I; J != E && PTmp.isValid(); ++J)
std::swap(*J,PTmp);
}
// Update the units for this pressure set.
I->setUnitInc(I->getUnitInc() + Weight);
}
}
/// Record the pressure difference induced by the given operand list.
static void collectPDiff(PressureDiff &PDiff, RegisterOperands &RegOpers,
const MachineRegisterInfo *MRI) {
assert(!PDiff.begin()->isValid() && "stale PDiff");
for (unsigned i = 0, e = RegOpers.Defs.size(); i != e; ++i)
PDiff.addPressureChange(RegOpers.Defs[i], true, MRI);
for (unsigned i = 0, e = RegOpers.Uses.size(); i != e; ++i)
PDiff.addPressureChange(RegOpers.Uses[i], false, MRI);
}
/// Force liveness of registers.
void RegPressureTracker::addLiveRegs(ArrayRef<unsigned> Regs) {
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
if (LiveRegs.insert(Regs[i]))
increaseRegPressure(Regs[i]);
}
}
/// Add Reg to the live in set and increase max pressure.
void RegPressureTracker::discoverLiveIn(unsigned Reg) {
assert(!LiveRegs.contains(Reg) && "avoid bumping max pressure twice");
if (containsReg(P.LiveInRegs, Reg))
return;
// At live in discovery, unconditionally increase the high water mark.
P.LiveInRegs.push_back(Reg);
increaseSetPressure(P.MaxSetPressure, MRI->getPressureSets(Reg));
}
/// Add Reg to the live out set and increase max pressure.
void RegPressureTracker::discoverLiveOut(unsigned Reg) {
assert(!LiveRegs.contains(Reg) && "avoid bumping max pressure twice");
if (containsReg(P.LiveOutRegs, Reg))
return;
// At live out discovery, unconditionally increase the high water mark.
P.LiveOutRegs.push_back(Reg);
increaseSetPressure(P.MaxSetPressure, MRI->getPressureSets(Reg));
}
/// Recede across the previous instruction. If LiveUses is provided, record any
/// RegUnits that are made live by the current instruction's uses. This includes
/// registers that are both defined and used by the instruction. If a pressure
/// difference pointer is provided, record the changes in pressure caused by this
/// instruction independent of liveness.
bool RegPressureTracker::recede(SmallVectorImpl<unsigned> *LiveUses,
PressureDiff *PDiff) {
// Check for the top of the analyzable region.
if (CurrPos == MBB->begin()) {
closeRegion();
return false;
}
if (!isBottomClosed())
closeBottom();
// Open the top of the region using block iterators.
if (!RequireIntervals && isTopClosed())
static_cast<RegionPressure&>(P).openTop(CurrPos);
// Find the previous instruction.
do
--CurrPos;
while (CurrPos != MBB->begin() && CurrPos->isDebugValue());
if (CurrPos->isDebugValue()) {
closeRegion();
return false;
}
SlotIndex SlotIdx;
if (RequireIntervals)
SlotIdx = LIS->getInstructionIndex(CurrPos).getRegSlot();
// Open the top of the region using slot indexes.
if (RequireIntervals && isTopClosed())
static_cast<IntervalPressure&>(P).openTop(SlotIdx);
RegisterOperands RegOpers(TRI, MRI);
collectOperands(CurrPos, RegOpers);
if (PDiff)
collectPDiff(*PDiff, RegOpers, MRI);
// Boost pressure for all dead defs together.
increaseRegPressure(RegOpers.DeadDefs);
decreaseRegPressure(RegOpers.DeadDefs);
// Kill liveness at live defs.
// TODO: consider earlyclobbers?
for (unsigned i = 0, e = RegOpers.Defs.size(); i < e; ++i) {
unsigned Reg = RegOpers.Defs[i];
bool DeadDef = false;
if (RequireIntervals) {
const LiveRange *LR = getLiveRange(Reg);
if (LR) {
LiveQueryResult LRQ = LR->Query(SlotIdx);
DeadDef = LRQ.isDeadDef();
}
}
if (DeadDef) {
      // LiveIntervals knows this is a dead def even though its MachineOperand is
// not flagged as such. Since this register will not be recorded as
// live-out, increase its PDiff value to avoid underflowing pressure.
if (PDiff)
PDiff->addPressureChange(Reg, false, MRI);
} else {
if (LiveRegs.erase(Reg))
decreaseRegPressure(Reg);
else
discoverLiveOut(Reg);
}
}
// Generate liveness for uses.
for (unsigned i = 0, e = RegOpers.Uses.size(); i < e; ++i) {
unsigned Reg = RegOpers.Uses[i];
if (!LiveRegs.contains(Reg)) {
// Adjust liveouts if LiveIntervals are available.
if (RequireIntervals) {
const LiveRange *LR = getLiveRange(Reg);
if (LR) {
LiveQueryResult LRQ = LR->Query(SlotIdx);
if (!LRQ.isKill() && !LRQ.valueDefined())
discoverLiveOut(Reg);
}
}
increaseRegPressure(Reg);
LiveRegs.insert(Reg);
if (LiveUses && !containsReg(*LiveUses, Reg))
LiveUses->push_back(Reg);
}
}
if (TrackUntiedDefs) {
for (unsigned i = 0, e = RegOpers.Defs.size(); i < e; ++i) {
unsigned Reg = RegOpers.Defs[i];
if (TargetRegisterInfo::isVirtualRegister(Reg) && !LiveRegs.contains(Reg))
UntiedDefs.insert(Reg);
}
}
return true;
}
/// Advance across the current instruction.
bool RegPressureTracker::advance() {
assert(!TrackUntiedDefs && "unsupported mode");
// Check for the bottom of the analyzable region.
if (CurrPos == MBB->end()) {
closeRegion();
return false;
}
if (!isTopClosed())
closeTop();
SlotIndex SlotIdx;
if (RequireIntervals)
SlotIdx = getCurrSlot();
// Open the bottom of the region using slot indexes.
if (isBottomClosed()) {
if (RequireIntervals)
static_cast<IntervalPressure&>(P).openBottom(SlotIdx);
else
static_cast<RegionPressure&>(P).openBottom(CurrPos);
}
RegisterOperands RegOpers(TRI, MRI);
collectOperands(CurrPos, RegOpers);
for (unsigned i = 0, e = RegOpers.Uses.size(); i < e; ++i) {
unsigned Reg = RegOpers.Uses[i];
// Discover live-ins.
bool isLive = LiveRegs.contains(Reg);
if (!isLive)
discoverLiveIn(Reg);
// Kill liveness at last uses.
bool lastUse = false;
if (RequireIntervals) {
const LiveRange *LR = getLiveRange(Reg);
lastUse = LR && LR->Query(SlotIdx).isKill();
}
else {
// Allocatable physregs are always single-use before register rewriting.
lastUse = !TargetRegisterInfo::isVirtualRegister(Reg);
}
if (lastUse && isLive) {
LiveRegs.erase(Reg);
decreaseRegPressure(Reg);
}
else if (!lastUse && !isLive)
increaseRegPressure(Reg);
}
// Generate liveness for defs.
for (unsigned i = 0, e = RegOpers.Defs.size(); i < e; ++i) {
unsigned Reg = RegOpers.Defs[i];
if (LiveRegs.insert(Reg))
increaseRegPressure(Reg);
}
// Boost pressure for all dead defs together.
increaseRegPressure(RegOpers.DeadDefs);
decreaseRegPressure(RegOpers.DeadDefs);
// Find the next instruction.
do
++CurrPos;
while (CurrPos != MBB->end() && CurrPos->isDebugValue());
return true;
}
/// Find the max change in excess pressure across all sets.
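///
/// Worked example (hypothetical numbers): with a set limit of 10, going from
/// POld = 8 to PNew = 12 reports PDiff = PNew - Limit = +2 (the units newly
/// in excess), while going from POld = 12 to PNew = 8 reports
/// PDiff = Limit - POld = -2 (the excess just eliminated).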
static void computeExcessPressureDelta(ArrayRef<unsigned> OldPressureVec,
ArrayRef<unsigned> NewPressureVec,
RegPressureDelta &Delta,
const RegisterClassInfo *RCI,
ArrayRef<unsigned> LiveThruPressureVec) {
Delta.Excess = PressureChange();
for (unsigned i = 0, e = OldPressureVec.size(); i < e; ++i) {
unsigned POld = OldPressureVec[i];
unsigned PNew = NewPressureVec[i];
int PDiff = (int)PNew - (int)POld;
if (!PDiff) // No change in this set in the common case.
continue;
// Only consider change beyond the limit.
unsigned Limit = RCI->getRegPressureSetLimit(i);
if (!LiveThruPressureVec.empty())
Limit += LiveThruPressureVec[i];
if (Limit > POld) {
if (Limit > PNew)
PDiff = 0; // Under the limit
else
PDiff = PNew - Limit; // Just exceeded limit.
}
else if (Limit > PNew)
PDiff = Limit - POld; // Just obeyed limit.
if (PDiff) {
Delta.Excess = PressureChange(i);
Delta.Excess.setUnitInc(PDiff);
break;
}
}
}
/// Find the max change in max pressure that either surpasses a critical PSet
/// limit or exceeds the current MaxPressureLimit.
///
/// FIXME: comparing each element of the old and new MaxPressure vectors here is
/// silly. It's done now to demonstrate the concept but will go away with a
/// RegPressureTracker API change to work with pressure differences.
static void computeMaxPressureDelta(ArrayRef<unsigned> OldMaxPressureVec,
ArrayRef<unsigned> NewMaxPressureVec,
ArrayRef<PressureChange> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit,
RegPressureDelta &Delta) {
Delta.CriticalMax = PressureChange();
Delta.CurrentMax = PressureChange();
unsigned CritIdx = 0, CritEnd = CriticalPSets.size();
for (unsigned i = 0, e = OldMaxPressureVec.size(); i < e; ++i) {
unsigned POld = OldMaxPressureVec[i];
unsigned PNew = NewMaxPressureVec[i];
if (PNew == POld) // No change in this set in the common case.
continue;
if (!Delta.CriticalMax.isValid()) {
while (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() < i)
++CritIdx;
if (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() == i) {
int PDiff = (int)PNew - (int)CriticalPSets[CritIdx].getUnitInc();
if (PDiff > 0) {
Delta.CriticalMax = PressureChange(i);
Delta.CriticalMax.setUnitInc(PDiff);
}
}
}
// Find the first increase above MaxPressureLimit.
    // (Ignores negative changes in max pressure.)
if (!Delta.CurrentMax.isValid() && PNew > MaxPressureLimit[i]) {
Delta.CurrentMax = PressureChange(i);
Delta.CurrentMax.setUnitInc(PNew - POld);
if (CritIdx == CritEnd || Delta.CriticalMax.isValid())
break;
}
}
}
/// Record the upward impact of a single instruction on current register
/// pressure. Unlike the advance/recede pressure tracking interface, this does
/// not discover live in/outs.
///
/// This is intended for speculative queries. It leaves pressure inconsistent
/// with the current position, so must be restored by the caller.
void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) {
assert(!MI->isDebugValue() && "Expect a nondebug instruction.");
// Account for register pressure similar to RegPressureTracker::recede().
RegisterOperands RegOpers(TRI, MRI, /*IgnoreDead=*/true);
collectOperands(MI, RegOpers);
  // Boost max pressure for all dead defs together. Since CurrSetPressure and
  // MaxSetPressure are tracked together, the momentary increase below is
  // recorded in the max before the matching decrease restores the current
  // pressure.
increaseRegPressure(RegOpers.DeadDefs);
decreaseRegPressure(RegOpers.DeadDefs);
// Kill liveness at live defs.
for (unsigned i = 0, e = RegOpers.Defs.size(); i < e; ++i) {
unsigned Reg = RegOpers.Defs[i];
bool DeadDef = false;
if (RequireIntervals) {
const LiveRange *LR = getLiveRange(Reg);
if (LR) {
SlotIndex SlotIdx = LIS->getInstructionIndex(MI);
LiveQueryResult LRQ = LR->Query(SlotIdx);
DeadDef = LRQ.isDeadDef();
}
}
if (!DeadDef) {
if (!containsReg(RegOpers.Uses, Reg))
decreaseRegPressure(Reg);
}
}
// Generate liveness for uses.
for (unsigned i = 0, e = RegOpers.Uses.size(); i < e; ++i) {
unsigned Reg = RegOpers.Uses[i];
if (!LiveRegs.contains(Reg))
increaseRegPressure(Reg);
}
}
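// A minimal sketch of the snapshot/restore discipline a caller of
// bumpUpwardPressure must follow (hypothetical code; getMaxUpwardPressureDelta
// below uses the same pattern):
//
//   std::vector<unsigned> SavedPressure = CurrSetPressure;
//   std::vector<unsigned> SavedMaxPressure = P.MaxSetPressure;
//   bumpUpwardPressure(MI);
//   // ... inspect CurrSetPressure / P.MaxSetPressure ...
//   P.MaxSetPressure.swap(SavedMaxPressure);
//   CurrSetPressure.swap(SavedPressure);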
/// Consider the pressure increase caused by traversing this instruction
/// bottom-up. Find the pressure set with the most change beyond its pressure
/// limit based on the tracker's current pressure, and return the change in
/// number of register units of that pressure set introduced by this
/// instruction.
///
/// This assumes that the current LiveOut set is sufficient.
///
/// This is expensive for an on-the-fly query because it calls
/// bumpUpwardPressure to recompute the pressure sets based on current
/// liveness. This mainly exists to verify correctness, e.g. with
/// -verify-misched. getUpwardPressureDelta is the fast version of this query
/// that uses the per-SUnit cache of the PressureDiff.
void RegPressureTracker::
getMaxUpwardPressureDelta(const MachineInstr *MI, PressureDiff *PDiff,
RegPressureDelta &Delta,
ArrayRef<PressureChange> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit) {
// Snapshot Pressure.
// FIXME: The snapshot heap space should persist. But I'm planning to
// summarize the pressure effect so we don't need to snapshot at all.
std::vector<unsigned> SavedPressure = CurrSetPressure;
std::vector<unsigned> SavedMaxPressure = P.MaxSetPressure;
bumpUpwardPressure(MI);
computeExcessPressureDelta(SavedPressure, CurrSetPressure, Delta, RCI,
LiveThruPressure);
computeMaxPressureDelta(SavedMaxPressure, P.MaxSetPressure, CriticalPSets,
MaxPressureLimit, Delta);
assert(Delta.CriticalMax.getUnitInc() >= 0 &&
Delta.CurrentMax.getUnitInc() >= 0 && "cannot decrease max pressure");
// Restore the tracker's state.
P.MaxSetPressure.swap(SavedMaxPressure);
CurrSetPressure.swap(SavedPressure);
#ifndef NDEBUG
if (!PDiff)
return;
// Check if the alternate algorithm yields the same result.
RegPressureDelta Delta2;
getUpwardPressureDelta(MI, *PDiff, Delta2, CriticalPSets, MaxPressureLimit);
if (Delta != Delta2) {
dbgs() << "PDiff: ";
PDiff->dump(*TRI);
dbgs() << "DELTA: " << *MI;
if (Delta.Excess.isValid())
dbgs() << "Excess1 " << TRI->getRegPressureSetName(Delta.Excess.getPSet())
<< " " << Delta.Excess.getUnitInc() << "\n";
if (Delta.CriticalMax.isValid())
dbgs() << "Critic1 " << TRI->getRegPressureSetName(Delta.CriticalMax.getPSet())
<< " " << Delta.CriticalMax.getUnitInc() << "\n";
if (Delta.CurrentMax.isValid())
dbgs() << "CurrMx1 " << TRI->getRegPressureSetName(Delta.CurrentMax.getPSet())
<< " " << Delta.CurrentMax.getUnitInc() << "\n";
if (Delta2.Excess.isValid())
dbgs() << "Excess2 " << TRI->getRegPressureSetName(Delta2.Excess.getPSet())
<< " " << Delta2.Excess.getUnitInc() << "\n";
if (Delta2.CriticalMax.isValid())
dbgs() << "Critic2 " << TRI->getRegPressureSetName(Delta2.CriticalMax.getPSet())
<< " " << Delta2.CriticalMax.getUnitInc() << "\n";
if (Delta2.CurrentMax.isValid())
dbgs() << "CurrMx2 " << TRI->getRegPressureSetName(Delta2.CurrentMax.getPSet())
<< " " << Delta2.CurrentMax.getUnitInc() << "\n";
llvm_unreachable("RegP Delta Mismatch");
}
#endif
}
/// This is the fast version of querying register pressure that does not
/// directly depend on current liveness.
///
/// @param Delta captures information needed for heuristics.
///
/// @param CriticalPSets Are the pressure sets that are known to exceed some
/// limit within the region, not necessarily at the current position.
///
/// @param MaxPressureLimit Is the max pressure within the region, not
/// necessarily at the current position.
void RegPressureTracker::
getUpwardPressureDelta(const MachineInstr *MI, /*const*/ PressureDiff &PDiff,
RegPressureDelta &Delta,
ArrayRef<PressureChange> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit) const {
unsigned CritIdx = 0, CritEnd = CriticalPSets.size();
for (PressureDiff::const_iterator
PDiffI = PDiff.begin(), PDiffE = PDiff.end();
PDiffI != PDiffE && PDiffI->isValid(); ++PDiffI) {
unsigned PSetID = PDiffI->getPSet();
unsigned Limit = RCI->getRegPressureSetLimit(PSetID);
if (!LiveThruPressure.empty())
Limit += LiveThruPressure[PSetID];
unsigned POld = CurrSetPressure[PSetID];
unsigned MOld = P.MaxSetPressure[PSetID];
unsigned MNew = MOld;
// Ignore DeadDefs here because they aren't captured by PressureChange.
unsigned PNew = POld + PDiffI->getUnitInc();
assert((PDiffI->getUnitInc() >= 0) == (PNew >= POld) && "PSet overflow");
if (PNew > MOld)
MNew = PNew;
// Check if current pressure has exceeded the limit.
if (!Delta.Excess.isValid()) {
unsigned ExcessInc = 0;
if (PNew > Limit)
ExcessInc = POld > Limit ? PNew - POld : PNew - Limit;
else if (POld > Limit)
ExcessInc = Limit - POld;
if (ExcessInc) {
Delta.Excess = PressureChange(PSetID);
Delta.Excess.setUnitInc(ExcessInc);
}
}
// Check if max pressure has exceeded a critical pressure set max.
if (MNew == MOld)
continue;
if (!Delta.CriticalMax.isValid()) {
while (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() < PSetID)
++CritIdx;
if (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() == PSetID) {
int CritInc = (int)MNew - (int)CriticalPSets[CritIdx].getUnitInc();
if (CritInc > 0 && CritInc <= INT16_MAX) {
Delta.CriticalMax = PressureChange(PSetID);
Delta.CriticalMax.setUnitInc(CritInc);
}
}
}
// Check if max pressure has exceeded the current max.
if (!Delta.CurrentMax.isValid() && MNew > MaxPressureLimit[PSetID]) {
Delta.CurrentMax = PressureChange(PSetID);
Delta.CurrentMax.setUnitInc(MNew - MOld);
}
}
}
/// Helper to find a vreg use between two indices [PriorUseIdx, NextUseIdx).
static bool findUseBetween(unsigned Reg,
SlotIndex PriorUseIdx, SlotIndex NextUseIdx,
const MachineRegisterInfo *MRI,
const LiveIntervals *LIS) {
for (MachineRegisterInfo::use_instr_nodbg_iterator
UI = MRI->use_instr_nodbg_begin(Reg),
UE = MRI->use_instr_nodbg_end(); UI != UE; ++UI) {
const MachineInstr* MI = &*UI;
if (MI->isDebugValue())
continue;
SlotIndex InstSlot = LIS->getInstructionIndex(MI).getRegSlot();
if (InstSlot >= PriorUseIdx && InstSlot < NextUseIdx)
return true;
}
return false;
}
/// Record the downward impact of a single instruction on current register
/// pressure. Unlike the advance/recede pressure tracking interface, this does
/// not discover live in/outs.
///
/// This is intended for speculative queries. It leaves pressure inconsistent
/// with the current position, so must be restored by the caller.
void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) {
assert(!MI->isDebugValue() && "Expect a nondebug instruction.");
// Account for register pressure similar to RegPressureTracker::recede().
RegisterOperands RegOpers(TRI, MRI);
collectOperands(MI, RegOpers);
// Kill liveness at last uses. Assume allocatable physregs are single-use
// rather than checking LiveIntervals.
SlotIndex SlotIdx;
if (RequireIntervals)
SlotIdx = LIS->getInstructionIndex(MI).getRegSlot();
for (unsigned i = 0, e = RegOpers.Uses.size(); i < e; ++i) {
unsigned Reg = RegOpers.Uses[i];
if (RequireIntervals) {
// FIXME: allow the caller to pass in the list of vreg uses that remain
// to be bottom-scheduled to avoid searching uses at each query.
SlotIndex CurrIdx = getCurrSlot();
const LiveRange *LR = getLiveRange(Reg);
if (LR) {
LiveQueryResult LRQ = LR->Query(SlotIdx);
if (LRQ.isKill() && !findUseBetween(Reg, CurrIdx, SlotIdx, MRI, LIS)) {
decreaseRegPressure(Reg);
}
}
}
else if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
// Allocatable physregs are always single-use before register rewriting.
decreaseRegPressure(Reg);
}
}
// Generate liveness for defs.
increaseRegPressure(RegOpers.Defs);
// Boost pressure for all dead defs together.
increaseRegPressure(RegOpers.DeadDefs);
decreaseRegPressure(RegOpers.DeadDefs);
}
/// Consider the pressure increase caused by traversing this instruction
/// top-down. Find the pressure set with the most change beyond its pressure
/// limit based on the tracker's current pressure, and return the number of
/// excess register units of that pressure set introduced by this instruction.
///
/// This assumes that the current LiveIn set is sufficient.
///
/// This is expensive for an on-the-fly query because it calls
/// bumpDownwardPressure to recompute the pressure sets based on current
/// liveness. We don't yet have a fast version of downward pressure tracking
/// analogous to getUpwardPressureDelta.
void RegPressureTracker::
getMaxDownwardPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta,
ArrayRef<PressureChange> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit) {
// Snapshot Pressure.
std::vector<unsigned> SavedPressure = CurrSetPressure;
std::vector<unsigned> SavedMaxPressure = P.MaxSetPressure;
bumpDownwardPressure(MI);
computeExcessPressureDelta(SavedPressure, CurrSetPressure, Delta, RCI,
LiveThruPressure);
computeMaxPressureDelta(SavedMaxPressure, P.MaxSetPressure, CriticalPSets,
MaxPressureLimit, Delta);
assert(Delta.CriticalMax.getUnitInc() >= 0 &&
Delta.CurrentMax.getUnitInc() >= 0 && "cannot decrease max pressure");
// Restore the tracker's state.
P.MaxSetPressure.swap(SavedMaxPressure);
CurrSetPressure.swap(SavedPressure);
}
/// Get the pressure of each PSet after traversing this instruction bottom-up.
void RegPressureTracker::
getUpwardPressure(const MachineInstr *MI,
std::vector<unsigned> &PressureResult,
std::vector<unsigned> &MaxPressureResult) {
// Snapshot pressure.
PressureResult = CurrSetPressure;
MaxPressureResult = P.MaxSetPressure;
bumpUpwardPressure(MI);
// Current pressure becomes the result. Restore current pressure.
P.MaxSetPressure.swap(MaxPressureResult);
CurrSetPressure.swap(PressureResult);
}
/// Get the pressure of each PSet after traversing this instruction top-down.
void RegPressureTracker::
getDownwardPressure(const MachineInstr *MI,
std::vector<unsigned> &PressureResult,
std::vector<unsigned> &MaxPressureResult) {
// Snapshot pressure.
PressureResult = CurrSetPressure;
MaxPressureResult = P.MaxSetPressure;
bumpDownwardPressure(MI);
// Current pressure becomes the result. Restore current pressure.
P.MaxSetPressure.swap(MaxPressureResult);
CurrSetPressure.swap(PressureResult);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegAllocBasic.cpp | //===-- RegAllocBasic.cpp - Basic Register Allocator ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RABasic function pass, which provides a minimal
// implementation of the basic register allocator.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cstdlib>
#include <queue>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
static RegisterRegAlloc basicRegAlloc("basic", "basic register allocator",
createBasicRegisterAllocator);
namespace {
struct CompSpillWeight {
bool operator()(LiveInterval *A, LiveInterval *B) const {
return A->weight < B->weight;
}
};
}
namespace {
/// RABasic provides a minimal implementation of the basic register allocation
/// algorithm. It prioritizes live virtual registers by spill weight and spills
/// whenever a register is unavailable. This is not practical in production but
/// provides a useful baseline both for measuring other allocators and comparing
/// the speed of the basic algorithm against other styles of allocators.
class RABasic : public MachineFunctionPass, public RegAllocBase
{
// context
MachineFunction *MF;
// state
std::unique_ptr<Spiller> SpillerInstance;
std::priority_queue<LiveInterval*, std::vector<LiveInterval*>,
CompSpillWeight> Queue;
// Scratch space. Allocated here to avoid repeated malloc calls in
// selectOrSplit().
BitVector UsableRegs;
public:
RABasic();
/// Return the pass name.
const char* getPassName() const override {
return "Basic Register Allocator";
}
/// RABasic analysis usage.
void getAnalysisUsage(AnalysisUsage &AU) const override;
void releaseMemory() override;
Spiller &spiller() override { return *SpillerInstance; }
void enqueue(LiveInterval *LI) override {
Queue.push(LI);
}
LiveInterval *dequeue() override {
if (Queue.empty())
return nullptr;
LiveInterval *LI = Queue.top();
Queue.pop();
return LI;
}
unsigned selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &SplitVRegs) override;
/// Perform register allocation.
bool runOnMachineFunction(MachineFunction &mf) override;
  // Helper for spilling all live virtual registers currently unified under
  // PhysReg that interfere with the most recently queried VirtReg. Return true
  // if spilling was successful, and append any new spilled/split intervals to
  // SplitVRegs.
bool spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<unsigned> &SplitVRegs);
static char ID;
};
char RABasic::ID = 0;
} // end anonymous namespace
RABasic::RABasic(): MachineFunctionPass(ID) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
}
void RABasic::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addPreserved<LiveDebugVariables>();
AU.addRequired<LiveStacks>();
AU.addPreserved<LiveStacks>();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addPreserved<MachineBlockFrequencyInfo>();
AU.addRequiredID(MachineDominatorsID);
AU.addPreservedID(MachineDominatorsID);
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<VirtRegMap>();
AU.addPreserved<VirtRegMap>();
AU.addRequired<LiveRegMatrix>();
AU.addPreserved<LiveRegMatrix>();
MachineFunctionPass::getAnalysisUsage(AU);
}
void RABasic::releaseMemory() {
SpillerInstance.reset();
}
// Spill or split all live virtual registers currently unified under PhysReg
// that interfere with VirtReg. The newly spilled or split live intervals are
// returned by appending them to SplitVRegs.
bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<unsigned> &SplitVRegs) {
// Record each interference and determine if all are spillable before mutating
// either the union or live intervals.
SmallVector<LiveInterval*, 8> Intfs;
// Collect interferences assigned to any alias of the physical register.
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
Q.collectInterferingVRegs();
if (Q.seenUnspillableVReg())
return false;
for (unsigned i = Q.interferingVRegs().size(); i; --i) {
LiveInterval *Intf = Q.interferingVRegs()[i - 1];
if (!Intf->isSpillable() || Intf->weight > VirtReg.weight)
return false;
Intfs.push_back(Intf);
}
}
DEBUG(dbgs() << "spilling " << TRI->getName(PhysReg) <<
" interferences with " << VirtReg << "\n");
assert(!Intfs.empty() && "expected interference");
// Spill each interfering vreg allocated to PhysReg or an alias.
for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
LiveInterval &Spill = *Intfs[i];
// Skip duplicates.
if (!VRM->hasPhys(Spill.reg))
continue;
// Deallocate the interfering vreg by removing it from the union.
// A LiveInterval instance may not be in a union during modification!
Matrix->unassign(Spill);
// Spill the extracted interval.
LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM);
spiller().spill(LRE);
}
return true;
}
// Driver for the register assignment and splitting heuristics.
// Manages iteration over the LiveIntervalUnions.
//
// This is a minimal implementation of register assignment and splitting that
// spills whenever we run out of registers.
//
// selectOrSplit can only be called once per live virtual register. We then do a
// single interference test for each register in the correct class until we
// find an available register. So, the number of interference tests in the
// worst case is
// |vregs| * |machineregs|. And since the number of interference tests is
// minimal, there is no value in caching them outside the scope of
// selectOrSplit().
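//
// For example (illustrative magnitudes only): a function with 1,000 virtual
// registers and 100 allocatable physical registers performs on the order of
// 100,000 interference tests in the worst case.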
unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &SplitVRegs) {
// Populate a list of physical register spill candidates.
SmallVector<unsigned, 8> PhysRegSpillCands;
// Check for an available register in this class.
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
while (unsigned PhysReg = Order.next()) {
// Check for interference in PhysReg
switch (Matrix->checkInterference(VirtReg, PhysReg)) {
case LiveRegMatrix::IK_Free:
// PhysReg is available, allocate it.
return PhysReg;
case LiveRegMatrix::IK_VirtReg:
// Only virtual registers in the way, we may be able to spill them.
PhysRegSpillCands.push_back(PhysReg);
continue;
default:
// RegMask or RegUnit interference.
continue;
}
}
// Try to spill another interfering reg with less spill weight.
for (SmallVectorImpl<unsigned>::iterator PhysRegI = PhysRegSpillCands.begin(),
PhysRegE = PhysRegSpillCands.end(); PhysRegI != PhysRegE; ++PhysRegI) {
if (!spillInterferences(VirtReg, *PhysRegI, SplitVRegs))
continue;
assert(!Matrix->checkInterference(VirtReg, *PhysRegI) &&
"Interference after spill.");
// Tell the caller to allocate to this newly freed physical register.
return *PhysRegI;
}
// No other spill candidates were found, so spill the current VirtReg.
DEBUG(dbgs() << "spilling: " << VirtReg << '\n');
if (!VirtReg.isSpillable())
return ~0u;
LiveRangeEdit LRE(&VirtReg, SplitVRegs, *MF, *LIS, VRM);
spiller().spill(LRE);
// The live virtual register requesting allocation was spilled, so tell
// the caller not to allocate anything during this round.
return 0;
}
bool RABasic::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "********** BASIC REGISTER ALLOCATION **********\n"
<< "********** Function: "
<< mf.getName() << '\n');
MF = &mf;
RegAllocBase::init(getAnalysis<VirtRegMap>(),
getAnalysis<LiveIntervals>(),
getAnalysis<LiveRegMatrix>());
calculateSpillWeightsAndHints(*LIS, *MF,
getAnalysis<MachineLoopInfo>(),
getAnalysis<MachineBlockFrequencyInfo>());
SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
allocatePhysRegs();
// Diagnostic output before rewriting
DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *VRM << "\n");
releaseMemory();
return true;
}
FunctionPass* llvm::createBasicRegisterAllocator()
{
return new RABasic();
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ImplicitNullChecks.cpp | //===-- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
// test %r10, %r10
// je throw_npe
// movl (%r10), %esi
// ...
//
// to
//
// faulting_load_op("movl (%r10), %esi", throw_npe)
// ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
static cl::opt<unsigned> PageSize("imp-null-check-page-size",
cl::desc("The page size of the target in "
"bytes"),
cl::init(4096));
#define DEBUG_TYPE "implicit-null-checks"
STATISTIC(NumImplicitNullChecks,
"Number of explicit null checks made implicit");
namespace {
class ImplicitNullChecks : public MachineFunctionPass {
/// Represents one null check that can be made implicit.
struct NullCheck {
// The memory operation the null check can be folded into.
MachineInstr *MemOperation;
// The instruction actually doing the null check (Ptr != 0).
MachineInstr *CheckOperation;
// The block the check resides in.
MachineBasicBlock *CheckBlock;
// The block branched to if the pointer is non-null.
MachineBasicBlock *NotNullSucc;
// The block branched to if the pointer is null.
MachineBasicBlock *NullSucc;
NullCheck()
: MemOperation(), CheckOperation(), CheckBlock(), NotNullSucc(),
NullSucc() {}
explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
MachineBasicBlock *checkBlock,
MachineBasicBlock *notNullSucc,
MachineBasicBlock *nullSucc)
: MemOperation(memOperation), CheckOperation(checkOperation),
CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc) {
}
};
const TargetInstrInfo *TII = nullptr;
const TargetRegisterInfo *TRI = nullptr;
MachineModuleInfo *MMI = nullptr;
bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
SmallVectorImpl<NullCheck> &NullCheckList);
MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
MCSymbol *HandlerLabel);
void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);
public:
static char ID;
ImplicitNullChecks() : MachineFunctionPass(ID) {
initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
};
}
bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getRegInfo().getTargetRegisterInfo();
MMI = &MF.getMMI();
SmallVector<NullCheck, 16> NullCheckList;
for (auto &MBB : MF)
analyzeBlockForNullChecks(MBB, NullCheckList);
if (!NullCheckList.empty())
rewriteNullChecks(NullCheckList);
return !NullCheckList.empty();
}
/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the said null check to
/// NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;
MDNode *BranchMD =
MBB.getBasicBlock()
? MBB.getBasicBlock()->getTerminator()->getMetadata("make.implicit")
: nullptr;
if (!BranchMD)
return false;
MachineBranchPredicate MBP;
if (TII->AnalyzeBranchPredicate(MBB, MBP, true))
return false;
// Is the predicate comparing an integer to zero?
if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
(MBP.Predicate == MachineBranchPredicate::PRED_NE ||
MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
return false;
// If we cannot erase the test instruction itself, then making the null check
// implicit does not buy us much.
if (!MBP.SingleUseCondition)
return false;
MachineBasicBlock *NotNullSucc, *NullSucc;
if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
NotNullSucc = MBP.TrueDest;
NullSucc = MBP.FalseDest;
} else {
NotNullSucc = MBP.FalseDest;
NullSucc = MBP.TrueDest;
}
// We handle the simplest case for now. We can potentially do better by using
// the machine dominator tree.
if (NotNullSucc->pred_size() != 1)
return false;
// Starting with a code fragment like:
//
// test %RAX, %RAX
// jne LblNotNull
//
// LblNull:
// callq throw_NullPointerException
//
// LblNotNull:
// Inst0
// Inst1
// ...
// Def = Load (%RAX + <offset>)
// ...
//
//
// we want to end up with
//
// Def = TrappingLoad (%RAX + <offset>), LblNull
// jmp LblNotNull ;; explicit or fallthrough
//
// LblNotNull:
// Inst0
// Inst1
// ...
//
// LblNull:
// callq throw_NullPointerException
//
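  // Note that the transformation is only sound for loads landing within the
  // first page (the Offset < PageSize test below): only then does
  // dereferencing a null PointerReg reliably touch the protected zero page
  // and fault.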
unsigned PointerReg = MBP.LHS.getReg();
// As we scan NotNullSucc for a suitable load instruction, we keep track of
// the registers defined and used by the instructions we scan past. This bit
// of information lets us decide if it is legal to hoist the load instruction
// we find (if we do find such an instruction) to before NotNullSucc.
DenseSet<unsigned> RegDefs, RegUses;
// Returns true if it is safe to reorder MI to before NotNullSucc.
auto IsSafeToHoist = [&](MachineInstr *MI) {
// Right now we don't want to worry about LLVM's memory model. This can be
// made more precise later.
for (auto *MMO : MI->memoperands())
if (!MMO->isUnordered())
return false;
for (auto &MO : MI->operands()) {
if (MO.isReg() && MO.getReg()) {
for (unsigned Reg : RegDefs)
if (TRI->regsOverlap(Reg, MO.getReg()))
return false; // We found a write-after-write or read-after-write
if (MO.isDef())
for (unsigned Reg : RegUses)
if (TRI->regsOverlap(Reg, MO.getReg()))
return false; // We found a write-after-read
}
}
return true;
};
for (auto MII = NotNullSucc->begin(), MIE = NotNullSucc->end(); MII != MIE;
++MII) {
MachineInstr *MI = &*MII;
unsigned BaseReg, Offset;
if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
if (MI->mayLoad() && !MI->isPredicable() && BaseReg == PointerReg &&
Offset < PageSize && MI->getDesc().getNumDefs() == 1 &&
IsSafeToHoist(MI)) {
NullCheckList.emplace_back(MI, MBP.ConditionDef, &MBB, NotNullSucc,
NullSucc);
return true;
}
// MI did not match our criteria for conversion to a trapping load. Check
// if we can continue looking.
if (MI->mayStore() || MI->hasUnmodeledSideEffects())
return false;
for (auto *MMO : MI->memoperands())
// Right now we don't want to worry about LLVM's memory model.
if (!MMO->isUnordered())
return false;
// It _may_ be okay to reorder a later load instruction across MI. Make a
// note of its operands so that we can make the legality check if we find a
// suitable load instruction:
for (auto &MO : MI->operands()) {
if (!MO.isReg() || !MO.getReg())
continue;
if (MO.isDef())
RegDefs.insert(MO.getReg());
else
RegUses.insert(MO.getReg());
}
}
return false;
}
/// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
/// instruction. The FAULTING_LOAD_OP instruction does the same load as LoadMI
/// (defining the same register), and branches to HandlerLabel if the load
/// faults. The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
MachineInstr *ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
MachineBasicBlock *MBB,
MCSymbol *HandlerLabel) {
DebugLoc DL;
unsigned NumDefs = LoadMI->getDesc().getNumDefs();
assert(NumDefs == 1 && "other cases unhandled!");
(void)NumDefs;
unsigned DefReg = LoadMI->defs().begin()->getReg();
assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
"expected exactly one def!");
auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
.addSym(HandlerLabel)
.addImm(LoadMI->getOpcode());
for (auto &MO : LoadMI->uses())
MIB.addOperand(MO);
MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
return MIB;
}
/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
DebugLoc DL;
for (auto &NC : NullCheckList) {
MCSymbol *HandlerLabel = MMI->getContext().createTempSymbol();
// Remove the conditional branch dependent on the null check.
unsigned BranchesRemoved = TII->RemoveBranch(*NC.CheckBlock);
(void)BranchesRemoved;
assert(BranchesRemoved > 0 && "expected at least one branch!");
    // Insert a faulting load where the conditional branch was originally. The
    // check we did earlier ensures that this bit of code motion is legal. We
    // do not touch the successors list for any basic block since we haven't
    // changed control flow, we've just made it implicit.
insertFaultingLoad(NC.MemOperation, NC.CheckBlock, HandlerLabel);
NC.MemOperation->eraseFromParent();
NC.CheckOperation->eraseFromParent();
// Insert an *unconditional* branch to not-null successor.
TII->InsertBranch(*NC.CheckBlock, NC.NotNullSucc, nullptr, /*Cond=*/None,
DL);
// Emit the HandlerLabel as an EH_LABEL.
BuildMI(*NC.NullSucc, NC.NullSucc->begin(), DL,
TII->get(TargetOpcode::EH_LABEL)).addSym(HandlerLabel);
NumImplicitNullChecks++;
}
}
char ImplicitNullChecks::ID = 0;
char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",
"Implicit null checks", false, false)
INITIALIZE_PASS_END(ImplicitNullChecks, "implicit-null-checks",
"Implicit null checks", false, false)
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/StackMapLivenessAnalysis.cpp | //===-- StackMapLivenessAnalysis.cpp - StackMap Live Out Analysis ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the StackMap Liveness analysis pass. The pass calculates
// the liveness for each basic block in a function and attaches the register
// live-out information to a stackmap or patchpoint intrinsic if present.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "stackmaps"
static cl::opt<bool> EnablePatchPointLiveness(
"enable-patchpoint-liveness", cl::Hidden, cl::init(true),
cl::desc("Enable PatchPoint Liveness Analysis Pass"));
STATISTIC(NumStackMapFuncVisited, "Number of functions visited");
STATISTIC(NumStackMapFuncSkipped, "Number of functions skipped");
STATISTIC(NumBBsVisited, "Number of basic blocks visited");
STATISTIC(NumBBsHaveNoStackmap, "Number of basic blocks with no stackmap");
STATISTIC(NumStackMaps, "Number of StackMaps visited");
namespace {
/// \brief This pass calculates the liveness information for each basic block in
/// a function and attaches the register live-out information to a patchpoint
/// intrinsic if present.
///
/// This pass can be disabled via the -enable-patchpoint-liveness=false flag.
/// The pass skips functions that don't have any patchpoint intrinsics. The
/// information provided by this pass is optional and not required by the
/// aforementioned intrinsic to function.
class StackMapLiveness : public MachineFunctionPass {
const TargetRegisterInfo *TRI;
LivePhysRegs LiveRegs;
public:
static char ID;
/// \brief Default construct and initialize the pass.
StackMapLiveness();
/// \brief Tell the pass manager which passes we depend on and what
/// information we preserve.
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// \brief Calculate the liveness information for the given machine function.
bool runOnMachineFunction(MachineFunction &MF) override;
private:
/// \brief Performs the actual liveness calculation for the function.
bool calculateLiveness(MachineFunction &MF);
/// \brief Add the current register live set to the instruction.
void addLiveOutSetToMI(MachineFunction &MF, MachineInstr &MI);
/// \brief Create a register mask and initialize it with the registers from
/// the register live set.
uint32_t *createRegisterMask(MachineFunction &MF) const;
};
} // namespace
char StackMapLiveness::ID = 0;
char &llvm::StackMapLivenessID = StackMapLiveness::ID;
INITIALIZE_PASS(StackMapLiveness, "stackmap-liveness",
"StackMap Liveness Analysis", false, false)
/// Default construct and initialize the pass.
StackMapLiveness::StackMapLiveness() : MachineFunctionPass(ID) {
initializeStackMapLivenessPass(*PassRegistry::getPassRegistry());
}
/// Tell the pass manager which passes we depend on and what information we
/// preserve.
void StackMapLiveness::getAnalysisUsage(AnalysisUsage &AU) const {
// We preserve all information.
AU.setPreservesAll();
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
/// Calculate the liveness information for the given machine function.
bool StackMapLiveness::runOnMachineFunction(MachineFunction &MF) {
if (!EnablePatchPointLiveness)
return false;
DEBUG(dbgs() << "********** COMPUTING STACKMAP LIVENESS: " << MF.getName()
<< " **********\n");
TRI = MF.getSubtarget().getRegisterInfo();
++NumStackMapFuncVisited;
// Skip this function if there are no patchpoints to process.
if (!MF.getFrameInfo()->hasPatchPoint()) {
++NumStackMapFuncSkipped;
return false;
}
return calculateLiveness(MF);
}
/// Performs the actual liveness calculation for the function.
bool StackMapLiveness::calculateLiveness(MachineFunction &MF) {
bool HasChanged = false;
// For all basic blocks in the function.
for (auto &MBB : MF) {
DEBUG(dbgs() << "****** BB " << MBB.getName() << " ******\n");
LiveRegs.init(TRI);
LiveRegs.addLiveOuts(&MBB);
bool HasStackMap = false;
// Reverse iterate over all instructions and add the current live register
// set to an instruction if we encounter a patchpoint instruction.
for (auto I = MBB.rbegin(), E = MBB.rend(); I != E; ++I) {
if (I->getOpcode() == TargetOpcode::PATCHPOINT) {
addLiveOutSetToMI(MF, *I);
HasChanged = true;
HasStackMap = true;
++NumStackMaps;
}
DEBUG(dbgs() << " " << LiveRegs << " " << *I);
LiveRegs.stepBackward(*I);
}
++NumBBsVisited;
if (!HasStackMap)
++NumBBsHaveNoStackmap;
}
return HasChanged;
}
/// Add the current register live set to the instruction.
void StackMapLiveness::addLiveOutSetToMI(MachineFunction &MF,
MachineInstr &MI) {
uint32_t *Mask = createRegisterMask(MF);
MachineOperand MO = MachineOperand::CreateRegLiveOut(Mask);
MI.addOperand(MF, MO);
}
/// Create a register mask and initialize it with the registers from the
/// register live set.
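///
/// Each register occupies a single bit: register 35, for example, sets bit
/// (35 % 32) == 3 of mask word (35 / 32) == 1.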
uint32_t *StackMapLiveness::createRegisterMask(MachineFunction &MF) const {
// The mask is owned and cleaned up by the Machine Function.
uint32_t *Mask = MF.allocateRegisterMask(TRI->getNumRegs());
for (auto Reg : LiveRegs)
Mask[Reg / 32] |= 1U << (Reg % 32);
// Give the target a chance to adjust the mask.
TRI->adjustStackMapLiveOutMask(Mask);
return Mask;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/module.modulemap | module CodeGen { requires cplusplus umbrella "." module * { export * } }
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/StackColoring.cpp | //===-- StackColoring.cpp -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements the stack-coloring optimization that looks for
// lifetime marker machine instructions (LIFETIME_START and LIFETIME_END),
// which represent the possible lifetime of stack slots. It attempts to
// merge disjoint stack slots and reduce the used stack space.
// NOTE: This pass is not StackSlotColoring, which optimizes spill slots.
//
// TODO: In the future we plan to improve stack coloring in the following ways:
// 1. Allow merging multiple small slots into a single larger slot at different
// offsets.
// 2. Merge this pass with StackSlotColoring and allow merging of allocas with
// spill slots.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
#define DEBUG_TYPE "stackcoloring"
static cl::opt<bool>
DisableColoring("no-stack-coloring",
cl::init(false), cl::Hidden,
cl::desc("Disable stack coloring"));
/// The user may write code that uses allocas outside of the declared lifetime
/// zone. This can happen when the user returns a reference to a local
/// data-structure. We can detect these cases and decide not to optimize the
/// code. If this flag is enabled, we try to save the user.
static cl::opt<bool>
ProtectFromEscapedAllocas("protect-from-escaped-allocas",
cl::init(false), cl::Hidden,
cl::desc("Do not optimize lifetime zones that "
"are broken"));
STATISTIC(NumMarkerSeen, "Number of lifetime markers found.");
STATISTIC(StackSpaceSaved, "Number of bytes saved due to merging slots.");
STATISTIC(StackSlotMerged, "Number of stack slots merged.");
STATISTIC(EscapedAllocas, "Number of allocas that escaped the lifetime region.");
//===----------------------------------------------------------------------===//
// StackColoring Pass
//===----------------------------------------------------------------------===//
namespace {
/// StackColoring - A machine pass for merging disjoint stack allocations,
/// marked by the LIFETIME_START and LIFETIME_END pseudo instructions.
class StackColoring : public MachineFunctionPass {
MachineFrameInfo *MFI;
MachineFunction *MF;
/// A class representing liveness information for a single basic block.
/// Each bit in the BitVector represents the liveness property
/// for a different stack slot.
struct BlockLifetimeInfo {
    /// Which slots BEGIN in each basic block.
    BitVector Begin;
    /// Which slots END in each basic block.
    BitVector End;
/// Which slots are marked as LIVE_IN, coming into each basic block.
BitVector LiveIn;
/// Which slots are marked as LIVE_OUT, coming out of each basic block.
BitVector LiveOut;
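    // For example, a block containing only LIFETIME_START for slot 1 and
    // LIFETIME_END for slot 3 has Begin = {1} and End = {3}; LiveIn and
    // LiveOut are filled in later by calculateLocalLiveness().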
};
/// Maps active slots (per bit) for each basic block.
typedef DenseMap<const MachineBasicBlock*, BlockLifetimeInfo> LivenessMap;
LivenessMap BlockLiveness;
  /// Maps basic blocks to their serial numbers.
  DenseMap<const MachineBasicBlock*, int> BasicBlocks;
  /// Maps serial numbers back to basic blocks.
  SmallVector<const MachineBasicBlock*, 8> BasicBlockNumbering;
  /// Liveness intervals, one for each stack slot.
SmallVector<std::unique_ptr<LiveInterval>, 16> Intervals;
/// VNInfo is used for the construction of LiveIntervals.
VNInfo::Allocator VNInfoAllocator;
/// SlotIndex analysis object.
SlotIndexes *Indexes;
/// The stack protector object.
StackProtector *SP;
/// The list of lifetime markers found. These markers are to be removed
/// once the coloring is done.
SmallVector<MachineInstr*, 8> Markers;
public:
static char ID;
StackColoring() : MachineFunctionPass(ID) {
initializeStackColoringPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction &MF) override;
private:
  /// Dump the computed block liveness for debugging.
void dump() const;
/// Removes all of the lifetime marker instructions from the function.
/// \returns true if any markers were removed.
bool removeAllMarkers();
/// Scan the machine function and find all of the lifetime markers.
/// Record the findings in the BEGIN and END vectors.
/// \returns the number of markers found.
unsigned collectMarkers(unsigned NumSlot);
  /// Perform the dataflow calculation and compute the lifetime for each of
  /// the slots, based on the BEGIN/END vectors. Set the LiveIn and LiveOut
  /// bitvectors that represent which stack slots are live coming into and
  /// out of each basic block.
void calculateLocalLiveness();
/// Construct the LiveIntervals for the slots.
void calculateLiveIntervals(unsigned NumSlots);
/// Go over the machine function and change instructions which use stack
/// slots to use the joint slots.
void remapInstructions(DenseMap<int, int> &SlotRemap);
/// The input program may contain instructions which are not inside lifetime
/// markers. This can happen due to a bug in the compiler or due to a bug in
/// user code (for example, returning a reference to a local variable).
/// This procedure checks all of the instructions in the function and
/// invalidates lifetime ranges which do not contain all of the instructions
/// which access that frame slot.
void removeInvalidSlotRanges();
/// Map entries which point to other entries to their destination.
/// A->B->C becomes A->C.
void expungeSlotMap(DenseMap<int, int> &SlotRemap, unsigned NumSlots);
};
} // end anonymous namespace
char StackColoring::ID = 0;
char &llvm::StackColoringID = StackColoring::ID;
INITIALIZE_PASS_BEGIN(StackColoring,
"stack-coloring", "Merge disjoint stack slots", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_END(StackColoring,
"stack-coloring", "Merge disjoint stack slots", false, false)
void StackColoring::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<SlotIndexes>();
AU.addRequired<StackProtector>();
MachineFunctionPass::getAnalysisUsage(AU);
}
void StackColoring::dump() const {
for (MachineBasicBlock *MBB : depth_first(MF)) {
DEBUG(dbgs() << "Inspecting block #" << BasicBlocks.lookup(MBB) << " ["
<< MBB->getName() << "]\n");
LivenessMap::const_iterator BI = BlockLiveness.find(MBB);
assert(BI != BlockLiveness.end() && "Block not found");
const BlockLifetimeInfo &BlockInfo = BI->second;
DEBUG(dbgs()<<"BEGIN : {");
for (unsigned i=0; i < BlockInfo.Begin.size(); ++i)
DEBUG(dbgs()<<BlockInfo.Begin.test(i)<<" ");
DEBUG(dbgs()<<"}\n");
DEBUG(dbgs()<<"END : {");
for (unsigned i=0; i < BlockInfo.End.size(); ++i)
DEBUG(dbgs()<<BlockInfo.End.test(i)<<" ");
DEBUG(dbgs()<<"}\n");
DEBUG(dbgs()<<"LIVE_IN: {");
for (unsigned i=0; i < BlockInfo.LiveIn.size(); ++i)
DEBUG(dbgs()<<BlockInfo.LiveIn.test(i)<<" ");
DEBUG(dbgs()<<"}\n");
DEBUG(dbgs()<<"LIVEOUT: {");
for (unsigned i=0; i < BlockInfo.LiveOut.size(); ++i)
DEBUG(dbgs()<<BlockInfo.LiveOut.test(i)<<" ");
DEBUG(dbgs()<<"}\n");
}
}
unsigned StackColoring::collectMarkers(unsigned NumSlot) {
unsigned MarkersFound = 0;
// Scan the function to find all lifetime markers.
  // NOTE: We use a depth-first iteration to ensure that we obtain a
  // deterministic numbering, and because we'll need a post-order iteration
  // later for solving the liveness dataflow problem.
for (MachineBasicBlock *MBB : depth_first(MF)) {
// Assign a serial number to this basic block.
BasicBlocks[MBB] = BasicBlockNumbering.size();
BasicBlockNumbering.push_back(MBB);
// Keep a reference to avoid repeated lookups.
BlockLifetimeInfo &BlockInfo = BlockLiveness[MBB];
BlockInfo.Begin.resize(NumSlot);
BlockInfo.End.resize(NumSlot);
for (MachineInstr &MI : *MBB) {
if (MI.getOpcode() != TargetOpcode::LIFETIME_START &&
MI.getOpcode() != TargetOpcode::LIFETIME_END)
continue;
Markers.push_back(&MI);
bool IsStart = MI.getOpcode() == TargetOpcode::LIFETIME_START;
const MachineOperand &MO = MI.getOperand(0);
unsigned Slot = MO.getIndex();
MarkersFound++;
const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
if (Allocation) {
DEBUG(dbgs()<<"Found a lifetime marker for slot #"<<Slot<<
" with allocation: "<< Allocation->getName()<<"\n");
}
if (IsStart) {
BlockInfo.Begin.set(Slot);
} else {
if (BlockInfo.Begin.test(Slot)) {
// Allocas that start and end within a single block are handled
// specially when computing the LiveIntervals to avoid pessimizing
// the liveness propagation.
BlockInfo.Begin.reset(Slot);
} else {
BlockInfo.End.set(Slot);
}
}
}
}
// Update statistics.
NumMarkerSeen += MarkersFound;
return MarkersFound;
}
void StackColoring::calculateLocalLiveness() {
// Perform a standard reverse dataflow computation to solve for
// global liveness. The BEGIN set here is equivalent to KILL in the standard
// formulation, and END is equivalent to GEN. The result of this computation
// is a map from blocks to bitvectors where the bitvectors represent which
// allocas are live in/out of that block.
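  // Schematically, for each block B (the loop below additionally
  // cross-propagates the local in/out sets within the block):
  //   LiveIn(B)  = (union of LiveOut over preds | End(B))   - Begin(B)
  //   LiveOut(B) = (union of LiveIn over succs  | Begin(B)) - End(B)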
SmallPtrSet<const MachineBasicBlock*, 8> BBSet(BasicBlockNumbering.begin(),
BasicBlockNumbering.end());
unsigned NumSSMIters = 0;
bool changed = true;
while (changed) {
changed = false;
++NumSSMIters;
SmallPtrSet<const MachineBasicBlock*, 8> NextBBSet;
for (const MachineBasicBlock *BB : BasicBlockNumbering) {
if (!BBSet.count(BB)) continue;
// Use an iterator to avoid repeated lookups.
LivenessMap::iterator BI = BlockLiveness.find(BB);
assert(BI != BlockLiveness.end() && "Block not found");
BlockLifetimeInfo &BlockInfo = BI->second;
BitVector LocalLiveIn;
BitVector LocalLiveOut;
// Forward propagation from begins to ends.
for (MachineBasicBlock::const_pred_iterator PI = BB->pred_begin(),
PE = BB->pred_end(); PI != PE; ++PI) {
LivenessMap::const_iterator I = BlockLiveness.find(*PI);
assert(I != BlockLiveness.end() && "Predecessor not found");
LocalLiveIn |= I->second.LiveOut;
}
LocalLiveIn |= BlockInfo.End;
LocalLiveIn.reset(BlockInfo.Begin);
// Reverse propagation from ends to begins.
for (MachineBasicBlock::const_succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI) {
LivenessMap::const_iterator I = BlockLiveness.find(*SI);
assert(I != BlockLiveness.end() && "Successor not found");
LocalLiveOut |= I->second.LiveIn;
}
LocalLiveOut |= BlockInfo.Begin;
LocalLiveOut.reset(BlockInfo.End);
LocalLiveIn |= LocalLiveOut;
LocalLiveOut |= LocalLiveIn;
      // After adopting the live bits, we need to turn off the bits which
      // are de-activated in this block.
LocalLiveOut.reset(BlockInfo.End);
LocalLiveIn.reset(BlockInfo.Begin);
      // If we have both BEGIN and END markers in the same basic block then
      // we know that the BEGIN marker comes after the END, because we already
      // handle the case where the BEGIN comes before the END when collecting
      // the markers (and building the BEGIN/END vectors).
      // We want to enable the LIVE_IN and LIVE_OUT of slots that have both
      // BEGIN and END because it means that the value lives before and after
      // this basic block.
BitVector LocalEndBegin = BlockInfo.End;
LocalEndBegin &= BlockInfo.Begin;
LocalLiveIn |= LocalEndBegin;
LocalLiveOut |= LocalEndBegin;
if (LocalLiveIn.test(BlockInfo.LiveIn)) {
changed = true;
BlockInfo.LiveIn |= LocalLiveIn;
NextBBSet.insert(BB->pred_begin(), BB->pred_end());
}
if (LocalLiveOut.test(BlockInfo.LiveOut)) {
changed = true;
BlockInfo.LiveOut |= LocalLiveOut;
NextBBSet.insert(BB->succ_begin(), BB->succ_end());
}
}
BBSet = std::move(NextBBSet);
}// while changed.
}
void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
SmallVector<SlotIndex, 16> Starts;
SmallVector<SlotIndex, 16> Finishes;
// For each block, find which slots are active within this block
// and update the live intervals.
for (const MachineBasicBlock &MBB : *MF) {
Starts.clear();
Starts.resize(NumSlots);
Finishes.clear();
Finishes.resize(NumSlots);
    // Record the earliest BEGIN and the latest END seen for each slot with
    // lifetime markers in this block.
for (const MachineInstr *MI : Markers) {
if (MI->getParent() != &MBB)
continue;
assert((MI->getOpcode() == TargetOpcode::LIFETIME_START ||
MI->getOpcode() == TargetOpcode::LIFETIME_END) &&
"Invalid Lifetime marker");
bool IsStart = MI->getOpcode() == TargetOpcode::LIFETIME_START;
const MachineOperand &Mo = MI->getOperand(0);
int Slot = Mo.getIndex();
assert(Slot >= 0 && "Invalid slot");
SlotIndex ThisIndex = Indexes->getInstructionIndex(MI);
if (IsStart) {
if (!Starts[Slot].isValid() || Starts[Slot] > ThisIndex)
Starts[Slot] = ThisIndex;
} else {
if (!Finishes[Slot].isValid() || Finishes[Slot] < ThisIndex)
Finishes[Slot] = ThisIndex;
}
}
    // For slots that are live into or out of this block, pin the start/end
    // points to the block boundaries.
BlockLifetimeInfo &MBBLiveness = BlockLiveness[&MBB];
for (int pos = MBBLiveness.LiveIn.find_first(); pos != -1;
pos = MBBLiveness.LiveIn.find_next(pos)) {
Starts[pos] = Indexes->getMBBStartIdx(&MBB);
}
for (int pos = MBBLiveness.LiveOut.find_first(); pos != -1;
pos = MBBLiveness.LiveOut.find_next(pos)) {
Finishes[pos] = Indexes->getMBBEndIdx(&MBB);
}
for (unsigned i = 0; i < NumSlots; ++i) {
assert(Starts[i].isValid() == Finishes[i].isValid() && "Unmatched range");
if (!Starts[i].isValid())
continue;
assert(Starts[i] && Finishes[i] && "Invalid interval");
VNInfo *ValNum = Intervals[i]->getValNumInfo(0);
SlotIndex S = Starts[i];
SlotIndex F = Finishes[i];
if (S < F) {
// We have a single consecutive region.
Intervals[i]->addSegment(LiveInterval::Segment(S, F, ValNum));
} else {
// We have two non-consecutive regions. This happens when
// LIFETIME_START appears after the LIFETIME_END marker.
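        // For a block spanning [BStart, BEnd) this produces the two segments
        // [BStart, F) and [S, BEnd): the slot is live across both block
        // boundaries but dead in between.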
SlotIndex NewStart = Indexes->getMBBStartIdx(&MBB);
SlotIndex NewFin = Indexes->getMBBEndIdx(&MBB);
Intervals[i]->addSegment(LiveInterval::Segment(NewStart, F, ValNum));
Intervals[i]->addSegment(LiveInterval::Segment(S, NewFin, ValNum));
}
}
}
}
bool StackColoring::removeAllMarkers() {
unsigned Count = 0;
for (MachineInstr *MI : Markers) {
MI->eraseFromParent();
Count++;
}
Markers.clear();
DEBUG(dbgs()<<"Removed "<<Count<<" markers.\n");
return Count;
}
void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
unsigned FixedInstr = 0;
unsigned FixedMemOp = 0;
unsigned FixedDbg = 0;
MachineModuleInfo *MMI = &MF->getMMI();
// Remap debug information that refers to stack slots.
for (auto &VI : MMI->getVariableDbgInfo()) {
if (!VI.Var)
continue;
if (SlotRemap.count(VI.Slot)) {
DEBUG(dbgs() << "Remapping debug info for ["
<< cast<DILocalVariable>(VI.Var)->getName() << "].\n");
VI.Slot = SlotRemap[VI.Slot];
FixedDbg++;
}
}
// Keep a list of *allocas* which need to be remapped.
DenseMap<const AllocaInst*, const AllocaInst*> Allocas;
for (const std::pair<int, int> &SI : SlotRemap) {
const AllocaInst *From = MFI->getObjectAllocation(SI.first);
const AllocaInst *To = MFI->getObjectAllocation(SI.second);
assert(To && From && "Invalid allocation object");
Allocas[From] = To;
// AA might be used later for instruction scheduling, and we need it to be
    // able to deduce the correct aliasing relationships between pointers
// derived from the alloca being remapped and the target of that remapping.
// The only safe way, without directly informing AA about the remapping
// somehow, is to directly update the IR to reflect the change being made
// here.
Instruction *Inst = const_cast<AllocaInst *>(To);
if (From->getType() != To->getType()) {
BitCastInst *Cast = new BitCastInst(Inst, From->getType());
Cast->insertAfter(Inst);
Inst = Cast;
}
// Allow the stack protector to adjust its value map to account for the
// upcoming replacement.
SP->adjustForColoring(From, To);
// Note that this will not replace uses in MMOs (which we'll update below),
// or anywhere else (which is why we won't delete the original
// instruction).
const_cast<AllocaInst *>(From)->replaceAllUsesWith(Inst);
}
// Remap all instructions to the new stack slots.
for (MachineBasicBlock &BB : *MF)
for (MachineInstr &I : BB) {
// Skip lifetime markers. We'll remove them soon.
if (I.getOpcode() == TargetOpcode::LIFETIME_START ||
I.getOpcode() == TargetOpcode::LIFETIME_END)
continue;
// Update the MachineMemOperand to use the new alloca.
for (MachineMemOperand *MMO : I.memoperands()) {
// FIXME: In order to enable the use of TBAA when using AA in CodeGen,
// we'll also need to update the TBAA nodes in MMOs with values
// derived from the merged allocas. When doing this, we'll need to use
// the same variant of GetUnderlyingObjects that is used by the
// instruction scheduler (that can look through ptrtoint/inttoptr
// pairs).
// We've replaced IR-level uses of the remapped allocas, so we only
// need to replace direct uses here.
const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(MMO->getValue());
if (!AI)
continue;
if (!Allocas.count(AI))
continue;
MMO->setValue(Allocas[AI]);
FixedMemOp++;
}
// Update all of the machine instruction operands.
for (MachineOperand &MO : I.operands()) {
if (!MO.isFI())
continue;
int FromSlot = MO.getIndex();
// Don't touch arguments.
if (FromSlot<0)
continue;
// Only look at mapped slots.
if (!SlotRemap.count(FromSlot))
continue;
// In a debug build, check that the instruction that we are modifying is
// inside the expected live range. If the instruction is not inside
// the calculated range then it means that the alloca usage moved
// outside of the lifetime markers, or that the user has a bug.
// NOTE: Alloca address calculations which happen outside the lifetime
        // zone are okay, despite the fact that we don't have a good way
// for validating all of the usages of the calculation.
#ifndef NDEBUG
bool TouchesMemory = I.mayLoad() || I.mayStore();
// If we *don't* protect the user from escaped allocas, don't bother
// validating the instructions.
if (!I.isDebugValue() && TouchesMemory && ProtectFromEscapedAllocas) {
SlotIndex Index = Indexes->getInstructionIndex(&I);
const LiveInterval *Interval = &*Intervals[FromSlot];
assert(Interval->find(Index) != Interval->end() &&
"Found instruction usage outside of live range.");
}
#endif
// Fix the machine instructions.
int ToSlot = SlotRemap[FromSlot];
MO.setIndex(ToSlot);
FixedInstr++;
}
}
DEBUG(dbgs()<<"Fixed "<<FixedMemOp<<" machine memory operands.\n");
DEBUG(dbgs()<<"Fixed "<<FixedDbg<<" debug locations.\n");
DEBUG(dbgs()<<"Fixed "<<FixedInstr<<" machine instructions.\n");
}
void StackColoring::removeInvalidSlotRanges() {
for (MachineBasicBlock &BB : *MF)
for (MachineInstr &I : BB) {
if (I.getOpcode() == TargetOpcode::LIFETIME_START ||
I.getOpcode() == TargetOpcode::LIFETIME_END || I.isDebugValue())
continue;
// Some intervals are suspicious! In some cases we find address
      // calculations outside of the lifetime zone, but no actual memory
// read or write. Memory accesses outside of the lifetime zone are a clear
// violation, but address calculations are okay. This can happen when
// GEPs are hoisted outside of the lifetime zone.
// So, in here we only check instructions which can read or write memory.
if (!I.mayLoad() && !I.mayStore())
continue;
// Check all of the machine operands.
for (const MachineOperand &MO : I.operands()) {
if (!MO.isFI())
continue;
int Slot = MO.getIndex();
if (Slot<0)
continue;
if (Intervals[Slot]->empty())
continue;
// Check that the used slot is inside the calculated lifetime range.
// If it is not, warn about it and invalidate the range.
LiveInterval *Interval = &*Intervals[Slot];
SlotIndex Index = Indexes->getInstructionIndex(&I);
if (Interval->find(Index) == Interval->end()) {
Interval->clear();
DEBUG(dbgs()<<"Invalidating range #"<<Slot<<"\n");
EscapedAllocas++;
}
}
}
}
void StackColoring::expungeSlotMap(DenseMap<int, int> &SlotRemap,
unsigned NumSlots) {
// Expunge slot remap map.
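  // For example, SlotRemap = {1->2, 2->5} is flattened to {1->5, 2->5}.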
for (unsigned i=0; i < NumSlots; ++i) {
    // If slot i is remapped, follow the chain of mappings to its final target.
if (SlotRemap.count(i)) {
int Target = SlotRemap[i];
// As long as our target is mapped to something else, follow it.
while (SlotRemap.count(Target)) {
Target = SlotRemap[Target];
SlotRemap[i] = Target;
}
}
}
}
bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
if (skipOptnoneFunction(*Func.getFunction()))
return false;
DEBUG(dbgs() << "********** Stack Coloring **********\n"
<< "********** Function: "
<< ((const Value*)Func.getFunction())->getName() << '\n');
MF = &Func;
MFI = MF->getFrameInfo();
Indexes = &getAnalysis<SlotIndexes>();
SP = &getAnalysis<StackProtector>();
BlockLiveness.clear();
BasicBlocks.clear();
BasicBlockNumbering.clear();
Markers.clear();
Intervals.clear();
VNInfoAllocator.Reset();
unsigned NumSlots = MFI->getObjectIndexEnd();
// If there are no stack slots then there are no markers to remove.
if (!NumSlots)
return false;
SmallVector<int, 8> SortedSlots;
SortedSlots.reserve(NumSlots);
Intervals.reserve(NumSlots);
unsigned NumMarkers = collectMarkers(NumSlots);
unsigned TotalSize = 0;
DEBUG(dbgs()<<"Found "<<NumMarkers<<" markers and "<<NumSlots<<" slots\n");
DEBUG(dbgs()<<"Slot structure:\n");
for (int i=0; i < MFI->getObjectIndexEnd(); ++i) {
DEBUG(dbgs()<<"Slot #"<<i<<" - "<<MFI->getObjectSize(i)<<" bytes.\n");
TotalSize += MFI->getObjectSize(i);
}
DEBUG(dbgs()<<"Total Stack size: "<<TotalSize<<" bytes\n\n");
  // Don't continue if there are not enough lifetime markers, the stack is
  // too small, or we were told not to optimize the slots.
if (NumMarkers < 2 || TotalSize < 16 || DisableColoring) {
DEBUG(dbgs()<<"Will not try to merge slots.\n");
return removeAllMarkers();
}
for (unsigned i=0; i < NumSlots; ++i) {
std::unique_ptr<LiveInterval> LI(new LiveInterval(i, 0));
LI->getNextValue(Indexes->getZeroIndex(), VNInfoAllocator);
Intervals.push_back(std::move(LI));
SortedSlots.push_back(i);
}
// Calculate the liveness of each block.
calculateLocalLiveness();
// Propagate the liveness information.
calculateLiveIntervals(NumSlots);
// Search for allocas which are used outside of the declared lifetime
// markers.
if (ProtectFromEscapedAllocas)
removeInvalidSlotRanges();
// Maps old slots to new slots.
DenseMap<int, int> SlotRemap;
unsigned RemovedSlots = 0;
unsigned ReducedSize = 0;
// Do not bother looking at empty intervals.
for (unsigned I = 0; I < NumSlots; ++I) {
if (Intervals[SortedSlots[I]]->empty())
SortedSlots[I] = -1;
}
// This is a simple greedy algorithm for merging allocas. First, sort the
// slots, placing the largest slots first. Next, perform an n^2 scan and look
  // for disjoint slots. When you find disjoint slots, merge the smaller one
// into the bigger one and update the live interval. Remove the small alloca
// and continue.
// Sort the slots according to their size. Place unused slots at the end.
// Use stable sort to guarantee deterministic code generation.
std::stable_sort(SortedSlots.begin(), SortedSlots.end(),
[this](int LHS, int RHS) {
    // We use -1 to denote an uninteresting slot. Place these slots at the end.
if (LHS == -1) return false;
if (RHS == -1) return true;
// Sort according to size.
return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
});
bool Changed = true;
while (Changed) {
Changed = false;
for (unsigned I = 0; I < NumSlots; ++I) {
if (SortedSlots[I] == -1)
continue;
for (unsigned J=I+1; J < NumSlots; ++J) {
if (SortedSlots[J] == -1)
continue;
int FirstSlot = SortedSlots[I];
int SecondSlot = SortedSlots[J];
LiveInterval *First = &*Intervals[FirstSlot];
LiveInterval *Second = &*Intervals[SecondSlot];
assert (!First->empty() && !Second->empty() && "Found an empty range");
// Merge disjoint slots.
if (!First->overlaps(*Second)) {
Changed = true;
First->MergeSegmentsInAsValue(*Second, First->getValNumInfo(0));
SlotRemap[SecondSlot] = FirstSlot;
SortedSlots[J] = -1;
DEBUG(dbgs()<<"Merging #"<<FirstSlot<<" and slots #"<<
SecondSlot<<" together.\n");
unsigned MaxAlignment = std::max(MFI->getObjectAlignment(FirstSlot),
MFI->getObjectAlignment(SecondSlot));
assert(MFI->getObjectSize(FirstSlot) >=
MFI->getObjectSize(SecondSlot) &&
"Merging a small object into a larger one");
RemovedSlots+=1;
ReducedSize += MFI->getObjectSize(SecondSlot);
MFI->setObjectAlignment(FirstSlot, MaxAlignment);
MFI->RemoveStackObject(SecondSlot);
}
}
}
}// While changed.
// Record statistics.
StackSpaceSaved += ReducedSize;
StackSlotMerged += RemovedSlots;
DEBUG(dbgs()<<"Merge "<<RemovedSlots<<" slots. Saved "<<
ReducedSize<<" bytes\n");
// Scan the entire function and update all machine operands that use frame
// indices to use the remapped frame index.
expungeSlotMap(SlotRemap, NumSlots);
remapInstructions(SlotRemap);
return removeAllMarkers();
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveIntervalAnalysis.cpp | //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "LiveRangeCalc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <limits>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
"Live Interval Analysis", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
"Live Interval Analysis", false, false)
#ifndef NDEBUG
static cl::opt<bool> EnablePrecomputePhysRegs(
"precompute-phys-liveness", cl::Hidden,
cl::desc("Eagerly compute live intervals for all physreg units."));
#else
static bool EnablePrecomputePhysRegs = false;
#endif // NDEBUG
static cl::opt<bool> EnableSubRegLiveness(
"enable-subreg-liveness", cl::Hidden, cl::init(true),
cl::desc("Enable subregister liveness tracking."));
namespace llvm {
cl::opt<bool> UseSegmentSetForPhysRegs(
"use-segment-set-for-physregs", cl::Hidden, cl::init(true),
cl::desc(
"Use segment set for the computation of the live ranges of physregs."));
}
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
// LiveVariables isn't really required by this analysis, it is only required
// here to make sure it is live during TwoAddressInstructionPass and
// PHIElimination. This is temporary.
AU.addRequired<LiveVariables>();
AU.addPreserved<LiveVariables>();
AU.addPreservedID(MachineLoopInfoID);
AU.addRequiredTransitiveID(MachineDominatorsID);
AU.addPreservedID(MachineDominatorsID);
AU.addPreserved<SlotIndexes>();
AU.addRequiredTransitive<SlotIndexes>();
MachineFunctionPass::getAnalysisUsage(AU);
}
LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
DomTree(nullptr), LRCalc(nullptr) {
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
LiveIntervals::~LiveIntervals() {
delete LRCalc;
}
void LiveIntervals::releaseMemory() {
// Free the live intervals themselves.
for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
VirtRegIntervals.clear();
RegMaskSlots.clear();
RegMaskBits.clear();
RegMaskBlocks.clear();
for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
delete RegUnitRanges[i];
RegUnitRanges.clear();
// Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
VNInfoAllocator.Reset();
}
/// runOnMachineFunction - calculates LiveIntervals
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
MF = &fn;
MRI = &MF->getRegInfo();
TRI = MF->getSubtarget().getRegisterInfo();
TII = MF->getSubtarget().getInstrInfo();
AA = &getAnalysis<AliasAnalysis>();
Indexes = &getAnalysis<SlotIndexes>();
DomTree = &getAnalysis<MachineDominatorTree>();
if (EnableSubRegLiveness && MF->getSubtarget().enableSubRegLiveness())
MRI->enableSubRegLiveness(true);
if (!LRCalc)
LRCalc = new LiveRangeCalc();
// Allocate space for all virtual registers.
VirtRegIntervals.resize(MRI->getNumVirtRegs());
computeVirtRegs();
computeRegMasks();
computeLiveInRegUnits();
if (EnablePrecomputePhysRegs) {
// For stress testing, precompute live ranges of all physical register
// units, including reserved registers.
for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
getRegUnit(i);
}
DEBUG(dump());
return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
OS << "********** INTERVALS **********\n";
// Dump the regunits.
for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
if (LiveRange *LR = RegUnitRanges[i])
OS << PrintRegUnit(i, TRI) << ' ' << *LR << '\n';
// Dump the virtregs.
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (hasInterval(Reg))
OS << getInterval(Reg) << '\n';
}
OS << "RegMasks:";
for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i)
OS << ' ' << RegMaskSlots[i];
OS << '\n';
printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
OS << "********** MACHINEINSTRS **********\n";
MF->print(OS, Indexes);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveIntervals::dumpInstrs() const {
printInstrs(dbgs());
}
#endif
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
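  // Physical registers get an effectively infinite spill weight so the
  // allocator never tries to spill them; virtual registers start at zero and
  // have their weights computed later by the spill weight calculation.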
float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
llvm::huge_valf : 0.0F;
return new LiveInterval(reg, Weight);
}
/// computeVirtRegInterval - Compute the live interval of a virtual register,
/// based on defs and uses.
void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
assert(LRCalc && "LRCalc not initialized.");
assert(LI.empty() && "Should only compute empty intervals.");
LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
computeDeadValues(LI, nullptr);
}
void LiveIntervals::computeVirtRegs() {
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (MRI->reg_nodbg_empty(Reg))
continue;
createAndComputeVirtRegInterval(Reg);
}
}
void LiveIntervals::computeRegMasks() {
RegMaskBlocks.resize(MF->getNumBlockIDs());
// Find all instructions with regmask operands.
for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
RMB.first = RegMaskSlots.size();
for (MachineBasicBlock::iterator MI = MBB->begin(), ME = MBB->end();
MI != ME; ++MI)
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isRegMask())
continue;
RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
RegMaskBits.push_back(MO.getRegMask());
}
// Compute the number of register mask instructions in this block.
RMB.second = RegMaskSlots.size() - RMB.first;
}
}
//===----------------------------------------------------------------------===//
// Register Unit Liveness
//===----------------------------------------------------------------------===//
//
// Fixed interference typically comes from ABI boundaries: Function arguments
// and return values are passed in fixed registers, and so are exception
// pointers entering landing pads. Certain instructions require values to be
// present in specific registers. That is also represented through fixed
// interference.
//
/// computeRegUnitRange - Compute the live range of a register unit, based
/// on the uses and defs of aliasing registers. The range should be empty,
/// or contain only dead phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
assert(LRCalc && "LRCalc not initialized.");
LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
// The physregs aliasing Unit are the roots and their super-registers.
// Create all values as dead defs before extending to uses. Note that roots
// may share super-registers. That's OK because createDeadDefs() is
// idempotent. It is very rare for a register unit to have multiple roots, so
// uniquing super-registers is probably not worthwhile.
for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
Supers.isValid(); ++Supers) {
if (!MRI->reg_empty(*Supers))
LRCalc->createDeadDefs(LR, *Supers);
}
}
// Now extend LR to reach all uses.
// Ignore uses of reserved registers. We only track defs of those.
for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
Supers.isValid(); ++Supers) {
unsigned Reg = *Supers;
if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
LRCalc->extendToUses(LR, Reg);
}
}
// Flush the segment set to the segment vector.
if (UseSegmentSetForPhysRegs)
LR.flushSegmentSet();
}
/// computeLiveInRegUnits - Precompute the live ranges of any register units
/// that are live-in to an ABI block somewhere. Register values can appear
/// without a corresponding def when entering the entry block or a landing pad.
///
void LiveIntervals::computeLiveInRegUnits() {
RegUnitRanges.resize(TRI->getNumRegUnits());
DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");
// Keep track of the live range sets allocated.
SmallVector<unsigned, 8> NewRanges;
// Check all basic blocks for live-ins.
for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
MFI != MFE; ++MFI) {
const MachineBasicBlock *MBB = MFI;
// We only care about ABI blocks: Entry + landing pads.
if ((MFI != MF->begin() && !MBB->isLandingPad()) || MBB->livein_empty())
continue;
// Create phi-defs at Begin for all live-in registers.
SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
for (MachineBasicBlock::livein_iterator LII = MBB->livein_begin(),
LIE = MBB->livein_end(); LII != LIE; ++LII) {
for (MCRegUnitIterator Units(*LII, TRI); Units.isValid(); ++Units) {
unsigned Unit = *Units;
LiveRange *LR = RegUnitRanges[Unit];
if (!LR) {
// Use segment set to speed-up initial computation of the live range.
LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
NewRanges.push_back(Unit);
}
VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
(void)VNI;
DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
}
}
DEBUG(dbgs() << '\n');
}
DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");
// Compute the 'normal' part of the ranges.
for (unsigned i = 0, e = NewRanges.size(); i != e; ++i) {
unsigned Unit = NewRanges[i];
computeRegUnitRange(*RegUnitRanges[Unit], Unit);
}
}
static void createSegmentsForValues(LiveRange &LR,
iterator_range<LiveInterval::vni_iterator> VNIs) {
for (auto VNI : VNIs) {
if (VNI->isUnused())
continue;
SlotIndex Def = VNI->def;
LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI));
}
}
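/// Worklist of (use index, reaching value) pairs; extendSegmentsToUses grows
/// the minimal per-def segments created above until they cover every queued
/// use.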
typedef SmallVector<std::pair<SlotIndex, VNInfo*>, 16> ShrinkToUsesWorkList;
static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
ShrinkToUsesWorkList &WorkList,
const LiveRange &OldRange) {
// Keep track of the PHIs that are in use.
SmallPtrSet<VNInfo*, 8> UsedPHIs;
// Blocks that have already been added to WorkList as live-out.
SmallPtrSet<MachineBasicBlock*, 16> LiveOut;
// Extend intervals to reach all uses in WorkList.
while (!WorkList.empty()) {
SlotIndex Idx = WorkList.back().first;
VNInfo *VNI = WorkList.back().second;
WorkList.pop_back();
const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);
// Extend the live range for VNI to be live at Idx.
if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
assert(ExtVNI == VNI && "Unexpected existing value number");
(void)ExtVNI;
// Is this a PHIDef we haven't seen before?
if (!VNI->isPHIDef() || VNI->def != BlockStart ||
!UsedPHIs.insert(VNI).second)
continue;
// The PHI is live, make sure the predecessors are live-out.
for (auto &Pred : MBB->predecessors()) {
if (!LiveOut.insert(Pred).second)
continue;
SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
// A predecessor is not required to have a live-out value for a PHI.
if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
WorkList.push_back(std::make_pair(Stop, PVNI));
}
continue;
}
// VNI is live-in to MBB.
DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));
// Make sure VNI is live-out from the predecessors.
for (auto &Pred : MBB->predecessors()) {
if (!LiveOut.insert(Pred).second)
continue;
SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
assert(OldRange.getVNInfoBefore(Stop) == VNI &&
"Wrong value out of predecessor");
WorkList.push_back(std::make_pair(Stop, VNI));
}
}
}
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead) {
DEBUG(dbgs() << "Shrink: " << *li << '\n');
assert(TargetRegisterInfo::isVirtualRegister(li->reg)
&& "Can only shrink virtual registers");
// Shrink subregister live ranges.
for (LiveInterval::SubRange &S : li->subranges()) {
shrinkToUses(S, li->reg);
}
// Find all the values used, including PHI kills.
ShrinkToUsesWorkList WorkList;
// Visit all instructions reading li->reg.
for (MachineRegisterInfo::reg_instr_iterator
I = MRI->reg_instr_begin(li->reg), E = MRI->reg_instr_end();
I != E; ) {
MachineInstr *UseMI = &*(I++);
if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
continue;
SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
LiveQueryResult LRQ = li->Query(Idx);
VNInfo *VNI = LRQ.valueIn();
if (!VNI) {
// This shouldn't happen: readsVirtualRegister returns true, but there is
// no live value. It is likely caused by a target getting <undef> flags
// wrong.
DEBUG(dbgs() << Idx << '\t' << *UseMI
<< "Warning: Instr claims to read non-existent value in "
<< *li << '\n');
continue;
}
// Special case: An early-clobber tied operand reads and writes the
// register one slot early.
if (VNInfo *DefVNI = LRQ.valueDefined())
Idx = DefVNI->def;
WorkList.push_back(std::make_pair(Idx, VNI));
}
// Create new live ranges with only minimal live segments per def.
LiveRange NewLR;
createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
extendSegmentsToUses(NewLR, *Indexes, WorkList, *li);
// Move the trimmed segments back.
li->segments.swap(NewLR.segments);
// Handle dead values.
bool CanSeparate = computeDeadValues(*li, dead);
DEBUG(dbgs() << "Shrunk: " << *li << '\n');
return CanSeparate;
}
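/// Look for dead values in LI: values whose segment ends right at the dead
/// slot of their def. Dead PHI defs are removed from the range; other dead
/// defs get a dead flag on the defining instruction, which is appended to
/// \p dead when all of its defs are dead.
/// \returns true if any PHI value number was removed, which may allow the
/// remaining pieces of the interval to be separated.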
bool LiveIntervals::computeDeadValues(LiveInterval &LI,
SmallVectorImpl<MachineInstr*> *dead) {
bool PHIRemoved = false;
for (auto VNI : LI.valnos) {
if (VNI->isUnused())
continue;
SlotIndex Def = VNI->def;
LiveRange::iterator I = LI.FindSegmentContaining(Def);
assert(I != LI.end() && "Missing segment for VNI");
// Is the register live before? Otherwise we may have to add a read-undef
// flag for subregister defs.
if (MRI->shouldTrackSubRegLiveness(LI.reg)) {
if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
MachineInstr *MI = getInstructionFromIndex(Def);
MI->addRegisterDefReadUndef(LI.reg);
}
}
if (I->end != Def.getDeadSlot())
continue;
if (VNI->isPHIDef()) {
// This is a dead PHI. Remove it.
VNI->markUnused();
LI.removeSegment(I);
DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
PHIRemoved = true;
} else {
// This is a dead def. Make sure the instruction knows.
MachineInstr *MI = getInstructionFromIndex(Def);
assert(MI && "No instruction defining live value");
MI->addRegisterDead(LI.reg, TRI);
if (dead && MI->allDefsAreDead()) {
DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
dead->push_back(MI);
}
}
}
return PHIRemoved;
}
void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg)
{
DEBUG(dbgs() << "Shrink: " << SR << '\n');
assert(TargetRegisterInfo::isVirtualRegister(Reg)
&& "Can only shrink virtual registers");
// Find all the values used, including PHI kills.
ShrinkToUsesWorkList WorkList;
// Visit all instructions reading Reg.
SlotIndex LastIdx;
for (MachineOperand &MO : MRI->reg_operands(Reg)) {
MachineInstr *UseMI = MO.getParent();
if (UseMI->isDebugValue())
continue;
// Maybe the operand is for a subregister we don't care about.
unsigned SubReg = MO.getSubReg();
if (SubReg != 0) {
unsigned SubRegMask = TRI->getSubRegIndexLaneMask(SubReg);
if ((SubRegMask & SR.LaneMask) == 0)
continue;
}
// We only need to visit each instruction once.
SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
if (Idx == LastIdx)
continue;
LastIdx = Idx;
LiveQueryResult LRQ = SR.Query(Idx);
VNInfo *VNI = LRQ.valueIn();
    // For subranges it is possible that only undef values are left in that
    // part of the subregister, so there is no real live range at the use.
if (!VNI)
continue;
// Special case: An early-clobber tied operand reads and writes the
// register one slot early.
if (VNInfo *DefVNI = LRQ.valueDefined())
Idx = DefVNI->def;
WorkList.push_back(std::make_pair(Idx, VNI));
}
  // Create a new live range with only minimal live segments per def.
LiveRange NewLR;
createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
extendSegmentsToUses(NewLR, *Indexes, WorkList, SR);
// Move the trimmed ranges back.
SR.segments.swap(NewLR.segments);
// Remove dead PHI value numbers
for (auto VNI : SR.valnos) {
if (VNI->isUnused())
continue;
const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
assert(Segment != nullptr && "Missing segment for VNI");
if (Segment->end != VNI->def.getDeadSlot())
continue;
if (VNI->isPHIDef()) {
// This is a dead PHI. Remove it.
VNI->markUnused();
SR.removeSegment(*Segment);
DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
}
}
DEBUG(dbgs() << "Shrunk: " << SR << '\n');
}
void LiveIntervals::extendToIndices(LiveRange &LR,
ArrayRef<SlotIndex> Indices) {
assert(LRCalc && "LRCalc not initialized.");
LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
for (unsigned i = 0, e = Indices.size(); i != e; ++i)
LRCalc->extend(LR, Indices[i]);
}
void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
SmallVectorImpl<SlotIndex> *EndPoints) {
LiveQueryResult LRQ = LR.Query(Kill);
VNInfo *VNI = LRQ.valueOutOrDead();
if (!VNI)
return;
MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);
// If VNI isn't live out from KillMBB, the value is trivially pruned.
if (LRQ.endPoint() < MBBEnd) {
LR.removeSegment(Kill, LRQ.endPoint());
if (EndPoints) EndPoints->push_back(LRQ.endPoint());
return;
}
// VNI is live out of KillMBB.
LR.removeSegment(Kill, MBBEnd);
if (EndPoints) EndPoints->push_back(MBBEnd);
// Find all blocks that are reachable from KillMBB without leaving VNI's live
// range. It is possible that KillMBB itself is reachable, so start a DFS
// from each successor.
typedef SmallPtrSet<MachineBasicBlock*, 9> VisitedTy;
VisitedTy Visited;
for (MachineBasicBlock::succ_iterator
SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
SuccI != SuccE; ++SuccI) {
for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
I != E;) {
MachineBasicBlock *MBB = *I;
// Check if VNI is live in to MBB.
SlotIndex MBBStart, MBBEnd;
std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
LiveQueryResult LRQ = LR.Query(MBBStart);
if (LRQ.valueIn() != VNI) {
// This block isn't part of the VNI segment. Prune the search.
I.skipChildren();
continue;
}
// Prune the search if VNI is killed in MBB.
if (LRQ.endPoint() < MBBEnd) {
LR.removeSegment(MBBStart, LRQ.endPoint());
if (EndPoints) EndPoints->push_back(LRQ.endPoint());
I.skipChildren();
continue;
}
// VNI is live through MBB.
LR.removeSegment(MBBStart, MBBEnd);
if (EndPoints) EndPoints->push_back(MBBEnd);
++I;
}
}
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
// Keep track of regunit ranges.
SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
// Keep track of subregister ranges.
SmallVector<std::pair<const LiveInterval::SubRange*,
LiveRange::const_iterator>, 4> SRs;
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (MRI->reg_nodbg_empty(Reg))
continue;
const LiveInterval &LI = getInterval(Reg);
if (LI.empty())
continue;
// Find the regunit intervals for the assigned register. They may overlap
// the virtual register live range, cancelling any kills.
RU.clear();
for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
++Units) {
const LiveRange &RURange = getRegUnit(*Units);
if (RURange.empty())
continue;
RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
}
if (MRI->subRegLivenessEnabled()) {
SRs.clear();
for (const LiveInterval::SubRange &SR : LI.subranges()) {
SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
}
}
// Every instruction that kills Reg corresponds to a segment range end
// point.
for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
++RI) {
// A block index indicates an MBB edge.
if (RI->end.isBlock())
continue;
MachineInstr *MI = getInstructionFromIndex(RI->end);
if (!MI)
continue;
// Check if any of the regunits are live beyond the end of RI. That could
// happen when a physreg is defined as a copy of a virtreg:
//
// %EAX = COPY %vreg5
// FOO %vreg5 <--- MI, cancel kill because %EAX is live.
// BAR %EAX<kill>
//
// There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
for (auto &RUP : RU) {
const LiveRange &RURange = *RUP.first;
LiveRange::const_iterator &I = RUP.second;
if (I == RURange.end())
continue;
I = RURange.advanceTo(I, RI->end);
if (I == RURange.end() || I->start >= RI->end)
continue;
// I is overlapping RI.
goto CancelKill;
}
if (MRI->subRegLivenessEnabled()) {
// When reading a partial undefined value we must not add a kill flag.
// The regalloc might have used the undef lane for something else.
// Example:
// %vreg1 = ... ; R32: %vreg1
// %vreg2:high16 = ... ; R64: %vreg2
// = read %vreg2<kill> ; R64: %vreg2
// = read %vreg1 ; R32: %vreg1
// The <kill> flag is correct for %vreg2, but the register allocator may
        // assign R0L to %vreg1, and R0 to %vreg2 because the low 32 bits of R0
// are actually never written by %vreg2. After assignment the <kill>
// flag at the read instruction is invalid.
unsigned DefinedLanesMask;
if (!SRs.empty()) {
// Compute a mask of lanes that are defined.
DefinedLanesMask = 0;
for (auto &SRP : SRs) {
const LiveInterval::SubRange &SR = *SRP.first;
LiveRange::const_iterator &I = SRP.second;
if (I == SR.end())
continue;
I = SR.advanceTo(I, RI->end);
if (I == SR.end() || I->start >= RI->end)
continue;
// I is overlapping RI
DefinedLanesMask |= SR.LaneMask;
}
} else
DefinedLanesMask = ~0u;
bool IsFullWrite = false;
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || MO.getReg() != Reg)
continue;
if (MO.isUse()) {
// Reading any undefined lanes?
unsigned UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
if ((UseMask & ~DefinedLanesMask) != 0)
goto CancelKill;
} else if (MO.getSubReg() == 0) {
// Writing to the full register?
assert(MO.isDef());
IsFullWrite = true;
}
}
        // If an instruction writes to a subregister, a new segment starts in
        // the LiveInterval. But as this only overrides part of the register,
        // adding kill flags here is not correct once registers have been
        // assigned.
if (!IsFullWrite) {
// Next segment has to be adjacent in the subregister write case.
LiveRange::const_iterator N = std::next(RI);
if (N != LI.end() && N->start == RI->end)
goto CancelKill;
}
}
MI->addRegisterKilled(Reg, nullptr);
continue;
CancelKill:
MI->clearRegisterKills(Reg, nullptr);
}
}
}
MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
// A local live range must be fully contained inside the block, meaning it is
// defined and killed at instructions, not at block boundaries. It is not
  // live in or out of any block.
//
// It is technically possible to have a PHI-defined live range identical to a
// single block, but we are going to return false in that case.
SlotIndex Start = LI.beginIndex();
if (Start.isBlock())
return nullptr;
SlotIndex Stop = LI.endIndex();
if (Stop.isBlock())
return nullptr;
// getMBBFromIndex doesn't need to search the MBB table when both indexes
// belong to proper instructions.
MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
return MBB1 == MBB2 ? MBB1 : nullptr;
}
bool
LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
for (const VNInfo *PHI : LI.valnos) {
if (PHI->isUnused() || !PHI->isPHIDef())
continue;
const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
// Conservatively return true instead of scanning huge predecessor lists.
if (PHIMBB->pred_size() > 100)
return true;
for (MachineBasicBlock::const_pred_iterator
PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
return true;
}
return false;
}
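/// Weight a single use or def of a register: the frequency of the containing
/// block, normalized by the entry block frequency. An operand that is both a
/// def and a use counts twice.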
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse,
const MachineBlockFrequencyInfo *MBFI,
const MachineInstr *MI) {
BlockFrequency Freq = MBFI->getBlockFreq(MI->getParent());
const float Scale = 1.0f / MBFI->getEntryFreq();
return (isDef + isUse) * (Freq.getFrequency() * Scale);
}
LiveRange::Segment
LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr* startInst) {
LiveInterval& Interval = createEmptyInterval(reg);
VNInfo* VN = Interval.getNextValue(
SlotIndex(getInstructionIndex(startInst).getRegSlot()),
getVNInfoAllocator());
LiveRange::Segment S(
SlotIndex(getInstructionIndex(startInst).getRegSlot()),
getMBBEndIdx(startInst->getParent()), VN);
Interval.addSegment(S);
return S;
}
//===----------------------------------------------------------------------===//
// Register mask functions
//===----------------------------------------------------------------------===//
bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
BitVector &UsableRegs) {
if (LI.empty())
return false;
LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();
  // Use smaller arrays for local live ranges.
ArrayRef<SlotIndex> Slots;
ArrayRef<const uint32_t*> Bits;
if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
Slots = getRegMaskSlotsInBlock(MBB->getNumber());
Bits = getRegMaskBitsInBlock(MBB->getNumber());
} else {
Slots = getRegMaskSlots();
Bits = getRegMaskBits();
}
// We are going to enumerate all the register mask slots contained in LI.
// Start with a binary search of RegMaskSlots to find a starting point.
ArrayRef<SlotIndex>::iterator SlotI =
std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
ArrayRef<SlotIndex>::iterator SlotE = Slots.end();
// No slots in range, LI begins after the last call.
if (SlotI == SlotE)
return false;
bool Found = false;
for (;;) {
assert(*SlotI >= LiveI->start);
// Loop over all slots overlapping this segment.
while (*SlotI < LiveI->end) {
// *SlotI overlaps LI. Collect mask bits.
if (!Found) {
// This is the first overlap. Initialize UsableRegs to all ones.
UsableRegs.clear();
UsableRegs.resize(TRI->getNumRegs(), true);
Found = true;
}
// Remove usable registers clobbered by this mask.
UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
if (++SlotI == SlotE)
return Found;
}
// *SlotI is beyond the current LI segment.
LiveI = LI.advanceTo(LiveI, *SlotI);
if (LiveI == LiveE)
return Found;
// Advance SlotI until it overlaps.
while (*SlotI < LiveI->start)
if (++SlotI == SlotE)
return Found;
}
}
//===----------------------------------------------------------------------===//
// HMEditor class.
//===----------------------------------------------------------------------===//
// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
LiveIntervals& LIS;
const MachineRegisterInfo& MRI;
const TargetRegisterInfo& TRI;
SlotIndex OldIdx;
SlotIndex NewIdx;
SmallPtrSet<LiveRange*, 8> Updated;
bool UpdateFlags;
public:
HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
const TargetRegisterInfo& TRI,
SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
: LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
UpdateFlags(UpdateFlags) {}
// FIXME: UpdateFlags is a workaround that creates live intervals for all
// physregs, even those that aren't needed for regalloc, in order to update
// kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
// flags, and postRA passes will use a live register utility instead.
LiveRange *getRegUnitLI(unsigned Unit) {
if (UpdateFlags)
return &LIS.getRegUnit(Unit);
return LIS.getCachedRegUnit(Unit);
}
/// Update all live ranges touched by MI, assuming a move from OldIdx to
/// NewIdx.
void updateAllRanges(MachineInstr *MI) {
DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
bool hasRegMask = false;
for (MachineOperand &MO : MI->operands()) {
if (MO.isRegMask())
hasRegMask = true;
if (!MO.isReg())
continue;
// Aggressively clear all kill flags.
// They are reinserted by VirtRegRewriter.
if (MO.isUse())
MO.setIsKill(false);
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
LiveInterval &LI = LIS.getInterval(Reg);
if (LI.hasSubRanges()) {
unsigned SubReg = MO.getSubReg();
unsigned LaneMask = TRI.getSubRegIndexLaneMask(SubReg);
for (LiveInterval::SubRange &S : LI.subranges()) {
if ((S.LaneMask & LaneMask) == 0)
continue;
updateRange(S, Reg, S.LaneMask);
}
}
updateRange(LI, Reg, 0);
continue;
}
// For physregs, only update the regunits that actually have a
// precomputed live range.
for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
if (LiveRange *LR = getRegUnitLI(*Units))
updateRange(*LR, *Units, 0);
}
if (hasRegMask)
updateRegMaskSlots();
}
private:
/// Update a single live range, assuming an instruction has been moved from
/// OldIdx to NewIdx.
void updateRange(LiveRange &LR, unsigned Reg, unsigned LaneMask) {
if (!Updated.insert(&LR).second)
return;
DEBUG({
dbgs() << " ";
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
dbgs() << PrintReg(Reg);
if (LaneMask != 0)
dbgs() << format(" L%04X", LaneMask);
} else {
dbgs() << PrintRegUnit(Reg, &TRI);
}
dbgs() << ":\t" << LR << '\n';
});
if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
handleMoveDown(LR);
else
handleMoveUp(LR, Reg, LaneMask);
DEBUG(dbgs() << " -->\t" << LR << '\n');
LR.verify();
}

/// Update LR to reflect that an instruction has been moved downwards from
/// OldIdx to NewIdx.
///
/// 1. Live def at OldIdx:
/// Move def to NewIdx, assert endpoint after NewIdx.
///
/// 2. Live def at OldIdx, killed at NewIdx:
/// Change to dead def at NewIdx.
/// (Happens when bundling def+kill together).
///
/// 3. Dead def at OldIdx:
/// Move def to NewIdx, possibly across another live value.
///
/// 4. Def at OldIdx AND at NewIdx:
/// Remove segment [OldIdx;NewIdx) and value defined at OldIdx.
/// (Happens when bundling multiple defs together).
///
/// 5. Value read at OldIdx, killed before NewIdx:
/// Extend kill to NewIdx.
///
void handleMoveDown(LiveRange &LR) {
// First look for a kill at OldIdx.
LiveRange::iterator I = LR.find(OldIdx.getBaseIndex());
LiveRange::iterator E = LR.end();
// Is LR even live at OldIdx?
if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
return;
// Handle a live-in value.
if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
bool isKill = SlotIndex::isSameInstr(OldIdx, I->end);
// If the live-in value already extends to NewIdx, there is nothing to do.
if (!SlotIndex::isEarlierInstr(I->end, NewIdx))
return;
// Aggressively remove all kill flags from the old kill point.
// Kill flags shouldn't be used while live intervals exist; they will be
// reinserted by VirtRegRewriter.
if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end))
for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO)
if (MO->isReg() && MO->isUse())
MO->setIsKill(false);
// Adjust I->end to reach NewIdx. This may temporarily make LR invalid by
// overlapping ranges. Case 5 above.
I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
// If this was a kill, there may also be a def. Otherwise we're done.
if (!isKill)
return;
++I;
}
// Check for a def at OldIdx.
if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start))
return;
// We have a def at OldIdx.
VNInfo *DefVNI = I->valno;
assert(DefVNI->def == I->start && "Inconsistent def");
DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
// If the defined value extends beyond NewIdx, just move the def down.
// This is case 1 above.
if (SlotIndex::isEarlierInstr(NewIdx, I->end)) {
I->start = DefVNI->def;
return;
}
// The remaining possibilities are now:
// 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx).
// 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot().
// In either case, it is possible that there is an existing def at NewIdx.
assert((I->end == OldIdx.getDeadSlot() ||
SlotIndex::isSameInstr(I->end, NewIdx)) &&
"Cannot move def below kill");
LiveRange::iterator NewI = LR.advanceTo(I, NewIdx.getRegSlot());
if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) {
// There is an existing def at NewIdx, case 4 above. The def at OldIdx is
// coalesced into that value.
assert(NewI->valno != DefVNI && "Multiple defs of value?");
LR.removeValNo(DefVNI);
return;
}
// There was no existing def at NewIdx. Turn *I into a dead def at NewIdx.
// If the def at OldIdx was dead, we allow it to be moved across other LR
// values. The new range should be placed immediately before NewI, move any
// intermediate ranges up.
assert(NewI != I && "Inconsistent iterators");
std::copy(std::next(I), NewI, I);
*std::prev(NewI)
= LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
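
// Worked illustration of case 5 above (a sketch with made-up slot indexes,
// not output from this code): a value killed at OldIdx=32r whose kill moves
// down to NewIdx=48r has its segment end stretched:
//
//   before: [16r,32r:0)
//   after:  [16r,48r:0)
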
/// Update LR to reflect that an instruction has been moved upwards from
/// OldIdx to NewIdx.
///
/// 1. Live def at OldIdx:
/// Hoist def to NewIdx.
///
/// 2. Dead def at OldIdx:
/// Hoist def+end to NewIdx, possibly move across other values.
///
/// 3. Dead def at OldIdx AND existing def at NewIdx:
/// Remove value defined at OldIdx, coalescing it with existing value.
///
/// 4. Live def at OldIdx AND existing def at NewIdx:
/// Remove value defined at NewIdx, hoist OldIdx def to NewIdx.
/// (Happens when bundling multiple defs together).
///
/// 5. Value killed at OldIdx:
/// Hoist kill to NewIdx, then scan for last kill between NewIdx and
/// OldIdx.
///
void handleMoveUp(LiveRange &LR, unsigned Reg, unsigned LaneMask) {
// First look for a kill at OldIdx.
LiveRange::iterator I = LR.find(OldIdx.getBaseIndex());
LiveRange::iterator E = LR.end();
// Is LR even live at OldIdx?
if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
return;
// Handle a live-in value.
if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
// If the live-in value isn't killed here, there is nothing to do.
if (!SlotIndex::isSameInstr(OldIdx, I->end))
return;
// Adjust I->end to end at NewIdx. If we are hoisting a kill above
// another use, we need to search for that use. Case 5 above.
I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
++I;
// If OldIdx also defines a value, there couldn't have been another use.
if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
// No def, search for the new kill.
// This can never be an early clobber kill since there is no def.
std::prev(I)->end = findLastUseBefore(Reg, LaneMask).getRegSlot();
return;
}
}
// Now deal with the def at OldIdx.
assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?");
VNInfo *DefVNI = I->valno;
assert(DefVNI->def == I->start && "Inconsistent def");
DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
// Check for an existing def at NewIdx.
LiveRange::iterator NewI = LR.find(NewIdx.getRegSlot());
if (SlotIndex::isSameInstr(NewI->start, NewIdx)) {
assert(NewI->valno != DefVNI && "Same value defined more than once?");
// There is an existing def at NewIdx.
if (I->end.isDead()) {
// Case 3: Remove the dead def at OldIdx.
LR.removeValNo(DefVNI);
return;
}
// Case 4: Replace def at NewIdx with live def at OldIdx.
I->start = DefVNI->def;
LR.removeValNo(NewI->valno);
return;
}
// There is no existing def at NewIdx. Hoist DefVNI.
if (!I->end.isDead()) {
// Leave the end point of a live def.
I->start = DefVNI->def;
return;
}
// DefVNI is a dead def. It may have been moved across other values in LR,
// so move I up to NewI. Slide [NewI;I) down one position.
std::copy_backward(NewI, I, std::next(I));
*NewI = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
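
// Worked illustration of case 2 above (again with made-up indexes): a dead
// def moved up from OldIdx=32r to NewIdx=16r is hoisted wholesale:
//
//   before: [32r,32d:0)
//   after:  [16r,16d:0)
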
void updateRegMaskSlots() {
SmallVectorImpl<SlotIndex>::iterator RI =
std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
OldIdx);
assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
"No RegMask at OldIdx.");
*RI = NewIdx.getRegSlot();
assert((RI == LIS.RegMaskSlots.begin() ||
SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
"Cannot move regmask instruction above another call");
assert((std::next(RI) == LIS.RegMaskSlots.end() ||
SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
"Cannot move regmask instruction below another call");
}

// Return the last use of Reg between NewIdx and OldIdx.
SlotIndex findLastUseBefore(unsigned Reg, unsigned LaneMask) {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
SlotIndex LastUse = NewIdx;
for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
unsigned SubReg = MO.getSubReg();
if (SubReg != 0 && LaneMask != 0
&& (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask) == 0)
continue;
const MachineInstr *MI = MO.getParent();
SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
if (InstSlot > LastUse && InstSlot < OldIdx)
LastUse = InstSlot;
}
return LastUse;
}
// This is a regunit interval, so scanning the use list could be very
// expensive. Scan upwards from OldIdx instead.
assert(NewIdx < OldIdx && "Expected upwards move");
SlotIndexes *Indexes = LIS.getSlotIndexes();
MachineBasicBlock *MBB = Indexes->getMBBFromIndex(NewIdx);
// OldIdx may not correspond to an instruction any longer, so set MII to
// point to the next instruction after OldIdx, or MBB->end().
MachineBasicBlock::iterator MII = MBB->end();
if (MachineInstr *MI = Indexes->getInstructionFromIndex(
Indexes->getNextNonNullIndex(OldIdx)))
if (MI->getParent() == MBB)
MII = MI;
MachineBasicBlock::iterator Begin = MBB->begin();
while (MII != Begin) {
if ((--MII)->isDebugValue())
continue;
SlotIndex Idx = Indexes->getInstructionIndex(MII);
// Stop searching when NewIdx is reached.
if (!SlotIndex::isEarlierInstr(NewIdx, Idx))
return NewIdx;
// Check if MII uses Reg.
for (MIBundleOperands MO(MII); MO.isValid(); ++MO)
if (MO->isReg() &&
TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
TRI.hasRegUnit(MO->getReg(), Reg))
return Idx;
}
// Didn't reach NewIdx. It must be the first instruction in the block.
return NewIdx;
}
};

void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) {
assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
Indexes->removeMachineInstrFromMaps(MI);
SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
OldIndex < getMBBEndIdx(MI->getParent()) &&
"Cannot handle moves across basic block boundaries.");
HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
HME.updateAllRanges(MI);
}
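
// Example caller (a minimal sketch, not part of this file; `LIS` and
// `InsertPos` are assumed names for the pass's LiveIntervals analysis and
// the new position within the same basic block):
//
//   MachineBasicBlock *MBB = MI->getParent();
//   MBB->splice(InsertPos, MBB, MI); // physically move MI within MBB
//   LIS->handleMove(MI);             // then repair the touched live ranges
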
void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
MachineInstr* BundleStart,
bool UpdateFlags) {
SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
HME.updateAllRanges(MI);
}
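
// Example (sketch): a bundler that has just spliced MI into the bundle
// headed by BundleStart would follow up with:
//
//   LIS->handleMoveIntoBundle(MI, BundleStart);
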
void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
const MachineBasicBlock::iterator End,
const SlotIndex endIdx,
LiveRange &LR, const unsigned Reg,
const unsigned LaneMask) {
LiveInterval::iterator LII = LR.find(endIdx);
SlotIndex lastUseIdx;
if (LII != LR.end() && LII->start < endIdx)
lastUseIdx = LII->end;
else
--LII;
for (MachineBasicBlock::iterator I = End; I != Begin;) {
--I;
MachineInstr *MI = I;
if (MI->isDebugValue())
continue;
SlotIndex instrIdx = getInstructionIndex(MI);
bool isStartValid = getInstructionFromIndex(LII->start);
bool isEndValid = getInstructionFromIndex(LII->end);
// FIXME: This doesn't currently handle early-clobber or multiple removed
// defs inside of the region to repair.
for (MachineInstr::mop_iterator OI = MI->operands_begin(),
OE = MI->operands_end(); OI != OE; ++OI) {
const MachineOperand &MO = *OI;
if (!MO.isReg() || MO.getReg() != Reg)
continue;
unsigned SubReg = MO.getSubReg();
unsigned Mask = TRI->getSubRegIndexLaneMask(SubReg);
if ((Mask & LaneMask) == 0)
continue;
if (MO.isDef()) {
if (!isStartValid) {
if (LII->end.isDead()) {
SlotIndex prevStart;
if (LII != LR.begin())
prevStart = std::prev(LII)->start;
// FIXME: This could be more efficient if there was a
// removeSegment method that returned an iterator.
LR.removeSegment(*LII, true);
if (prevStart.isValid())
LII = LR.find(prevStart);
else
LII = LR.begin();
} else {
LII->start = instrIdx.getRegSlot();
LII->valno->def = instrIdx.getRegSlot();
if (MO.getSubReg() && !MO.isUndef())
lastUseIdx = instrIdx.getRegSlot();
else
lastUseIdx = SlotIndex();
continue;
}
}
if (!lastUseIdx.isValid()) {
VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
LiveRange::Segment S(instrIdx.getRegSlot(),
instrIdx.getDeadSlot(), VNI);
LII = LR.addSegment(S);
} else if (LII->start != instrIdx.getRegSlot()) {
VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
LII = LR.addSegment(S);
}
if (MO.getSubReg() && !MO.isUndef())
lastUseIdx = instrIdx.getRegSlot();
else
lastUseIdx = SlotIndex();
} else if (MO.isUse()) {
// FIXME: This should probably be handled outside of this branch,
// either as part of the def case (for defs inside of the region) or
// after the loop over the region.
if (!isEndValid && !LII->end.isBlock())
LII->end = instrIdx.getRegSlot();
if (!lastUseIdx.isValid())
lastUseIdx = instrIdx.getRegSlot();
}
}
}
}

void
LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
ArrayRef<unsigned> OrigRegs) {
// Find anchor points, which are at the beginning/end of blocks or at
// instructions that already have indexes.
while (Begin != MBB->begin() && !Indexes->hasIndex(Begin))
--Begin;
while (End != MBB->end() && !Indexes->hasIndex(End))
++End;
SlotIndex endIdx;
if (End == MBB->end())
endIdx = getMBBEndIdx(MBB).getPrevSlot();
else
endIdx = getInstructionIndex(End);
Indexes->repairIndexesInRange(MBB, Begin, End);
for (MachineBasicBlock::iterator I = End; I != Begin;) {
--I;
MachineInstr *MI = I;
if (MI->isDebugValue())
continue;
for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
if (MOI->isReg() &&
TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
!hasInterval(MOI->getReg())) {
createAndComputeVirtRegInterval(MOI->getReg());
}
}
}
for (unsigned i = 0, e = OrigRegs.size(); i != e; ++i) {
unsigned Reg = OrigRegs[i];
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
LiveInterval &LI = getInterval(Reg);
// FIXME: Should we support undefs that gain defs?
if (!LI.hasAtLeastOneValue())
continue;
for (LiveInterval::SubRange &S : LI.subranges()) {
repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);
}
repairOldRegInRange(Begin, End, endIdx, LI, Reg);
}
}
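
// Example caller (a sketch modeled on a pass that rewrites a run of
// instructions in place; the variable names are illustrative):
//
//   SmallVector<unsigned, 4> OrigRegs; // vregs referenced by the old code
//   // ... collect OrigRegs, then delete/insert instructions in [Begin, End)
//   LIS->repairIntervalsInRange(MBB, Begin, End, OrigRegs);
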
void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
if (LiveRange *LR = getCachedRegUnit(*Units))
if (VNInfo *VNI = LR->getVNInfoAt(Pos))
LR->removeValNo(VNI);
}
}
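
// Example (sketch): clearing a physical-register def, e.g. when deleting a
// dead copy into DestReg at slot Pos (both names are illustrative):
//
//   LIS->removePhysRegDefAt(DestReg, Pos);
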
void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
VNInfo *VNI = LI.getVNInfoAt(Pos);
if (VNI == nullptr)
return;
LI.removeValNo(VNI);
// Also remove the value in subranges.
for (LiveInterval::SubRange &S : LI.subranges()) {
if (VNInfo *SVNI = S.getVNInfoAt(Pos))
S.removeValNo(SVNI);
}
LI.removeEmptySubRanges();
}
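
// Example (sketch): when erasing an instruction whose virtual-register def
// has become dead, a pass would strip the value before unmapping it:
//
//   SlotIndex Pos = LIS->getInstructionIndex(DeadMI).getRegSlot();
//   LIS->removeVRegDefAt(LIS->getInterval(Reg), Pos);
//   LIS->RemoveMachineInstrFromMaps(DeadMI);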