// File: repos/DirectXShaderCompiler/lib/CodeGen/LiveInterval.cpp
//===-- LiveInterval.cpp - Live Interval Representation -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveRange and LiveInterval classes. Given some
// numbering of each of the machine instructions, an interval [i, j) is said to be a
// live range for register v if there is no instruction with number j' >= j
// such that v is live at j' and there is no instruction with number i' < i such
// that v is live at i'. In this implementation ranges can have holes,
// i.e. a range might look like [1,20), [50,65), [1000,1001). Each
// individual segment is represented as an instance of LiveRange::Segment,
// and the whole range is represented as an instance of LiveRange.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveInterval.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
using namespace llvm;
namespace {
//===----------------------------------------------------------------------===//
// Implementation of various methods necessary for calculation of live ranges.
// The implementation of the methods abstracts from the concrete type of the
// segment collection.
//
// Implementation of the class follows the Template Method design pattern. The
// base class contains generic algorithms that call collection-specific methods,
// which are provided in concrete subclasses. In order to avoid virtual calls
// these methods are provided by means of C++ template instantiation.
// The base class calls the methods of the subclass through method impl(),
// which casts the 'this' pointer to the type of the subclass.
//
//===----------------------------------------------------------------------===//
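//
// A minimal sketch of the dispatch scheme described above (illustrative only,
// not part of this file's interface):
//
//   template <typename ImplT> struct Base {
//     void algorithm() {
//       // Resolved at compile time: no virtual call, fully inlinable.
//       static_cast<ImplT *>(this)->collectionSpecificStep();
//     }
//   };
//   struct Impl : Base<Impl> {
//     void collectionSpecificStep() { /* collection-specific work */ }
//   };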
template <typename ImplT, typename IteratorT, typename CollectionT>
class CalcLiveRangeUtilBase {
protected:
LiveRange *LR;
protected:
CalcLiveRangeUtilBase(LiveRange *LR) : LR(LR) {}
public:
typedef LiveRange::Segment Segment;
typedef IteratorT iterator;
VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator) {
assert(!Def.isDead() && "Cannot define a value at the dead slot");
iterator I = impl().find(Def);
if (I == segments().end()) {
VNInfo *VNI = LR->getNextValue(Def, VNInfoAllocator);
impl().insertAtEnd(Segment(Def, Def.getDeadSlot(), VNI));
return VNI;
}
Segment *S = segmentAt(I);
if (SlotIndex::isSameInstr(Def, S->start)) {
assert(S->valno->def == S->start && "Inconsistent existing value def");
// It is possible to have both normal and early-clobber defs of the same
// register on an instruction. It doesn't make a lot of sense, but it is
// possible to specify in inline assembly.
//
// Just convert everything to early-clobber.
Def = std::min(Def, S->start);
if (Def != S->start)
S->start = S->valno->def = Def;
return S->valno;
}
assert(SlotIndex::isEarlierInstr(Def, S->start) && "Already live at def");
VNInfo *VNI = LR->getNextValue(Def, VNInfoAllocator);
segments().insert(I, Segment(Def, Def.getDeadSlot(), VNI));
return VNI;
}
VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use) {
if (segments().empty())
return nullptr;
iterator I =
impl().findInsertPos(Segment(Use.getPrevSlot(), Use, nullptr));
if (I == segments().begin())
return nullptr;
--I;
if (I->end <= StartIdx)
return nullptr;
if (I->end < Use)
extendSegmentEndTo(I, Use);
return I->valno;
}
/// This method is used when we want to extend the segment specified
/// by I to end at the specified endpoint. To do this, we should
/// merge and eliminate all segments that this will overlap
/// with. The iterator is not invalidated.
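/// For example, extending a segment [4,8) to end at 20 across neighbors
/// [10,12) and [14,16) of the same value number merges them all into [4,20).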
void extendSegmentEndTo(iterator I, SlotIndex NewEnd) {
assert(I != segments().end() && "Not a valid segment!");
Segment *S = segmentAt(I);
VNInfo *ValNo = I->valno;
// Search for the first segment that we can't merge with.
iterator MergeTo = std::next(I);
for (; MergeTo != segments().end() && NewEnd >= MergeTo->end; ++MergeTo)
assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
// If NewEnd was in the middle of a segment, make sure to get its endpoint.
S->end = std::max(NewEnd, std::prev(MergeTo)->end);
// If the newly formed segment now touches the segment after it and if they
// have the same value number, merge the two segments into one segment.
if (MergeTo != segments().end() && MergeTo->start <= I->end &&
MergeTo->valno == ValNo) {
S->end = MergeTo->end;
++MergeTo;
}
// Erase any dead segments.
segments().erase(std::next(I), MergeTo);
}
/// This method is used when we want to extend the segment specified
/// by I to start at the specified endpoint. To do this, we should
/// merge and eliminate all segments that this will overlap with.
iterator extendSegmentStartTo(iterator I, SlotIndex NewStart) {
assert(I != segments().end() && "Not a valid segment!");
Segment *S = segmentAt(I);
VNInfo *ValNo = I->valno;
// Search for the first segment that we can't merge with.
iterator MergeTo = I;
do {
if (MergeTo == segments().begin()) {
S->start = NewStart;
segments().erase(MergeTo, I);
return I;
}
assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
--MergeTo;
} while (NewStart <= MergeTo->start);
// If we start in the middle of another segment, just delete a range and
// extend that segment.
if (MergeTo->end >= NewStart && MergeTo->valno == ValNo) {
segmentAt(MergeTo)->end = S->end;
} else {
// Otherwise, extend the segment right after.
++MergeTo;
Segment *MergeToSeg = segmentAt(MergeTo);
MergeToSeg->start = NewStart;
MergeToSeg->end = S->end;
}
segments().erase(std::next(MergeTo), std::next(I));
return MergeTo;
}
iterator addSegment(Segment S) {
SlotIndex Start = S.start, End = S.end;
iterator I = impl().findInsertPos(S);
// If the inserted segment starts in the middle or right at the end of
// another segment, just extend that segment to contain the segment of S.
if (I != segments().begin()) {
iterator B = std::prev(I);
if (S.valno == B->valno) {
if (B->start <= Start && B->end >= Start) {
extendSegmentEndTo(B, End);
return B;
}
} else {
// Check to make sure that we are not overlapping two live segments with
// different valno's.
assert(B->end <= Start &&
"Cannot overlap two segments with differing ValID's"
" (did you def the same reg twice in a MachineInstr?)");
}
}
// Otherwise, if this segment ends in the middle of, or right next
// to, another segment, merge it into that segment.
if (I != segments().end()) {
if (S.valno == I->valno) {
if (I->start <= End) {
I = extendSegmentStartTo(I, Start);
// If S is a complete superset of a segment, we may need to grow its
// endpoint as well.
if (End > I->end)
extendSegmentEndTo(I, End);
return I;
}
} else {
// Check to make sure that we are not overlapping two live segments with
// different valno's.
assert(I->start >= End &&
"Cannot overlap two segments with differing ValID's");
}
}
// Otherwise, this is just a new segment that doesn't interact with
// anything.
// Insert it.
return segments().insert(I, S);
}
private:
ImplT &impl() { return *static_cast<ImplT *>(this); }
CollectionT &segments() { return impl().segmentsColl(); }
Segment *segmentAt(iterator I) { return const_cast<Segment *>(&(*I)); }
};
//===----------------------------------------------------------------------===//
// Instantiation of the methods for calculation of live ranges
// based on a segment vector.
//===----------------------------------------------------------------------===//
class CalcLiveRangeUtilVector;
typedef CalcLiveRangeUtilBase<CalcLiveRangeUtilVector, LiveRange::iterator,
LiveRange::Segments> CalcLiveRangeUtilVectorBase;
class CalcLiveRangeUtilVector : public CalcLiveRangeUtilVectorBase {
public:
CalcLiveRangeUtilVector(LiveRange *LR) : CalcLiveRangeUtilVectorBase(LR) {}
private:
friend CalcLiveRangeUtilVectorBase;
LiveRange::Segments &segmentsColl() { return LR->segments; }
void insertAtEnd(const Segment &S) { LR->segments.push_back(S); }
iterator find(SlotIndex Pos) { return LR->find(Pos); }
iterator findInsertPos(Segment S) {
return std::upper_bound(LR->begin(), LR->end(), S.start);
}
};
//===----------------------------------------------------------------------===//
// Instantiation of the methods for calculation of live ranges
// based on a segment set.
//===----------------------------------------------------------------------===//
class CalcLiveRangeUtilSet;
typedef CalcLiveRangeUtilBase<CalcLiveRangeUtilSet,
LiveRange::SegmentSet::iterator,
LiveRange::SegmentSet> CalcLiveRangeUtilSetBase;
class CalcLiveRangeUtilSet : public CalcLiveRangeUtilSetBase {
public:
CalcLiveRangeUtilSet(LiveRange *LR) : CalcLiveRangeUtilSetBase(LR) {}
private:
friend CalcLiveRangeUtilSetBase;
LiveRange::SegmentSet &segmentsColl() { return *LR->segmentSet; }
void insertAtEnd(const Segment &S) {
LR->segmentSet->insert(LR->segmentSet->end(), S);
}
iterator find(SlotIndex Pos) {
iterator I =
LR->segmentSet->upper_bound(Segment(Pos, Pos.getNextSlot(), nullptr));
if (I == LR->segmentSet->begin())
return I;
iterator PrevI = std::prev(I);
if (Pos < (*PrevI).end)
return PrevI;
return I;
}
iterator findInsertPos(Segment S) {
iterator I = LR->segmentSet->upper_bound(S);
if (I != LR->segmentSet->end() && !(S.start < *I))
++I;
return I;
}
};
} // namespace
//===----------------------------------------------------------------------===//
// LiveRange methods
//===----------------------------------------------------------------------===//
LiveRange::iterator LiveRange::find(SlotIndex Pos) {
// This algorithm is basically std::upper_bound.
// Unfortunately, std::upper_bound cannot be used with mixed types until we
// adopt C++0x. Many libraries can do it, but not all.
if (empty() || Pos >= endIndex())
return end();
iterator I = begin();
size_t Len = size();
do {
size_t Mid = Len >> 1;
if (Pos < I[Mid].end)
Len = Mid;
else
I += Mid + 1, Len -= Mid + 1;
} while (Len);
return I;
}
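// Note: since C++11 the same search can be written with a mixed-type
// comparator (a sketch equivalent to the loop above, given the
// Pos < endIndex() precondition already checked):
//
//   return std::upper_bound(begin(), end(), Pos,
//       [](SlotIndex P, const Segment &S) { return P < S.end; });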
VNInfo *LiveRange::createDeadDef(SlotIndex Def,
VNInfo::Allocator &VNInfoAllocator) {
// Use the segment set, if it is available.
if (segmentSet != nullptr)
return CalcLiveRangeUtilSet(this).createDeadDef(Def, VNInfoAllocator);
// Otherwise use the segment vector.
return CalcLiveRangeUtilVector(this).createDeadDef(Def, VNInfoAllocator);
}
// overlaps - Return true if the intersection of the two live ranges is
// not empty.
//
// An example for overlaps():
//
// 0: A = ...
// 4: B = ...
// 8: C = A + B ;; last use of A
//
// The live ranges should look like:
//
// A = [3, 11)
// B = [7, x)
// C = [11, y)
//
// A->overlaps(C) should return false since we want to be able to join
// A and C.
//
bool LiveRange::overlapsFrom(const LiveRange& other,
const_iterator StartPos) const {
assert(!empty() && "empty range");
const_iterator i = begin();
const_iterator ie = end();
const_iterator j = StartPos;
const_iterator je = other.end();
assert((StartPos->start <= i->start || StartPos == other.begin()) &&
StartPos != other.end() && "Bogus start position hint!");
if (i->start < j->start) {
i = std::upper_bound(i, ie, j->start);
if (i != begin()) --i;
} else if (j->start < i->start) {
++StartPos;
if (StartPos != other.end() && StartPos->start <= i->start) {
assert(StartPos < other.end() && i < end());
j = std::upper_bound(j, je, i->start);
if (j != other.begin()) --j;
}
} else {
return true;
}
if (j == je) return false;
while (i != ie) {
if (i->start > j->start) {
std::swap(i, j);
std::swap(ie, je);
}
if (i->end > j->start)
return true;
++i;
}
return false;
}
bool LiveRange::overlaps(const LiveRange &Other, const CoalescerPair &CP,
const SlotIndexes &Indexes) const {
assert(!empty() && "empty range");
if (Other.empty())
return false;
// Use binary searches to find initial positions.
const_iterator I = find(Other.beginIndex());
const_iterator IE = end();
if (I == IE)
return false;
const_iterator J = Other.find(I->start);
const_iterator JE = Other.end();
if (J == JE)
return false;
for (;;) {
// J has just been advanced to satisfy:
assert(J->end >= I->start);
// Check for an overlap.
if (J->start < I->end) {
// I and J are overlapping. Find the later start.
SlotIndex Def = std::max(I->start, J->start);
// Allow the overlap if Def is a coalescable copy.
if (Def.isBlock() ||
!CP.isCoalescable(Indexes.getInstructionFromIndex(Def)))
return true;
}
// Advance the iterator that ends first to check for more overlaps.
if (J->end > I->end) {
std::swap(I, J);
std::swap(IE, JE);
}
// Advance J until J->end >= I->start.
do
if (++J == JE)
return false;
while (J->end < I->start);
}
}
/// overlaps - Return true if the live range overlaps an interval specified
/// by [Start, End).
bool LiveRange::overlaps(SlotIndex Start, SlotIndex End) const {
assert(Start < End && "Invalid range");
const_iterator I = std::lower_bound(begin(), end(), End);
return I != begin() && (--I)->end > Start;
}
bool LiveRange::covers(const LiveRange &Other) const {
if (empty())
return Other.empty();
const_iterator I = begin();
for (const Segment &O : Other.segments) {
I = advanceTo(I, O.start);
if (I == end() || I->start > O.start)
return false;
// Check adjacent live segments and see if we can get behind O.end.
while (I->end < O.end) {
const_iterator Last = I;
// Get next segment and abort if it was not adjacent.
++I;
if (I == end() || Last->end != I->start)
return false;
}
}
return true;
}
/// ValNo is dead, remove it. If it is the largest value number, just nuke it
/// (and any other deleted values neighboring it), otherwise mark it as ~1U so
/// it can be nuked later.
void LiveRange::markValNoForDeletion(VNInfo *ValNo) {
if (ValNo->id == getNumValNums()-1) {
do {
valnos.pop_back();
} while (!valnos.empty() && valnos.back()->isUnused());
} else {
ValNo->markUnused();
}
}
/// RenumberValues - Renumber all values in order of appearance and delete the
/// remaining unused values.
void LiveRange::RenumberValues() {
SmallPtrSet<VNInfo*, 8> Seen;
valnos.clear();
for (const Segment &S : segments) {
VNInfo *VNI = S.valno;
if (!Seen.insert(VNI).second)
continue;
assert(!VNI->isUnused() && "Unused valno used by live segment");
VNI->id = (unsigned)valnos.size();
valnos.push_back(VNI);
}
}
void LiveRange::addSegmentToSet(Segment S) {
CalcLiveRangeUtilSet(this).addSegment(S);
}
LiveRange::iterator LiveRange::addSegment(Segment S) {
// Use the segment set, if it is available.
if (segmentSet != nullptr) {
addSegmentToSet(S);
return end();
}
// Otherwise use the segment vector.
return CalcLiveRangeUtilVector(this).addSegment(S);
}
void LiveRange::append(const Segment S) {
// Check that the segment belongs to the back of the list.
assert(segments.empty() || segments.back().end <= S.start);
segments.push_back(S);
}
/// extendInBlock - If this range is live before Kill in the basic
/// block that starts at StartIdx, extend it to be live up to Kill and return
/// the value. If there is no live range before Kill, return NULL.
VNInfo *LiveRange::extendInBlock(SlotIndex StartIdx, SlotIndex Kill) {
// Use the segment set, if it is available.
if (segmentSet != nullptr)
return CalcLiveRangeUtilSet(this).extendInBlock(StartIdx, Kill);
// Otherwise use the segment vector.
return CalcLiveRangeUtilVector(this).extendInBlock(StartIdx, Kill);
}
/// Remove the specified segment from this range. Note that the segment must
/// be in a single Segment in its entirety.
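/// For example, removing [8,12) from a single segment [4,16:0) splits it into
/// [4,8:0) and [12,16:0), both keeping the original value number.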
void LiveRange::removeSegment(SlotIndex Start, SlotIndex End,
bool RemoveDeadValNo) {
// Find the Segment containing this span.
iterator I = find(Start);
assert(I != end() && "Segment is not in range!");
assert(I->containsInterval(Start, End)
&& "Segment is not entirely in range!");
// If the span we are removing is at the start of the Segment, adjust it.
VNInfo *ValNo = I->valno;
if (I->start == Start) {
if (I->end == End) {
if (RemoveDeadValNo) {
// Check if val# is dead.
bool isDead = true;
for (const_iterator II = begin(), EE = end(); II != EE; ++II)
if (II != I && II->valno == ValNo) {
isDead = false;
break;
}
if (isDead) {
// Now that ValNo is dead, remove it.
markValNoForDeletion(ValNo);
}
}
segments.erase(I); // Removed the whole Segment.
} else
I->start = End;
return;
}
// Otherwise if the span we are removing is at the end of the Segment,
// adjust the other way.
if (I->end == End) {
I->end = Start;
return;
}
// Otherwise, we are splitting the Segment into two pieces.
SlotIndex OldEnd = I->end;
I->end = Start; // Trim the old segment.
// Insert the new one.
segments.insert(std::next(I), Segment(End, OldEnd, ValNo));
}
/// removeValNo - Remove all the segments defined by the specified value#.
/// Also remove the value# from value# list.
void LiveRange::removeValNo(VNInfo *ValNo) {
if (empty()) return;
segments.erase(std::remove_if(begin(), end(), [ValNo](const Segment &S) {
return S.valno == ValNo;
}), end());
// Now that ValNo is dead, remove it.
markValNoForDeletion(ValNo);
}
void LiveRange::join(LiveRange &Other,
const int *LHSValNoAssignments,
const int *RHSValNoAssignments,
SmallVectorImpl<VNInfo *> &NewVNInfo) {
verify();
// Determine if any of our values are mapped. This is uncommon, so we want
// to avoid the range scan if not.
bool MustMapCurValNos = false;
unsigned NumVals = getNumValNums();
unsigned NumNewVals = NewVNInfo.size();
for (unsigned i = 0; i != NumVals; ++i) {
unsigned LHSValID = LHSValNoAssignments[i];
if (i != LHSValID ||
(NewVNInfo[LHSValID] && NewVNInfo[LHSValID] != getValNumInfo(i))) {
MustMapCurValNos = true;
break;
}
}
// If we have to apply a mapping to our base range assignment, rewrite it now.
if (MustMapCurValNos && !empty()) {
// Map the first live range.
iterator OutIt = begin();
OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
assert(nextValNo && "Huh?");
// If this live range has the same value # as its immediate predecessor,
// and if they are neighbors, remove one Segment. This happens when we
// have [0,4:0)[4,7:1) and map 0/1 onto the same value #.
if (OutIt->valno == nextValNo && OutIt->end == I->start) {
OutIt->end = I->end;
} else {
// Didn't merge. Move OutIt to the next segment.
++OutIt;
OutIt->valno = nextValNo;
if (OutIt != I) {
OutIt->start = I->start;
OutIt->end = I->end;
}
}
}
// If we merge some segments, chop off the end.
++OutIt;
segments.erase(OutIt, end());
}
// Rewrite Other values before changing the VNInfo ids.
// This can leave Other in an invalid state because we're not coalescing
// touching segments that now have identical values. That's OK since Other is
// not supposed to be valid after calling join().
for (Segment &S : Other.segments)
S.valno = NewVNInfo[RHSValNoAssignments[S.valno->id]];
// Update val# info. Renumber them and make sure they all belong to this
// LiveRange now. Also remove dead val#'s.
unsigned NumValNos = 0;
for (unsigned i = 0; i < NumNewVals; ++i) {
VNInfo *VNI = NewVNInfo[i];
if (VNI) {
if (NumValNos >= NumVals)
valnos.push_back(VNI);
else
valnos[NumValNos] = VNI;
VNI->id = NumValNos++; // Renumber val#.
}
}
if (NumNewVals < NumVals)
valnos.resize(NumNewVals); // shrinkify
// Okay, now insert the RHS live segments into the LHS.
LiveRangeUpdater Updater(this);
for (Segment &S : Other.segments)
Updater.add(S);
}
/// Merge all of the segments in RHS into this live range as the specified
/// value number. The segments in RHS are allowed to overlap with segments in
/// the current range, but only if the overlapping segments have the
/// specified value number.
void LiveRange::MergeSegmentsInAsValue(const LiveRange &RHS,
VNInfo *LHSValNo) {
LiveRangeUpdater Updater(this);
for (const Segment &S : RHS.segments)
Updater.add(S.start, S.end, LHSValNo);
}
/// MergeValueInAsValue - Merge all of the live segments of a specific val#
/// in RHS into this live range as the specified value number.
/// The segments in RHS are allowed to overlap with segments in the
/// current range; the value numbers of the overlapped segments are replaced
/// with the specified value number.
void LiveRange::MergeValueInAsValue(const LiveRange &RHS,
const VNInfo *RHSValNo,
VNInfo *LHSValNo) {
LiveRangeUpdater Updater(this);
for (const Segment &S : RHS.segments)
if (S.valno == RHSValNo)
Updater.add(S.start, S.end, LHSValNo);
}
/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
/// segments with the V1 value number with the V2 value number. This can
/// cause merging of V1/V2 value numbers and compaction of the value space.
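/// For example, with segments [0,4:0)[4,8:1) where value number 1 is found
/// equivalent to value number 0, eliminating value 1 merges the touching
/// segments into the single segment [0,8:0).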
VNInfo *LiveRange::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
assert(V1 != V2 && "Identical value#'s are always equivalent!");
// This code actually merges the (numerically) larger value number into the
// smaller value number, which is likely to allow us to compactify the value
// space. The only thing we have to be careful of is to preserve the
// instruction that defines the result value.
// Make sure V2 is smaller than V1.
if (V1->id < V2->id) {
V1->copyFrom(*V2);
std::swap(V1, V2);
}
// Merge V1 segments into V2.
for (iterator I = begin(); I != end(); ) {
iterator S = I++;
if (S->valno != V1) continue; // Not a V1 Segment.
// Okay, we found a V1 live range. If it had a previous, touching, V2 live
// range, extend it.
if (S != begin()) {
iterator Prev = S-1;
if (Prev->valno == V2 && Prev->end == S->start) {
Prev->end = S->end;
// Erase this live-range.
segments.erase(S);
I = Prev+1;
S = Prev;
}
}
// Okay, now we have a V1 or V2 live range that is maximally merged forward.
// Ensure that it is a V2 live-range.
S->valno = V2;
// If we can merge it into later V2 segments, do so now. We ignore any
// following V1 segments, as they will be merged in subsequent iterations
// of the loop.
if (I != end()) {
if (I->start == S->end && I->valno == V2) {
S->end = I->end;
segments.erase(I);
I = S+1;
}
}
}
// Now that V1 is dead, remove it.
markValNoForDeletion(V1);
return V2;
}
void LiveRange::flushSegmentSet() {
assert(segmentSet != nullptr && "segment set must have been created");
assert(
segments.empty() &&
"segment set can be used only initially before switching to the array");
segments.append(segmentSet->begin(), segmentSet->end());
segmentSet = nullptr;
verify();
}
void LiveInterval::freeSubRange(SubRange *S) {
S->~SubRange();
// Memory was allocated with BumpPtr allocator and is not freed here.
}
void LiveInterval::removeEmptySubRanges() {
SubRange **NextPtr = &SubRanges;
SubRange *I = *NextPtr;
while (I != nullptr) {
if (!I->empty()) {
NextPtr = &I->Next;
I = *NextPtr;
continue;
}
// Skip empty subranges until we find the first nonempty one.
do {
SubRange *Next = I->Next;
freeSubRange(I);
I = Next;
} while (I != nullptr && I->empty());
*NextPtr = I;
}
}
void LiveInterval::clearSubRanges() {
for (SubRange *I = SubRanges, *Next; I != nullptr; I = Next) {
Next = I->Next;
freeSubRange(I);
}
SubRanges = nullptr;
}
/// Helper function for constructMainRangeFromSubranges(): Search the CFG
/// backwards until we find a place covered by a LiveRange segment that actually
/// has a valno set.
static VNInfo *searchForVNI(const SlotIndexes &Indexes, LiveRange &LR,
const MachineBasicBlock *MBB,
SmallPtrSetImpl<const MachineBasicBlock*> &Visited) {
// We start the search at the end of MBB.
SlotIndex EndIdx = Indexes.getMBBEndIdx(MBB);
// In our use case we can't leave the area covered by the live segments without
// finding an actual VNI def.
LiveRange::iterator I = LR.find(EndIdx.getPrevSlot());
assert(I != LR.end());
LiveRange::Segment &S = *I;
if (S.valno != nullptr)
return S.valno;
VNInfo *VNI = nullptr;
// Continue at predecessors (we could even go to idom with domtree available).
for (const MachineBasicBlock *Pred : MBB->predecessors()) {
// Avoid going in circles.
if (!Visited.insert(Pred).second)
continue;
VNI = searchForVNI(Indexes, LR, Pred, Visited);
if (VNI != nullptr) {
S.valno = VNI;
break;
}
}
return VNI;
}
static void determineMissingVNIs(const SlotIndexes &Indexes, LiveInterval &LI) {
SmallPtrSet<const MachineBasicBlock*, 5> Visited;
LiveRange::iterator OutIt;
VNInfo *PrevValNo = nullptr;
for (LiveRange::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
LiveRange::Segment &S = *I;
// Determine final VNI if necessary.
if (S.valno == nullptr) {
// This can only happen at the beginning of a basic block.
assert(S.start.isBlock() && "valno should only be missing at block begin");
Visited.clear();
const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(S.start);
for (const MachineBasicBlock *Pred : MBB->predecessors()) {
VNInfo *VNI = searchForVNI(Indexes, LI, Pred, Visited);
if (VNI != nullptr) {
S.valno = VNI;
break;
}
}
assert(S.valno != nullptr && "could not determine valno");
}
// Merge with previous segment if it has the same VNI.
if (PrevValNo == S.valno && OutIt->end == S.start) {
OutIt->end = S.end;
} else {
// Didn't merge. Move OutIt to next segment.
if (PrevValNo == nullptr)
OutIt = LI.begin();
else
++OutIt;
if (OutIt != I)
*OutIt = *I;
PrevValNo = S.valno;
}
}
// If we merged some segments, chop off the end.
++OutIt;
LI.segments.erase(OutIt, LI.end());
}
void LiveInterval::constructMainRangeFromSubranges(
const SlotIndexes &Indexes, VNInfo::Allocator &VNIAllocator) {
// The basic observations on which this algorithm is based:
// - Each Def/ValNo in a subrange must have a corresponding def on the main
// range, but no further defs/valnos are necessary.
// - If any of the subranges is live at a point, the main liverange has to be
// live too; conversely, if no subrange is live the main range mustn't be
// live either.
// We do this by scanning through all the subranges simultaneously, creating
// new segments in the main range as segment starts/ends come up in the
// subranges.
assert(hasSubRanges() && "expected subranges to be present");
assert(segments.empty() && valnos.empty() && "expected empty main range");
// Collect subrange, iterator pairs for the walk and determine first and last
// SlotIndex involved.
SmallVector<std::pair<const SubRange*, const_iterator>, 4> SRs;
SlotIndex First;
SlotIndex Last;
for (const SubRange &SR : subranges()) {
if (SR.empty())
continue;
SRs.push_back(std::make_pair(&SR, SR.begin()));
if (!First.isValid() || SR.segments.front().start < First)
First = SR.segments.front().start;
if (!Last.isValid() || SR.segments.back().end > Last)
Last = SR.segments.back().end;
}
// Walk over all subranges simultaneously.
Segment CurrentSegment;
bool ConstructingSegment = false;
bool NeedVNIFixup = false;
unsigned ActiveMask = 0;
SlotIndex Pos = First;
while (true) {
SlotIndex NextPos = Last;
enum {
NOTHING,
BEGIN_SEGMENT,
END_SEGMENT,
} Event = NOTHING;
// Which subregister lanes are affected by the current event.
unsigned EventMask = 0;
// Whether a BEGIN_SEGMENT is also a valno definition point.
bool IsDef = false;
// Find the next begin or end of a subrange segment. Combine masks if we
// have multiple begins/ends at the same position. Ends take precedence over
// Begins.
for (auto &SRP : SRs) {
const SubRange &SR = *SRP.first;
const_iterator &I = SRP.second;
// Advance iterator of subrange to a segment involving Pos; the earlier
// segments are already merged at this point.
while (I != SR.end() &&
(I->end < Pos ||
(I->end == Pos && (ActiveMask & SR.LaneMask) == 0)))
++I;
if (I == SR.end())
continue;
if ((ActiveMask & SR.LaneMask) == 0 &&
Pos <= I->start && I->start <= NextPos) {
// Merge multiple begins at the same position.
if (I->start == NextPos && Event == BEGIN_SEGMENT) {
EventMask |= SR.LaneMask;
IsDef |= I->valno->def == I->start;
} else if (I->start < NextPos || Event != END_SEGMENT) {
Event = BEGIN_SEGMENT;
NextPos = I->start;
EventMask = SR.LaneMask;
IsDef = I->valno->def == I->start;
}
}
if ((ActiveMask & SR.LaneMask) != 0 &&
Pos <= I->end && I->end <= NextPos) {
// Merge multiple ends at the same position.
if (I->end == NextPos && Event == END_SEGMENT)
EventMask |= SR.LaneMask;
else {
Event = END_SEGMENT;
NextPos = I->end;
EventMask = SR.LaneMask;
}
}
}
// Advance scan position.
Pos = NextPos;
if (Event == BEGIN_SEGMENT) {
if (ConstructingSegment && IsDef) {
// Finish previous segment because we have to start a new one.
CurrentSegment.end = Pos;
append(CurrentSegment);
ConstructingSegment = false;
}
// Start a new segment if necessary.
if (!ConstructingSegment) {
// Determine value number for the segment.
VNInfo *VNI = nullptr; // Stays nullptr if no predecessor provides a value.
if (IsDef) {
VNI = getNextValue(Pos, VNIAllocator);
} else {
// We have to reuse an existing value number. If we are lucky,
// we already passed one of the predecessor blocks and determined
// its value number (with blocks in reverse postorder this would
// always be true, but we have no such guarantee).
assert(Pos.isBlock());
const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Pos);
// See if any of the predecessor blocks has a lower number and a VNI
for (const MachineBasicBlock *Pred : MBB->predecessors()) {
SlotIndex PredEnd = Indexes.getMBBEndIdx(Pred);
VNI = getVNInfoBefore(PredEnd);
if (VNI != nullptr)
break;
}
// Def will come later: We have to do an extra fixup pass.
if (VNI == nullptr)
NeedVNIFixup = true;
}
// In rare cases we can produce adjacent segments with the same value
// number (if they come from different subranges, but happen to have
// the same defining instruction). VNIFixup will fix those cases.
if (!empty() && segments.back().end == Pos &&
segments.back().valno == VNI)
NeedVNIFixup = true;
CurrentSegment.start = Pos;
CurrentSegment.valno = VNI;
ConstructingSegment = true;
}
ActiveMask |= EventMask;
} else if (Event == END_SEGMENT) {
assert(ConstructingSegment);
// Finish segment if no lane is active anymore.
ActiveMask &= ~EventMask;
if (ActiveMask == 0) {
CurrentSegment.end = Pos;
append(CurrentSegment);
ConstructingSegment = false;
}
} else {
// We reached the end of the last subranges and can stop.
assert(Event == NOTHING);
break;
}
}
// We might not be able to assign new valnos for all segments if the basic
// block containing the definition comes after a segment using the valno.
// Do a fixup pass for this uncommon case.
if (NeedVNIFixup)
determineMissingVNIs(Indexes, *this);
assert(ActiveMask == 0 && !ConstructingSegment && "all segments ended");
verify();
}
unsigned LiveInterval::getSize() const {
unsigned Sum = 0;
for (const Segment &S : segments)
Sum += S.start.distance(S.end);
return Sum;
}
raw_ostream& llvm::operator<<(raw_ostream& os, const LiveRange::Segment &S) {
return os << '[' << S.start << ',' << S.end << ':' << S.valno->id << ")";
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveRange::Segment::dump() const {
dbgs() << *this << "\n";
}
#endif
void LiveRange::print(raw_ostream &OS) const {
if (empty())
OS << "EMPTY";
else {
for (const Segment &S : segments) {
OS << S;
assert(S.valno == getValNumInfo(S.valno->id) && "Bad VNInfo");
}
}
// Print value number info.
if (getNumValNums()) {
OS << " ";
unsigned vnum = 0;
for (const_vni_iterator i = vni_begin(), e = vni_end(); i != e;
++i, ++vnum) {
const VNInfo *vni = *i;
if (vnum) OS << " ";
OS << vnum << "@";
if (vni->isUnused()) {
OS << "x";
} else {
OS << vni->def;
if (vni->isPHIDef())
OS << "-phi";
}
}
}
}
void LiveInterval::print(raw_ostream &OS) const {
OS << PrintReg(reg) << ' ';
super::print(OS);
// Print subranges
for (const SubRange &SR : subranges()) {
OS << format(" L%04X ", SR.LaneMask) << SR;
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveRange::dump() const {
dbgs() << *this << "\n";
}
void LiveInterval::dump() const {
dbgs() << *this << "\n";
}
#endif
#ifndef NDEBUG
void LiveRange::verify() const {
for (const_iterator I = begin(), E = end(); I != E; ++I) {
assert(I->start.isValid());
assert(I->end.isValid());
assert(I->start < I->end);
assert(I->valno != nullptr);
assert(I->valno->id < valnos.size());
assert(I->valno == valnos[I->valno->id]);
if (std::next(I) != E) {
assert(I->end <= std::next(I)->start);
if (I->end == std::next(I)->start)
assert(I->valno != std::next(I)->valno);
}
}
}
void LiveInterval::verify(const MachineRegisterInfo *MRI) const {
super::verify();
// Make sure SubRanges are fine and LaneMasks are disjoint.
unsigned Mask = 0;
unsigned MaxMask = MRI != nullptr ? MRI->getMaxLaneMaskForVReg(reg) : ~0u;
for (const SubRange &SR : subranges()) {
// Subrange lanemask should be disjoint from any previous subrange masks.
assert((Mask & SR.LaneMask) == 0);
Mask |= SR.LaneMask;
// The subrange mask should be contained in the maximum lane mask for the vreg.
assert((Mask & ~MaxMask) == 0);
SR.verify();
// Main liverange should cover subrange.
assert(covers(SR));
}
}
#endif
//===----------------------------------------------------------------------===//
// LiveRangeUpdater class
//===----------------------------------------------------------------------===//
//
// The LiveRangeUpdater class always maintains these invariants:
//
// - When LastStart is invalid, Spills is empty and the iterators are invalid.
// This is the initial state, and the state created by flush().
// In this state, isDirty() returns false.
//
// Otherwise, segments are kept in three separate areas:
//
// 1. [begin; WriteI) at the front of LR.
// 2. [ReadI; end) at the back of LR.
// 3. Spills.
//
// - LR.begin() <= WriteI <= ReadI <= LR.end().
// - Segments in all three areas are fully ordered and coalesced.
// - Segments in area 1 precede and can't coalesce with segments in area 2.
// - Segments in Spills precede and can't coalesce with segments in area 2.
// - No coalescing is possible between segments in Spills and segments in area
// 1, and there are no overlapping segments.
//
// The segments in Spills are not ordered with respect to the segments in area
// 1. They need to be merged.
//
// When they exist, Spills.back().start <= LastStart,
// and WriteI[-1].start <= LastStart.
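//
// Typical usage mirrors LiveRange::join() above (a sketch):
//
//   LiveRangeUpdater Updater(&LR);
//   for (const LiveRange::Segment &S : NewSegments)
//     Updater.add(S);  // an out-of-order start triggers an internal flush()
//   // flush() also runs when the updater is destroyed, restoring the
//   // invariants above.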
void LiveRangeUpdater::print(raw_ostream &OS) const {
if (!isDirty()) {
if (LR)
OS << "Clean updater: " << *LR << '\n';
else
OS << "Null updater.\n";
return;
}
assert(LR && "Can't have null LR in dirty updater.");
OS << " updater with gap = " << (ReadI - WriteI)
<< ", last start = " << LastStart
<< ":\n Area 1:";
for (const auto &S : make_range(LR->begin(), WriteI))
OS << ' ' << S;
OS << "\n Spills:";
for (unsigned I = 0, E = Spills.size(); I != E; ++I)
OS << ' ' << Spills[I];
OS << "\n Area 2:";
for (const auto &S : make_range(ReadI, LR->end()))
OS << ' ' << S;
OS << '\n';
}
void LiveRangeUpdater::dump() const
{
print(errs());
}
// Determine if A and B should be coalesced.
static inline bool coalescable(const LiveRange::Segment &A,
const LiveRange::Segment &B) {
assert(A.start <= B.start && "Unordered live segments.");
if (A.end == B.start)
return A.valno == B.valno;
if (A.end < B.start)
return false;
assert(A.valno == B.valno && "Cannot overlap different values");
return true;
}
void LiveRangeUpdater::add(LiveRange::Segment Seg) {
assert(LR && "Cannot add to a null destination");
// Fall back to the regular add method if the live range
// is using the segment set instead of the segment vector.
if (LR->segmentSet != nullptr) {
LR->addSegmentToSet(Seg);
return;
}
// Flush the state if Start moves backwards.
if (!LastStart.isValid() || LastStart > Seg.start) {
if (isDirty())
flush();
// This brings us to an uninitialized state. Reinitialize.
assert(Spills.empty() && "Leftover spilled segments");
WriteI = ReadI = LR->begin();
}
// Remember start for next time.
LastStart = Seg.start;
// Advance ReadI until it ends after Seg.start.
LiveRange::iterator E = LR->end();
if (ReadI != E && ReadI->end <= Seg.start) {
// First try to close the gap between WriteI and ReadI with spills.
if (ReadI != WriteI)
mergeSpills();
// Then advance ReadI.
if (ReadI == WriteI)
ReadI = WriteI = LR->find(Seg.start);
else
while (ReadI != E && ReadI->end <= Seg.start)
*WriteI++ = *ReadI++;
}
assert(ReadI == E || ReadI->end > Seg.start);
// Check if the ReadI segment begins early.
if (ReadI != E && ReadI->start <= Seg.start) {
assert(ReadI->valno == Seg.valno && "Cannot overlap different values");
// Bail if Seg is completely contained in ReadI.
if (ReadI->end >= Seg.end)
return;
// Coalesce into Seg.
Seg.start = ReadI->start;
++ReadI;
}
// Coalesce as much as possible from ReadI into Seg.
while (ReadI != E && coalescable(Seg, *ReadI)) {
Seg.end = std::max(Seg.end, ReadI->end);
++ReadI;
}
// Try coalescing Spills.back() into Seg.
if (!Spills.empty() && coalescable(Spills.back(), Seg)) {
Seg.start = Spills.back().start;
Seg.end = std::max(Spills.back().end, Seg.end);
Spills.pop_back();
}
// Try coalescing Seg into WriteI[-1].
if (WriteI != LR->begin() && coalescable(WriteI[-1], Seg)) {
WriteI[-1].end = std::max(WriteI[-1].end, Seg.end);
return;
}
// Seg doesn't coalesce with anything, and needs to be inserted somewhere.
if (WriteI != ReadI) {
*WriteI++ = Seg;
return;
}
// Finally, append to LR or Spills.
if (WriteI == E) {
LR->segments.push_back(Seg);
WriteI = ReadI = LR->end();
} else
Spills.push_back(Seg);
}
// Merge as many spilled segments as possible into the gap between WriteI
// and ReadI. Advance WriteI to reflect the merged segments.
void LiveRangeUpdater::mergeSpills() {
// Perform a backwards merge of Spills and [SpillI;WriteI).
size_t GapSize = ReadI - WriteI;
size_t NumMoved = std::min(Spills.size(), GapSize);
LiveRange::iterator Src = WriteI;
LiveRange::iterator Dst = Src + NumMoved;
LiveRange::iterator SpillSrc = Spills.end();
LiveRange::iterator B = LR->begin();
// This is the new WriteI position after merging spills.
WriteI = Dst;
// Now merge Src and Spills backwards.
while (Src != Dst) {
if (Src != B && Src[-1].start > SpillSrc[-1].start)
*--Dst = *--Src;
else
*--Dst = *--SpillSrc;
}
assert(NumMoved == size_t(Spills.end() - SpillSrc));
Spills.erase(SpillSrc, Spills.end());
}
void LiveRangeUpdater::flush() {
if (!isDirty())
return;
// Clear the dirty state.
LastStart = SlotIndex();
assert(LR && "Cannot add to a null destination");
// Nothing to merge?
if (Spills.empty()) {
LR->segments.erase(WriteI, ReadI);
LR->verify();
return;
}
// Resize the WriteI - ReadI gap to match Spills.
size_t GapSize = ReadI - WriteI;
if (GapSize < Spills.size()) {
// The gap is too small. Make some room.
size_t WritePos = WriteI - LR->begin();
LR->segments.insert(ReadI, Spills.size() - GapSize, LiveRange::Segment());
// This also invalidated ReadI, but it is recomputed below.
WriteI = LR->begin() + WritePos;
} else {
// Shrink the gap if necessary.
LR->segments.erase(WriteI + Spills.size(), ReadI);
}
ReadI = WriteI + Spills.size();
mergeSpills();
LR->verify();
}
unsigned ConnectedVNInfoEqClasses::Classify(const LiveInterval *LI) {
// Create initial equivalence classes.
EqClass.clear();
EqClass.grow(LI->getNumValNums());
const VNInfo *used = nullptr, *unused = nullptr;
// Determine connections.
for (const VNInfo *VNI : LI->valnos) {
// Group all unused values into one class.
if (VNI->isUnused()) {
if (unused)
EqClass.join(unused->id, VNI->id);
unused = VNI;
continue;
}
used = VNI;
if (VNI->isPHIDef()) {
const MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
assert(MBB && "Phi-def has no defining MBB");
// Connect to values live out of predecessors.
for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI)
if (const VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(*PI)))
EqClass.join(VNI->id, PVNI->id);
} else {
// Normal value defined by an instruction. Check for two-addr redef.
// FIXME: This could be coincidental. Should we really check for a tied
// operand constraint?
// Note that VNI->def may be a use slot for an early clobber def.
if (const VNInfo *UVNI = LI->getVNInfoBefore(VNI->def))
EqClass.join(VNI->id, UVNI->id);
}
}
// Lump all the unused values in with the last used value.
if (used && unused)
EqClass.join(used->id, unused->id);
EqClass.compress();
return EqClass.getNumClasses();
}
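// Typical usage (a sketch; the caller creates the new empty intervals):
//
//   ConnectedVNInfoEqClasses ConEQ(LIS);
//   unsigned NumComp = ConEQ.Classify(&LI);
//   if (NumComp > 1) {
//     // Fill LIV[1..NumComp-1] with fresh empty intervals, LIV[0] = &LI.
//     ConEQ.Distribute(LIV, MRI);
//   }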
void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[],
MachineRegisterInfo &MRI) {
assert(LIV[0] && "LIV[0] must be set");
LiveInterval &LI = *LIV[0];
// Rewrite instructions.
for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
RE = MRI.reg_end(); RI != RE;) {
MachineOperand &MO = *RI;
MachineInstr *MI = RI->getParent();
++RI;
// DBG_VALUE instructions don't have slot indexes, so get the index of the
// instruction before them.
// Normally, DBG_VALUE instructions are removed before this function is
// called, but it is not a requirement.
SlotIndex Idx;
if (MI->isDebugValue())
Idx = LIS.getSlotIndexes()->getIndexBefore(MI);
else
Idx = LIS.getInstructionIndex(MI);
LiveQueryResult LRQ = LI.Query(Idx);
const VNInfo *VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
// In the case of an <undef> use that isn't tied to any def, VNI will be
// NULL. If the use is tied to a def, VNI will be the defined value.
if (!VNI)
continue;
MO.setReg(LIV[getEqClass(VNI)]->reg);
}
// Move runs to new intervals.
LiveInterval::iterator J = LI.begin(), E = LI.end();
while (J != E && EqClass[J->valno->id] == 0)
++J;
for (LiveInterval::iterator I = J; I != E; ++I) {
if (unsigned eq = EqClass[I->valno->id]) {
assert((LIV[eq]->empty() || LIV[eq]->expiredAt(I->start)) &&
"New intervals should be empty");
LIV[eq]->segments.push_back(*I);
} else
*J++ = *I;
}
// TODO: do not cheat anymore by simply cleaning all subranges
LI.clearSubRanges();
LI.segments.erase(J, E);
// Transfer VNInfos to their new owners and renumber them.
unsigned j = 0, e = LI.getNumValNums();
while (j != e && EqClass[j] == 0)
++j;
for (unsigned i = j; i != e; ++i) {
VNInfo *VNI = LI.getValNumInfo(i);
if (unsigned eq = EqClass[i]) {
VNI->id = LIV[eq]->getNumValNums();
LIV[eq]->valnos.push_back(VNI);
} else {
VNI->id = j;
LI.valnos[j++] = VNI;
}
}
LI.valnos.resize(j);
}
// File: repos/DirectXShaderCompiler/lib/CodeGen/SpillPlacement.h
//===-- SpillPlacement.h - Optimal Spill Code Placement --------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This analysis computes the optimal spill code placement between basic blocks.
//
// The runOnMachineFunction() method only precomputes some profiling information
// about the CFG. The real work is done by prepare(), addConstraints(), and
// finish() which are called by the register allocator.
//
// Given a variable that is live across multiple basic blocks, and given
// constraints on the basic blocks where the variable is live, determine which
// edge bundles should have the variable in a register and which edge bundles
// should have the variable in a stack slot.
//
// The returned bit vector can be used to place optimal spill code at basic
// block entries and exits. Spill code placement inside a basic block is not
// considered.
//
//===----------------------------------------------------------------------===//
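//
// The intended calling sequence from the register allocator looks roughly
// like this (a sketch; identifiers are illustrative):
//
//   BitVector RegBundles;
//   SpillPlacer->prepare(RegBundles);
//   SpillPlacer->addConstraints(LiveBlocks);
//   SpillPlacer->addLinks(TransparentBlocks);
//   SpillPlacer->scanActiveBundles();
//   while (!SpillPlacer->getRecentPositive().empty())
//     SpillPlacer->iterate();
//   bool Perfect = SpillPlacer->finish(); // result is left in RegBundles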
#ifndef LLVM_LIB_CODEGEN_SPILLPLACEMENT_H
#define LLVM_LIB_CODEGEN_SPILLPLACEMENT_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/BlockFrequency.h"
namespace llvm {
class BitVector;
class EdgeBundles;
class MachineBasicBlock;
class MachineLoopInfo;
class MachineBlockFrequencyInfo;
class SpillPlacement : public MachineFunctionPass {
struct Node;
const MachineFunction *MF;
const EdgeBundles *bundles;
const MachineLoopInfo *loops;
const MachineBlockFrequencyInfo *MBFI;
Node *nodes;
// Nodes that are active in the current computation. Owned by the prepare()
// caller.
BitVector *ActiveNodes;
// Nodes with active links. Populated by scanActiveBundles.
SmallVector<unsigned, 8> Linked;
// Nodes that went positive during the last call to scanActiveBundles or
// iterate.
SmallVector<unsigned, 8> RecentPositive;
// Block frequencies are computed once. Indexed by block number.
SmallVector<BlockFrequency, 8> BlockFrequencies;
/// Decision threshold. A node gets the output value 0 if the weighted sum of
/// its inputs falls in the open interval (-Threshold;Threshold).
BlockFrequency Threshold;
public:
static char ID; // Pass identification, replacement for typeid.
SpillPlacement() : MachineFunctionPass(ID), nodes(nullptr) {}
~SpillPlacement() override { releaseMemory(); }
/// BorderConstraint - A basic block has separate constraints for entry and
/// exit.
enum BorderConstraint {
DontCare, ///< Block doesn't care / variable not live.
PrefReg, ///< Block entry/exit prefers a register.
PrefSpill, ///< Block entry/exit prefers a stack slot.
PrefBoth, ///< Block entry prefers both register and stack.
MustSpill ///< A register is impossible, variable must be spilled.
};
/// BlockConstraint - Entry and exit constraints for a basic block.
struct BlockConstraint {
unsigned Number; ///< Basic block number (from MBB::getNumber()).
BorderConstraint Entry : 8; ///< Constraint on block entry.
BorderConstraint Exit : 8; ///< Constraint on block exit.
/// True when this block changes the value of the live range. This means
/// the block has a non-PHI def. When this is false, a live-in value on
/// the stack can be live-out on the stack without inserting a spill.
bool ChangesValue;
};
/// prepare - Reset state and prepare for a new spill placement computation.
/// @param RegBundles Bit vector to receive the edge bundles where the
/// variable should be kept in a register. Each bit
/// corresponds to an edge bundle, a set bit means the
/// variable should be kept in a register through the
/// bundle. A clear bit means the variable should be
/// spilled. This vector is retained.
void prepare(BitVector &RegBundles);
/// addConstraints - Add constraints and biases. This method may be called
/// more than once to accumulate constraints.
/// @param LiveBlocks Constraints for blocks that have the variable live in or
/// live out.
void addConstraints(ArrayRef<BlockConstraint> LiveBlocks);
/// addPrefSpill - Add PrefSpill constraints to all blocks listed. This is
/// equivalent to calling addConstraint with identical BlockConstraints with
/// Entry = Exit = PrefSpill, and ChangesValue = false.
///
/// @param Blocks Array of block numbers that prefer to spill in and out.
/// @param Strong When true, double the negative bias for these blocks.
void addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong);
/// addLinks - Add transparent blocks with the given numbers.
void addLinks(ArrayRef<unsigned> Links);
/// scanActiveBundles - Perform an initial scan of all bundles activated by
/// addConstraints and addLinks, updating their state. Add all the bundles
/// that now prefer a register to RecentPositive.
/// Prepare internal data structures for iterate.
/// Return true if there are any positive nodes.
bool scanActiveBundles();
/// iterate - Update the network iteratively until convergence, or new bundles
/// are found.
void iterate();
/// getRecentPositive - Return an array of bundles that became positive during
/// the previous call to scanActiveBundles or iterate.
ArrayRef<unsigned> getRecentPositive() { return RecentPositive; }
/// finish - Compute the optimal spill code placement given the
/// constraints. No MustSpill constraints will be violated, and the smallest
/// possible number of PrefX constraints will be violated, weighted by
/// expected execution frequencies.
/// The selected bundles are returned in the bitvector passed to prepare().
/// @return True if a perfect solution was found, allowing the variable to be
/// in a register through all relevant bundles.
bool finish();
/// getBlockFrequency - Return the estimated block execution frequency per
/// function invocation.
BlockFrequency getBlockFrequency(unsigned Number) const {
return BlockFrequencies[Number];
}
private:
bool runOnMachineFunction(MachineFunction&) override;
void getAnalysisUsage(AnalysisUsage&) const override;
void releaseMemory() override;
void activate(unsigned);
void setThreshold(const BlockFrequency &Entry);
};
} // end namespace llvm
#endif
// File: repos/DirectXShaderCompiler/lib/CodeGen/TargetRegisterInfo.cpp
//===- TargetRegisterInfo.cpp - Target Register Information Implementation ===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
regclass_iterator RCB, regclass_iterator RCE,
const char *const *SRINames,
const unsigned *SRILaneMasks,
unsigned SRICoveringLanes)
: InfoDesc(ID), SubRegIndexNames(SRINames),
SubRegIndexLaneMasks(SRILaneMasks),
RegClassBegin(RCB), RegClassEnd(RCE),
CoveringLanes(SRICoveringLanes) {
}
TargetRegisterInfo::~TargetRegisterInfo() {}
void PrintReg::print(raw_ostream &OS) const {
if (!Reg)
OS << "%noreg";
else if (TargetRegisterInfo::isStackSlot(Reg))
OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
else if (TargetRegisterInfo::isVirtualRegister(Reg))
OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Reg);
else if (TRI && Reg < TRI->getNumRegs())
OS << '%' << TRI->getName(Reg);
else
OS << "%physreg" << Reg;
if (SubIdx) {
if (TRI)
OS << ':' << TRI->getSubRegIndexName(SubIdx);
else
OS << ":sub(" << SubIdx << ')';
}
}
void PrintRegUnit::print(raw_ostream &OS) const {
// Generic printout when TRI is missing.
if (!TRI) {
OS << "Unit~" << Unit;
return;
}
// Check for invalid register units.
if (Unit >= TRI->getNumRegUnits()) {
OS << "BadUnit~" << Unit;
return;
}
// Normal units have at least one root.
MCRegUnitRootIterator Roots(Unit, TRI);
assert(Roots.isValid() && "Unit has no roots.");
OS << TRI->getName(*Roots);
for (++Roots; Roots.isValid(); ++Roots)
OS << '~' << TRI->getName(*Roots);
}
void PrintVRegOrUnit::print(raw_ostream &OS) const {
if (TRI && TRI->isVirtualRegister(Unit)) {
OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Unit);
return;
}
PrintRegUnit::print(OS);
}
/// getAllocatableClass - Return the maximal subclass of the given register
/// class that is allocatable, or NULL.
const TargetRegisterClass *
TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
if (!RC || RC->isAllocatable())
return RC;
const unsigned *SubClass = RC->getSubClassMask();
for (unsigned Base = 0, BaseE = getNumRegClasses();
Base < BaseE; Base += 32) {
unsigned Idx = Base;
for (unsigned Mask = *SubClass++; Mask; Mask >>= 1) {
unsigned Offset = countTrailingZeros(Mask);
const TargetRegisterClass *SubRC = getRegClass(Idx + Offset);
if (SubRC->isAllocatable())
return SubRC;
Mask >>= Offset;
Idx += Offset + 1;
}
}
return nullptr;
}
/// getMinimalPhysRegClass - Returns the Register Class of a physical
/// register of the given type, picking the most specific sub register class
/// of the right type that contains this physreg.
const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, MVT VT) const {
assert(isPhysicalRegister(reg) && "reg must be a physical register");
// Pick the most specific sub register class of the right type that contains
// this physreg.
const TargetRegisterClass* BestRC = nullptr;
for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I){
const TargetRegisterClass* RC = *I;
if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
(!BestRC || BestRC->hasSubClass(RC)))
BestRC = RC;
}
assert(BestRC && "Couldn't find the register class");
return BestRC;
}
/// getAllocatableSetForRC - Toggle the bits that represent allocatable
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
const TargetRegisterClass *RC, BitVector &R){
assert(RC->isAllocatable() && "invalid for nonallocatable sets");
ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
for (unsigned i = 0; i != Order.size(); ++i)
R.set(Order[i]);
}
BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
const TargetRegisterClass *RC) const {
BitVector Allocatable(getNumRegs());
if (RC) {
// A register class with no allocatable subclass returns an empty set.
const TargetRegisterClass *SubClass = getAllocatableClass(RC);
if (SubClass)
getAllocatableSetForRC(MF, SubClass, Allocatable);
} else {
for (TargetRegisterInfo::regclass_iterator I = regclass_begin(),
E = regclass_end(); I != E; ++I)
if ((*I)->isAllocatable())
getAllocatableSetForRC(MF, *I, Allocatable);
}
// Mask out the reserved registers
BitVector Reserved = getReservedRegs(MF);
Allocatable &= Reserved.flip();
return Allocatable;
}
static inline
const TargetRegisterClass *firstCommonClass(const uint32_t *A,
const uint32_t *B,
const TargetRegisterInfo *TRI) {
for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
if (unsigned Common = *A++ & *B++)
return TRI->getRegClass(I + countTrailingZeros(Common));
return nullptr;
}
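// For example, with more than 32 register classes the sub-class masks span
// multiple 32-bit words; a common class with ID 35 appears as bit 3 of the
// second word and is recovered as I (32) + countTrailingZeros(Common) (3).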
const TargetRegisterClass *
TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
const TargetRegisterClass *B) const {
// First take care of the trivial cases.
if (A == B)
return A;
if (!A || !B)
return nullptr;
// Register classes are ordered topologically, so the largest common
// sub-class is the common sub-class with the smallest ID.
return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
}
const TargetRegisterClass *
TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
const TargetRegisterClass *B,
unsigned Idx) const {
assert(A && B && "Missing register class");
assert(Idx && "Bad sub-register index");
// Find Idx in the list of super-register indices.
for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
if (RCI.getSubReg() == Idx)
// The bit mask contains all register classes that are projected into B
// by Idx. Find a class that is also a sub-class of A.
return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
return nullptr;
}
const TargetRegisterClass *TargetRegisterInfo::
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
const TargetRegisterClass *RCB, unsigned SubB,
unsigned &PreA, unsigned &PreB) const {
assert(RCA && SubA && RCB && SubB && "Invalid arguments");
// Search all pairs of sub-register indices that project into RCA and RCB
// respectively. This is quadratic, but usually the sets are very small. On
// most targets like X86, there will only be a single sub-register index
// (e.g., sub_16bit projecting into GR16).
//
// The worst case is a register class like DPR on ARM.
// We have indices dsub_0..dsub_7 projecting into that class.
//
// It is very common that one register class is a sub-register of the other.
// Arrange for RCA to be the larger register class so the answer will be found in
// the first iteration. This makes the search linear for the most common
// case.
const TargetRegisterClass *BestRC = nullptr;
unsigned *BestPreA = &PreA;
unsigned *BestPreB = &PreB;
if (RCA->getSize() < RCB->getSize()) {
std::swap(RCA, RCB);
std::swap(SubA, SubB);
std::swap(BestPreA, BestPreB);
}
// Also terminate the search once we have found a register class as small as
// RCA.
unsigned MinSize = RCA->getSize();
for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
// Check if a common super-register class exists for this index pair.
const TargetRegisterClass *RC =
firstCommonClass(IA.getMask(), IB.getMask(), this);
if (!RC || RC->getSize() < MinSize)
continue;
// The indexes must compose identically: PreA+SubA == PreB+SubB.
unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
if (FinalA != FinalB)
continue;
// Is RC a better candidate than BestRC?
if (BestRC && RC->getSize() >= BestRC->getSize())
continue;
// Yes, RC is the smallest super-register seen so far.
BestRC = RC;
*BestPreA = IA.getSubReg();
*BestPreB = IB.getSubReg();
// Bail early if we reached MinSize. We won't find a better candidate.
if (BestRC->getSize() == MinSize)
return BestRC;
}
}
return BestRC;
}
// Compute target-independent register allocator hints to help eliminate copies.
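// A typical flow, sketched: a pass records a preference through
// MRI.setRegAllocationHint(VirtReg, /*Type=*/0, PrefReg); the allocator then
// invokes this hook so that PrefReg (or, via VRM, the physical register it
// is currently mapped to) is tried first in VirtReg's allocation order.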
void
TargetRegisterInfo::getRegAllocationHints(unsigned VirtReg,
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF,
const VirtRegMap *VRM) const {
const MachineRegisterInfo &MRI = MF.getRegInfo();
std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);
// Hints with HintType != 0 were set by target-dependent code.
// Such targets must provide their own implementation of
// TRI::getRegAllocationHints to interpret those hint types.
assert(Hint.first == 0 && "Target must implement TRI::getRegAllocationHints");
// Target-independent hints are either a physical or a virtual register.
unsigned Phys = Hint.second;
if (VRM && isVirtualRegister(Phys))
Phys = VRM->getPhys(Phys);
// Check that Phys is a valid hint in VirtReg's register class.
if (!isPhysicalRegister(Phys))
return;
if (MRI.isReserved(Phys))
return;
// Check that Phys is in the allocation order. We shouldn't heed hints
// from VirtReg's register class if they aren't in the allocation order. The
// target probably has a reason for removing the register.
if (std::find(Order.begin(), Order.end(), Phys) == Order.end())
return;
// All clear, tell the register allocator to prefer this register.
Hints.push_back(Phys);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void
TargetRegisterInfo::dumpReg(unsigned Reg, unsigned SubRegIndex,
const TargetRegisterInfo *TRI) {
dbgs() << PrintReg(Reg, TRI, SubRegIndex) << "\n";
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/GCMetadata.cpp | //===-- GCMetadata.cpp - Garbage collector metadata -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the GCFunctionInfo class and GCModuleInfo pass.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
namespace {
class Printer : public FunctionPass {
static char ID;
raw_ostream &OS;
public:
explicit Printer(raw_ostream &OS) : FunctionPass(ID), OS(OS) {}
const char *getPassName() const override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
bool doFinalization(Module &M) override;
};
}
INITIALIZE_PASS(GCModuleInfo, "collector-metadata",
"Create Garbage Collector Module Metadata", false, false)
// -----------------------------------------------------------------------------
GCFunctionInfo::GCFunctionInfo(const Function &F, GCStrategy &S)
: F(F), S(S), FrameSize(~0LL) {}
GCFunctionInfo::~GCFunctionInfo() {}
// -----------------------------------------------------------------------------
char GCModuleInfo::ID = 0;
GCModuleInfo::GCModuleInfo() : ImmutablePass(ID) {
initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
}
GCFunctionInfo &GCModuleInfo::getFunctionInfo(const Function &F) {
assert(!F.isDeclaration() && "Can only get GCFunctionInfo for a definition!");
assert(F.hasGC());
finfo_map_type::iterator I = FInfoMap.find(&F);
if (I != FInfoMap.end())
return *I->second;
GCStrategy *S = getGCStrategy(F.getGC());
Functions.push_back(make_unique<GCFunctionInfo>(F, *S));
GCFunctionInfo *GFI = Functions.back().get();
FInfoMap[&F] = GFI;
return *GFI;
}
void GCModuleInfo::clear() {
Functions.clear();
FInfoMap.clear();
GCStrategyList.clear();
}
// -----------------------------------------------------------------------------
char Printer::ID = 0;
FunctionPass *llvm::createGCInfoPrinter(raw_ostream &OS) {
return new Printer(OS);
}
const char *Printer::getPassName() const {
return "Print Garbage Collector Information";
}
void Printer::getAnalysisUsage(AnalysisUsage &AU) const {
FunctionPass::getAnalysisUsage(AU);
AU.setPreservesAll();
AU.addRequired<GCModuleInfo>();
}
static const char *DescKind(GC::PointKind Kind) {
switch (Kind) {
case GC::PreCall:
return "pre-call";
case GC::PostCall:
return "post-call";
}
llvm_unreachable("Invalid point kind");
}
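// Printer::runOnFunction below emits output of this shape (the function
// name, label, and offsets are illustrative):
//   GC roots for foo:
//           0       -8[sp]
//   GC safe points for foo:
//           .Ltmp0: post-call, live = { 0 }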
bool Printer::runOnFunction(Function &F) {
  // getFunctionInfo() asserts hasGC(), so skip functions without GC.
  if (!F.hasGC())
    return false;
GCFunctionInfo *FD = &getAnalysis<GCModuleInfo>().getFunctionInfo(F);
OS << "GC roots for " << FD->getFunction().getName() << ":\n";
for (GCFunctionInfo::roots_iterator RI = FD->roots_begin(),
RE = FD->roots_end();
RI != RE; ++RI)
OS << "\t" << RI->Num << "\t" << RI->StackOffset << "[sp]\n";
OS << "GC safe points for " << FD->getFunction().getName() << ":\n";
for (GCFunctionInfo::iterator PI = FD->begin(), PE = FD->end(); PI != PE;
++PI) {
OS << "\t" << PI->Label->getName() << ": " << DescKind(PI->Kind)
<< ", live = {";
    bool FirstLive = true;
    for (GCFunctionInfo::live_iterator RI = FD->live_begin(PI),
                                       RE = FD->live_end(PI);
         RI != RE; ++RI) {
      if (!FirstLive)
        OS << ",";
      FirstLive = false;
      OS << " " << RI->Num;
    }
OS << " }\n";
}
return false;
}
bool Printer::doFinalization(Module &M) {
GCModuleInfo *GMI = getAnalysisIfAvailable<GCModuleInfo>();
assert(GMI && "Printer didn't require GCModuleInfo?!");
GMI->clear();
return false;
}
GCStrategy *GCModuleInfo::getGCStrategy(const StringRef Name) {
// TODO: Arguably, just doing a linear search would be faster for small N
auto NMI = GCStrategyMap.find(Name);
if (NMI != GCStrategyMap.end())
return NMI->getValue();
for (auto& Entry : GCRegistry::entries()) {
if (Name == Entry.getName()) {
std::unique_ptr<GCStrategy> S = Entry.instantiate();
S->Name = Name;
GCStrategyMap[Name] = S.get();
GCStrategyList.push_back(std::move(S));
return GCStrategyList.back().get();
}
}
if (GCRegistry::begin() == GCRegistry::end()) {
// In normal operation, the registry should not be empty. There should
// be the builtin GCs if nothing else. The most likely scenario here is
// that we got here without running the initializers used by the Registry
    // itself and its registration mechanism.
const std::string error = ("unsupported GC: " + Name).str() +
" (did you remember to link and initialize the CodeGen library?)";
report_fatal_error(error);
} else
report_fatal_error(std::string("unsupported GC: ") + Name);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineInstr.cpp | //===-- lib/CodeGen/MachineInstr.cpp --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Methods common to all machine instructions.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
// MachineOperand Implementation
//===----------------------------------------------------------------------===//
void MachineOperand::setReg(unsigned Reg) {
if (getReg() == Reg) return; // No change.
// Otherwise, we have to change the register. If this operand is embedded
// into a machine function, we need to update the old and new register's
// use/def lists.
if (MachineInstr *MI = getParent())
if (MachineBasicBlock *MBB = MI->getParent())
if (MachineFunction *MF = MBB->getParent()) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.removeRegOperandFromUseList(this);
SmallContents.RegNo = Reg;
MRI.addRegOperandToUseList(this);
return;
}
// Otherwise, just change the register, no problem. :)
SmallContents.RegNo = Reg;
}
void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
const TargetRegisterInfo &TRI) {
assert(TargetRegisterInfo::isVirtualRegister(Reg));
if (SubIdx && getSubReg())
SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
setReg(Reg);
if (SubIdx)
setSubReg(SubIdx);
}
void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
if (getSubReg()) {
Reg = TRI.getSubReg(Reg, getSubReg());
// Note that getSubReg() may return 0 if the sub-register doesn't exist.
// That won't happen in legal code.
setSubReg(0);
}
setReg(Reg);
}
/// Change a def to a use, or a use to a def.
void MachineOperand::setIsDef(bool Val) {
assert(isReg() && "Wrong MachineOperand accessor");
assert((!Val || !isDebug()) && "Marking a debug operation as def");
if (IsDef == Val)
return;
// MRI may keep uses and defs in different list positions.
if (MachineInstr *MI = getParent())
if (MachineBasicBlock *MBB = MI->getParent())
if (MachineFunction *MF = MBB->getParent()) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.removeRegOperandFromUseList(this);
IsDef = Val;
MRI.addRegOperandToUseList(this);
return;
}
IsDef = Val;
}
// If this operand is currently a register operand, and if this is in a
// function, deregister the operand from the register's use/def list.
void MachineOperand::removeRegFromUses() {
if (!isReg() || !isOnRegUseList())
return;
if (MachineInstr *MI = getParent()) {
if (MachineBasicBlock *MBB = MI->getParent()) {
if (MachineFunction *MF = MBB->getParent())
MF->getRegInfo().removeRegOperandFromUseList(this);
}
}
}
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
void MachineOperand::ChangeToImmediate(int64_t ImmVal) {
assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");
removeRegFromUses();
OpKind = MO_Immediate;
Contents.ImmVal = ImmVal;
}
void MachineOperand::ChangeToFPImmediate(const ConstantFP *FPImm) {
assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");
removeRegFromUses();
OpKind = MO_FPImmediate;
Contents.CFP = FPImm;
}
void MachineOperand::ChangeToES(const char *SymName, unsigned char TargetFlags) {
assert((!isReg() || !isTied()) &&
"Cannot change a tied operand into an external symbol");
removeRegFromUses();
OpKind = MO_ExternalSymbol;
Contents.OffsetedInfo.Val.SymbolName = SymName;
setOffset(0); // Offset is always 0.
setTargetFlags(TargetFlags);
}
void MachineOperand::ChangeToMCSymbol(MCSymbol *Sym) {
assert((!isReg() || !isTied()) &&
"Cannot change a tied operand into an MCSymbol");
removeRegFromUses();
OpKind = MO_MCSymbol;
Contents.Sym = Sym;
}
/// ChangeToRegister - Replace this operand with a new register operand of
/// the specified value. If an operand is known to be a register already,
/// the setReg method should be used.
void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
bool isKill, bool isDead, bool isUndef,
bool isDebug) {
MachineRegisterInfo *RegInfo = nullptr;
if (MachineInstr *MI = getParent())
if (MachineBasicBlock *MBB = MI->getParent())
if (MachineFunction *MF = MBB->getParent())
RegInfo = &MF->getRegInfo();
// If this operand is already a register operand, remove it from the
// register's use/def lists.
bool WasReg = isReg();
if (RegInfo && WasReg)
RegInfo->removeRegOperandFromUseList(this);
// Change this to a register and set the reg#.
OpKind = MO_Register;
SmallContents.RegNo = Reg;
SubReg_TargetFlags = 0;
IsDef = isDef;
IsImp = isImp;
IsKill = isKill;
IsDead = isDead;
IsUndef = isUndef;
IsInternalRead = false;
IsEarlyClobber = false;
IsDebug = isDebug;
// Ensure isOnRegUseList() returns false.
Contents.Reg.Prev = nullptr;
// Preserve the tie when the operand was already a register.
if (!WasReg)
TiedTo = 0;
// If this operand is embedded in a function, add the operand to the
// register's use/def list.
if (RegInfo)
RegInfo->addRegOperandToUseList(this);
}
/// isIdenticalTo - Return true if this operand is identical to the specified
/// operand. Note that this should stay in sync with the hash_value overload
/// below.
bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
if (getType() != Other.getType() ||
getTargetFlags() != Other.getTargetFlags())
return false;
switch (getType()) {
case MachineOperand::MO_Register:
return getReg() == Other.getReg() && isDef() == Other.isDef() &&
getSubReg() == Other.getSubReg();
case MachineOperand::MO_Immediate:
return getImm() == Other.getImm();
case MachineOperand::MO_CImmediate:
return getCImm() == Other.getCImm();
case MachineOperand::MO_FPImmediate:
return getFPImm() == Other.getFPImm();
case MachineOperand::MO_MachineBasicBlock:
return getMBB() == Other.getMBB();
case MachineOperand::MO_FrameIndex:
return getIndex() == Other.getIndex();
case MachineOperand::MO_ConstantPoolIndex:
case MachineOperand::MO_TargetIndex:
return getIndex() == Other.getIndex() && getOffset() == Other.getOffset();
case MachineOperand::MO_JumpTableIndex:
return getIndex() == Other.getIndex();
case MachineOperand::MO_GlobalAddress:
return getGlobal() == Other.getGlobal() && getOffset() == Other.getOffset();
case MachineOperand::MO_ExternalSymbol:
return !strcmp(getSymbolName(), Other.getSymbolName()) &&
getOffset() == Other.getOffset();
case MachineOperand::MO_BlockAddress:
return getBlockAddress() == Other.getBlockAddress() &&
getOffset() == Other.getOffset();
case MachineOperand::MO_RegisterMask:
case MachineOperand::MO_RegisterLiveOut:
return getRegMask() == Other.getRegMask();
case MachineOperand::MO_MCSymbol:
return getMCSymbol() == Other.getMCSymbol();
case MachineOperand::MO_CFIIndex:
return getCFIIndex() == Other.getCFIIndex();
case MachineOperand::MO_Metadata:
return getMetadata() == Other.getMetadata();
}
llvm_unreachable("Invalid machine operand type");
}
// Note: this must stay exactly in sync with isIdenticalTo above.
hash_code llvm::hash_value(const MachineOperand &MO) {
switch (MO.getType()) {
case MachineOperand::MO_Register:
// Register operands don't have target flags.
return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
case MachineOperand::MO_Immediate:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
case MachineOperand::MO_CImmediate:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCImm());
case MachineOperand::MO_FPImmediate:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getFPImm());
case MachineOperand::MO_MachineBasicBlock:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMBB());
case MachineOperand::MO_FrameIndex:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
case MachineOperand::MO_ConstantPoolIndex:
case MachineOperand::MO_TargetIndex:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex(),
MO.getOffset());
case MachineOperand::MO_JumpTableIndex:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
case MachineOperand::MO_ExternalSymbol:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getOffset(),
MO.getSymbolName());
case MachineOperand::MO_GlobalAddress:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getGlobal(),
MO.getOffset());
case MachineOperand::MO_BlockAddress:
return hash_combine(MO.getType(), MO.getTargetFlags(),
MO.getBlockAddress(), MO.getOffset());
case MachineOperand::MO_RegisterMask:
case MachineOperand::MO_RegisterLiveOut:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask());
case MachineOperand::MO_Metadata:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata());
case MachineOperand::MO_MCSymbol:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol());
case MachineOperand::MO_CFIIndex:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCFIIndex());
}
llvm_unreachable("Invalid machine operand type");
}
void MachineOperand::print(raw_ostream &OS,
const TargetRegisterInfo *TRI) const {
ModuleSlotTracker DummyMST(nullptr);
print(OS, DummyMST, TRI);
}
void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
const TargetRegisterInfo *TRI) const {
switch (getType()) {
case MachineOperand::MO_Register:
OS << PrintReg(getReg(), TRI, getSubReg());
if (isDef() || isKill() || isDead() || isImplicit() || isUndef() ||
isInternalRead() || isEarlyClobber() || isTied()) {
OS << '<';
bool NeedComma = false;
if (isDef()) {
if (NeedComma) OS << ',';
if (isEarlyClobber())
OS << "earlyclobber,";
if (isImplicit())
OS << "imp-";
OS << "def";
NeedComma = true;
// <def,read-undef> only makes sense when getSubReg() is set.
// Don't clutter the output otherwise.
if (isUndef() && getSubReg())
OS << ",read-undef";
} else if (isImplicit()) {
OS << "imp-use";
NeedComma = true;
}
if (isKill()) {
if (NeedComma) OS << ',';
OS << "kill";
NeedComma = true;
}
if (isDead()) {
if (NeedComma) OS << ',';
OS << "dead";
NeedComma = true;
}
if (isUndef() && isUse()) {
if (NeedComma) OS << ',';
OS << "undef";
NeedComma = true;
}
if (isInternalRead()) {
if (NeedComma) OS << ',';
OS << "internal";
NeedComma = true;
}
if (isTied()) {
if (NeedComma) OS << ',';
OS << "tied";
if (TiedTo != 15)
OS << unsigned(TiedTo - 1);
}
OS << '>';
}
break;
case MachineOperand::MO_Immediate:
OS << getImm();
break;
case MachineOperand::MO_CImmediate:
getCImm()->getValue().print(OS, false);
break;
case MachineOperand::MO_FPImmediate:
if (getFPImm()->getType()->isFloatTy())
OS << getFPImm()->getValueAPF().convertToFloat();
else
OS << getFPImm()->getValueAPF().convertToDouble();
break;
case MachineOperand::MO_MachineBasicBlock:
OS << "<BB#" << getMBB()->getNumber() << ">";
break;
case MachineOperand::MO_FrameIndex:
OS << "<fi#" << getIndex() << '>';
break;
case MachineOperand::MO_ConstantPoolIndex:
OS << "<cp#" << getIndex();
if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
case MachineOperand::MO_TargetIndex:
OS << "<ti#" << getIndex();
if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
case MachineOperand::MO_JumpTableIndex:
OS << "<jt#" << getIndex() << '>';
break;
case MachineOperand::MO_GlobalAddress:
OS << "<ga:";
getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
case MachineOperand::MO_ExternalSymbol:
OS << "<es:" << getSymbolName();
if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
case MachineOperand::MO_BlockAddress:
OS << '<';
getBlockAddress()->printAsOperand(OS, /*PrintType=*/false, MST);
if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
case MachineOperand::MO_RegisterMask:
OS << "<regmask>";
break;
case MachineOperand::MO_RegisterLiveOut:
OS << "<regliveout>";
break;
case MachineOperand::MO_Metadata:
OS << '<';
getMetadata()->printAsOperand(OS, MST);
OS << '>';
break;
case MachineOperand::MO_MCSymbol:
OS << "<MCSym=" << *getMCSymbol() << '>';
break;
case MachineOperand::MO_CFIIndex:
OS << "<call frame instruction>";
break;
}
if (unsigned TF = getTargetFlags())
OS << "[TF=" << TF << ']';
}
//===----------------------------------------------------------------------===//
// MachineMemOperand Implementation
//===----------------------------------------------------------------------===//
/// getAddrSpace - Return the LLVM IR address space number that this pointer
/// points into.
unsigned MachinePointerInfo::getAddrSpace() const {
if (V.isNull() || V.is<const PseudoSourceValue*>()) return 0;
return cast<PointerType>(V.get<const Value*>()->getType())->getAddressSpace();
}
/// getConstantPool - Return a MachinePointerInfo record that refers to the
/// constant pool.
MachinePointerInfo MachinePointerInfo::getConstantPool() {
return MachinePointerInfo(PseudoSourceValue::getConstantPool());
}
/// getFixedStack - Return a MachinePointerInfo record that refers to the
/// specified FrameIndex.
MachinePointerInfo MachinePointerInfo::getFixedStack(int FI, int64_t offset) {
return MachinePointerInfo(PseudoSourceValue::getFixedStack(FI), offset);
}
MachinePointerInfo MachinePointerInfo::getJumpTable() {
return MachinePointerInfo(PseudoSourceValue::getJumpTable());
}
MachinePointerInfo MachinePointerInfo::getGOT() {
return MachinePointerInfo(PseudoSourceValue::getGOT());
}
MachinePointerInfo MachinePointerInfo::getStack(int64_t Offset) {
return MachinePointerInfo(PseudoSourceValue::getStack(), Offset);
}
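// Note on the Flags encoding used by the constructor below: the low MOMaxBits
// bits hold the load/store/volatile style flags, and the bits above them
// store the base alignment as Log2_32(a) + 1, so a zero field means "no
// alignment recorded". For example, a == 8 is stored as Log2_32(8) + 1 == 4,
// and getBaseAlignment() undoes the encoding to return 8.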
MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, unsigned f,
uint64_t s, unsigned int a,
const AAMDNodes &AAInfo,
const MDNode *Ranges)
: PtrInfo(ptrinfo), Size(s),
Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)),
AAInfo(AAInfo), Ranges(Ranges) {
assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue*>() ||
isa<PointerType>(PtrInfo.V.get<const Value*>()->getType())) &&
"invalid pointer value");
assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
assert((isLoad() || isStore()) && "Not a load/store!");
}
/// Profile - Gather unique data for the object.
///
void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
ID.AddInteger(getOffset());
ID.AddInteger(Size);
ID.AddPointer(getOpaqueValue());
ID.AddInteger(Flags);
}
void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
// The Value and Offset may differ due to CSE. But the flags and size
// should be the same.
assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
assert(MMO->getSize() == getSize() && "Size mismatch!");
if (MMO->getBaseAlignment() >= getBaseAlignment()) {
// Update the alignment value.
Flags = (Flags & ((1 << MOMaxBits) - 1)) |
((Log2_32(MMO->getBaseAlignment()) + 1) << MOMaxBits);
// Also update the base and offset, because the new alignment may
// not be applicable with the old ones.
PtrInfo = MMO->PtrInfo;
}
}
/// getAlignment - Return the minimum known alignment in bytes of the
/// actual memory reference.
uint64_t MachineMemOperand::getAlignment() const {
return MinAlign(getBaseAlignment(), getOffset());
}
void MachineMemOperand::print(raw_ostream &OS) const {
ModuleSlotTracker DummyMST(nullptr);
print(OS, DummyMST);
}
void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST) const {
assert((isLoad() || isStore()) &&
"SV has to be a load, store or both.");
if (isVolatile())
OS << "Volatile ";
if (isLoad())
OS << "LD";
if (isStore())
OS << "ST";
OS << getSize();
// Print the address information.
OS << "[";
if (const Value *V = getValue())
V->printAsOperand(OS, /*PrintType=*/false, MST);
else if (const PseudoSourceValue *PSV = getPseudoValue())
PSV->printCustom(OS);
else
OS << "<unknown>";
unsigned AS = getAddrSpace();
if (AS != 0)
OS << "(addrspace=" << AS << ')';
// If the alignment of the memory reference itself differs from the alignment
// of the base pointer, print the base alignment explicitly, next to the base
// pointer.
if (getBaseAlignment() != getAlignment())
OS << "(align=" << getBaseAlignment() << ")";
if (getOffset() != 0)
OS << "+" << getOffset();
OS << "]";
// Print the alignment of the reference.
if (getBaseAlignment() != getAlignment() || getBaseAlignment() != getSize())
OS << "(align=" << getAlignment() << ")";
// Print TBAA info.
if (const MDNode *TBAAInfo = getAAInfo().TBAA) {
OS << "(tbaa=";
if (TBAAInfo->getNumOperands() > 0)
TBAAInfo->getOperand(0)->printAsOperand(OS, MST);
else
OS << "<unknown>";
OS << ")";
}
// Print AA scope info.
if (const MDNode *ScopeInfo = getAAInfo().Scope) {
OS << "(alias.scope=";
if (ScopeInfo->getNumOperands() > 0)
for (unsigned i = 0, ie = ScopeInfo->getNumOperands(); i != ie; ++i) {
ScopeInfo->getOperand(i)->printAsOperand(OS, MST);
if (i != ie-1)
OS << ",";
}
else
OS << "<unknown>";
OS << ")";
}
// Print AA noalias scope info.
if (const MDNode *NoAliasInfo = getAAInfo().NoAlias) {
OS << "(noalias=";
if (NoAliasInfo->getNumOperands() > 0)
for (unsigned i = 0, ie = NoAliasInfo->getNumOperands(); i != ie; ++i) {
NoAliasInfo->getOperand(i)->printAsOperand(OS, MST);
if (i != ie-1)
OS << ",";
}
else
OS << "<unknown>";
OS << ")";
}
// Print nontemporal info.
if (isNonTemporal())
OS << "(nontemporal)";
if (isInvariant())
OS << "(invariant)";
}
//===----------------------------------------------------------------------===//
// MachineInstr Implementation
//===----------------------------------------------------------------------===//
void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
if (MCID->ImplicitDefs)
for (const uint16_t *ImpDefs = MCID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
if (MCID->ImplicitUses)
for (const uint16_t *ImpUses = MCID->getImplicitUses(); *ImpUses; ++ImpUses)
addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
}
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
DebugLoc dl, bool NoImp)
: MCID(&tid), Parent(nullptr), Operands(nullptr), NumOperands(0), Flags(0),
AsmPrinterFlags(0), NumMemRefs(0), MemRefs(nullptr),
debugLoc(std::move(dl)) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
// Reserve space for the expected number of operands.
if (unsigned NumOps = MCID->getNumOperands() +
MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
CapOperands = OperandCapacity::get(NumOps);
Operands = MF.allocateOperandArray(CapOperands);
}
if (!NoImp)
addImplicitDefUseOperands(MF);
}
/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
: MCID(&MI.getDesc()), Parent(nullptr), Operands(nullptr), NumOperands(0),
Flags(0), AsmPrinterFlags(0),
NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
debugLoc(MI.getDebugLoc()) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
CapOperands = OperandCapacity::get(MI.getNumOperands());
Operands = MF.allocateOperandArray(CapOperands);
// Copy operands.
for (const MachineOperand &MO : MI.operands())
addOperand(MF, MO);
// Copy all the sensible flags.
setFlags(MI.Flags);
}
/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *MachineInstr::getRegInfo() {
if (MachineBasicBlock *MBB = getParent())
return &MBB->getParent()->getRegInfo();
return nullptr;
}
/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
for (MachineOperand &MO : operands())
if (MO.isReg())
MRI.removeRegOperandFromUseList(&MO);
}
/// AddRegOperandsToUseLists - Add all of the register operands in
/// this instruction to their respective use lists. This requires that the
/// operands not be on their use lists yet.
void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
for (MachineOperand &MO : operands())
if (MO.isReg())
MRI.addRegOperandToUseList(&MO);
}
void MachineInstr::addOperand(const MachineOperand &Op) {
MachineBasicBlock *MBB = getParent();
assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
MachineFunction *MF = MBB->getParent();
assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
addOperand(*MF, Op);
}
/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
unsigned NumOps, MachineRegisterInfo *MRI) {
if (MRI)
return MRI->moveOperands(Dst, Src, NumOps);
// MachineOperand is a trivially copyable type so we can just use memmove.
std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}
/// addOperand - Add the specified operand to the instruction. If it is an
/// implicit operand, it is added to the end of the operand list. If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
assert(MCID && "Cannot add operands before providing an instr descriptor");
// Check if we're adding one of our existing operands.
if (&Op >= Operands && &Op < Operands + NumOperands) {
// This is unusual: MI->addOperand(MI->getOperand(i)).
// If adding Op requires reallocating or moving existing operands around,
// the Op reference could go stale. Support it by copying Op.
MachineOperand CopyOp(Op);
return addOperand(MF, CopyOp);
}
// Find the insert location for the new operand. Implicit registers go at
// the end, everything else goes before the implicit regs.
//
// FIXME: Allow mixed explicit and implicit operands on inline asm.
// InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
// implicit-defs, but they must not be moved around. See the FIXME in
// InstrEmitter.cpp.
unsigned OpNo = getNumOperands();
bool isImpReg = Op.isReg() && Op.isImplicit();
if (!isImpReg && !isInlineAsm()) {
while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
--OpNo;
assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
}
}
#ifndef NDEBUG
bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
  // OpNo now points at the desired insertion point. Unless this is a variadic
// instruction, only implicit regs are allowed beyond MCID->getNumOperands().
// RegMask operands go between the explicit and implicit operands.
assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
OpNo < MCID->getNumOperands() || isMetaDataOp) &&
"Trying to add an operand to a machine instr that is already done!");
#endif
MachineRegisterInfo *MRI = getRegInfo();
// Determine if the Operands array needs to be reallocated.
// Save the old capacity and operand array.
OperandCapacity OldCap = CapOperands;
MachineOperand *OldOperands = Operands;
if (!OldOperands || OldCap.getSize() == getNumOperands()) {
CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
Operands = MF.allocateOperandArray(CapOperands);
// Move the operands before the insertion point.
if (OpNo)
moveOperands(Operands, OldOperands, OpNo, MRI);
}
// Move the operands following the insertion point.
if (OpNo != NumOperands)
moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
MRI);
++NumOperands;
// Deallocate the old operand array.
if (OldOperands != Operands && OldOperands)
MF.deallocateOperandArray(OldCap, OldOperands);
// Copy Op into place. It still needs to be inserted into the MRI use lists.
MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
NewMO->ParentMI = this;
// When adding a register operand, tell MRI about it.
if (NewMO->isReg()) {
// Ensure isOnRegUseList() returns false, regardless of Op's status.
NewMO->Contents.Reg.Prev = nullptr;
// Ignore existing ties. This is not a property that can be copied.
NewMO->TiedTo = 0;
// Add the new operand to MRI, but only for instructions in an MBB.
if (MRI)
MRI->addRegOperandToUseList(NewMO);
// The MCID operand information isn't accurate until we start adding
// explicit operands. The implicit operands are added first, then the
// explicits are inserted before them.
if (!isImpReg) {
// Tie uses to defs as indicated in MCInstrDesc.
if (NewMO->isUse()) {
int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
if (DefIdx != -1)
tieOperands(DefIdx, OpNo);
}
// If the register operand is flagged as early, mark the operand as such.
if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
NewMO->setIsEarlyClobber(true);
}
}
}
/// RemoveOperand - Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
assert(OpNo < getNumOperands() && "Invalid operand number");
untieRegOperand(OpNo);
#ifndef NDEBUG
// Moving tied operands would break the ties.
for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
if (Operands[i].isReg())
assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif
MachineRegisterInfo *MRI = getRegInfo();
if (MRI && Operands[OpNo].isReg())
MRI->removeRegOperandFromUseList(Operands + OpNo);
// Don't call the MachineOperand destructor. A lot of this code depends on
// MachineOperand having a trivial destructor anyway, and adding a call here
// wouldn't make it 'destructor-correct'.
if (unsigned N = NumOperands - 1 - OpNo)
moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
--NumOperands;
}
/// addMemOperand - Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void MachineInstr::addMemOperand(MachineFunction &MF,
MachineMemOperand *MO) {
mmo_iterator OldMemRefs = MemRefs;
unsigned OldNumMemRefs = NumMemRefs;
unsigned NewNum = NumMemRefs + 1;
mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
NewMemRefs[NewNum - 1] = MO;
setMemRefs(NewMemRefs, NewMemRefs + NewNum);
}
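// hasPropertyInBundle - Walk the bundle starting at this header and query
// each instruction's MCID flags against Mask. With AnyInBundle the first
// instruction carrying a flag in Mask decides the answer; with AllInBundle
// every non-BUNDLE instruction must carry one for the query to succeed.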
bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
assert(!isBundledWithPred() && "Must be called on bundle header");
for (MachineBasicBlock::const_instr_iterator MII = this;; ++MII) {
if (MII->getDesc().getFlags() & Mask) {
if (Type == AnyInBundle)
return true;
} else {
if (Type == AllInBundle && !MII->isBundle())
return false;
}
// This was the last instruction in the bundle.
if (!MII->isBundledWithSucc())
return Type == AllInBundle;
}
}
bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
MICheckType Check) const {
// If opcodes or number of operands are not the same then the two
// instructions are obviously not identical.
if (Other->getOpcode() != getOpcode() ||
Other->getNumOperands() != getNumOperands())
return false;
if (isBundle()) {
// Both instructions are bundles, compare MIs inside the bundle.
MachineBasicBlock::const_instr_iterator I1 = *this;
MachineBasicBlock::const_instr_iterator E1 = getParent()->instr_end();
MachineBasicBlock::const_instr_iterator I2 = *Other;
    MachineBasicBlock::const_instr_iterator E2 =
        Other->getParent()->instr_end();
while (++I1 != E1 && I1->isInsideBundle()) {
++I2;
if (I2 == E2 || !I2->isInsideBundle() || !I1->isIdenticalTo(I2, Check))
return false;
}
}
// Check operands to make sure they match.
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
const MachineOperand &OMO = Other->getOperand(i);
if (!MO.isReg()) {
if (!MO.isIdenticalTo(OMO))
return false;
continue;
}
// Clients may or may not want to ignore defs when testing for equality.
// For example, machine CSE pass only cares about finding common
// subexpressions, so it's safe to ignore virtual register defs.
if (MO.isDef()) {
if (Check == IgnoreDefs)
continue;
else if (Check == IgnoreVRegDefs) {
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
TargetRegisterInfo::isPhysicalRegister(OMO.getReg()))
if (MO.getReg() != OMO.getReg())
return false;
} else {
if (!MO.isIdenticalTo(OMO))
return false;
if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
return false;
}
} else {
if (!MO.isIdenticalTo(OMO))
return false;
if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
return false;
}
}
// If DebugLoc does not match then two dbg.values are not identical.
if (isDebugValue())
if (getDebugLoc() && Other->getDebugLoc() &&
getDebugLoc() != Other->getDebugLoc())
return false;
return true;
}
MachineInstr *MachineInstr::removeFromParent() {
assert(getParent() && "Not embedded in a basic block!");
return getParent()->remove(this);
}
MachineInstr *MachineInstr::removeFromBundle() {
assert(getParent() && "Not embedded in a basic block!");
return getParent()->remove_instr(this);
}
void MachineInstr::eraseFromParent() {
assert(getParent() && "Not embedded in a basic block!");
getParent()->erase(this);
}
void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
assert(getParent() && "Not embedded in a basic block!");
MachineBasicBlock *MBB = getParent();
MachineFunction *MF = MBB->getParent();
assert(MF && "Not embedded in a function!");
MachineInstr *MI = (MachineInstr *)this;
MachineRegisterInfo &MRI = MF->getRegInfo();
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
MRI.markUsesInDebugValueAsUndef(Reg);
}
MI->eraseFromParent();
}
void MachineInstr::eraseFromBundle() {
assert(getParent() && "Not embedded in a basic block!");
getParent()->erase_instr(this);
}
/// getNumExplicitOperands - Returns the number of non-implicit operands.
///
unsigned MachineInstr::getNumExplicitOperands() const {
unsigned NumOperands = MCID->getNumOperands();
if (!MCID->isVariadic())
return NumOperands;
for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isImplicit())
NumOperands++;
}
return NumOperands;
}
void MachineInstr::bundleWithPred() {
assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
setFlag(BundledPred);
MachineBasicBlock::instr_iterator Pred = this;
--Pred;
assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
Pred->setFlag(BundledSucc);
}
void MachineInstr::bundleWithSucc() {
assert(!isBundledWithSucc() && "MI is already bundled with its successor");
setFlag(BundledSucc);
MachineBasicBlock::instr_iterator Succ = this;
++Succ;
assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
Succ->setFlag(BundledPred);
}
void MachineInstr::unbundleFromPred() {
assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
clearFlag(BundledPred);
MachineBasicBlock::instr_iterator Pred = this;
--Pred;
assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
Pred->clearFlag(BundledSucc);
}
void MachineInstr::unbundleFromSucc() {
assert(isBundledWithSucc() && "MI isn't bundled with its successor");
clearFlag(BundledSucc);
MachineBasicBlock::instr_iterator Succ = this;
++Succ;
assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
Succ->clearFlag(BundledPred);
}
bool MachineInstr::isStackAligningInlineAsm() const {
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
return true;
}
return false;
}
InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
unsigned *GroupNo) const {
assert(isInlineAsm() && "Expected an inline asm instruction");
assert(OpIdx < getNumOperands() && "OpIdx out of range");
// Ignore queries about the initial operands.
if (OpIdx < InlineAsm::MIOp_FirstOperand)
return -1;
unsigned Group = 0;
unsigned NumOps;
for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
i += NumOps) {
const MachineOperand &FlagMO = getOperand(i);
// If we reach the implicit register operands, stop looking.
if (!FlagMO.isImm())
return -1;
NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
if (i + NumOps > OpIdx) {
if (GroupNo)
*GroupNo = Group;
return i;
}
++Group;
}
return -1;
}
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const {
assert(getParent() && "Can't have an MBB reference here!");
assert(getParent()->getParent() && "Can't have an MF reference here!");
const MachineFunction &MF = *getParent()->getParent();
// Most opcodes have fixed constraints in their MCInstrDesc.
if (!isInlineAsm())
return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
if (!getOperand(OpIdx).isReg())
return nullptr;
// For tied uses on inline asm, get the constraint from the def.
unsigned DefIdx;
if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
OpIdx = DefIdx;
// Inline asm stores register class constraints in the flag word.
int FlagIdx = findInlineAsmFlagIdx(OpIdx);
if (FlagIdx < 0)
return nullptr;
unsigned Flag = getOperand(FlagIdx).getImm();
unsigned RCID;
if (InlineAsm::hasRegClassConstraint(Flag, RCID))
return TRI->getRegClass(RCID);
// Assume that all registers in a memory operand are pointers.
if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
return TRI->getPointerRegClass(MF);
return nullptr;
}
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operand inside the bundle if we have been asked to.
if (ExploreBundle)
for (ConstMIBundleOperands OpndIt(this); OpndIt.isValid() && CurRC;
++OpndIt)
CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
else
// Otherwise, just check the current operands.
for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
return CurRC;
}
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
assert(CurRC && "Invalid initial register class");
// Check if Reg is constrained by some of its use/def from MI.
const MachineOperand &MO = getOperand(OpIdx);
if (!MO.isReg() || MO.getReg() != Reg)
return CurRC;
// If yes, accumulate the constraints through the operand.
return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
unsigned OpIdx, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
const MachineOperand &MO = getOperand(OpIdx);
assert(MO.isReg() &&
"Cannot get register constraints for non-register operand");
assert(CurRC && "Invalid initial register class");
if (unsigned SubIdx = MO.getSubReg()) {
if (OpRC)
CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
else
CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
} else if (OpRC)
CurRC = TRI->getCommonSubClass(CurRC, OpRC);
return CurRC;
}
/// Return the number of instructions inside the MI bundle, not counting the
/// header instruction.
unsigned MachineInstr::getBundleSize() const {
MachineBasicBlock::const_instr_iterator I = this;
unsigned Size = 0;
while (I->isBundledWithSucc())
++Size, ++I;
return Size;
}
/// findRegisterUseOperandIdx() - Returns the index of the operand that is a
/// use of the specified register, or -1 if it is not found. It further
/// tightens the search criteria to a use that kills the register if isKill
/// is true.
int MachineInstr::findRegisterUseOperandIdx(unsigned Reg, bool isKill,
const TargetRegisterInfo *TRI) const {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (MOReg == Reg ||
(TRI &&
TargetRegisterInfo::isPhysicalRegister(MOReg) &&
TargetRegisterInfo::isPhysicalRegister(Reg) &&
TRI->isSubRegister(MOReg, Reg)))
if (!isKill || MO.isKill())
return i;
}
return -1;
}
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
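///
/// For example, a <def,undef> of a sub-register counts as a write but not a
/// read, while a plain partial sub-register def counts as both a read and a
/// write, because the lanes it leaves untouched are carried over.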
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(unsigned Reg,
SmallVectorImpl<unsigned> *Ops) const {
bool PartDef = false; // Partial redefine.
bool FullDef = false; // Full define.
bool Use = false;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg() || MO.getReg() != Reg)
continue;
if (Ops)
Ops->push_back(i);
if (MO.isUse())
Use |= !MO.isUndef();
else if (MO.getSubReg() && !MO.isUndef())
// A partial <def,undef> doesn't count as reading the register.
PartDef = true;
else
FullDef = true;
}
// A partial redefine uses Reg unless there is also a full define.
return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
const TargetRegisterInfo *TRI) const {
bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
// Accept regmask operands when Overlap is set.
// Ignore them when looking for a specific def operand (Overlap == false).
if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
return i;
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
bool Found = (MOReg == Reg);
if (!Found && TRI && isPhys &&
TargetRegisterInfo::isPhysicalRegister(MOReg)) {
if (Overlap)
Found = TRI->regsOverlap(MOReg, Reg);
else
Found = TRI->isSubRegister(MOReg, Reg);
}
if (Found && (!isDead || MO.isDead()))
return i;
}
return -1;
}
/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
// Don't call MCID.findFirstPredOperandIdx() because this variant
// is sometimes called on an instruction that's not yet complete, and
// so the number of operands is less than the MCID indicates. In
// particular, the PTX target does this.
const MCInstrDesc &MCID = getDesc();
if (MCID.isPredicable()) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
if (MCID.OpInfo[i].isPredicate())
return i;
}
return -1;
}
// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;
/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0: Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax: Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
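///
/// For example, tying a two-address use at operand index 1 to its def at
/// index 0 stores TiedTo = 1 on the use (DefIdx + 1) and TiedTo = 2 on the
/// def (UseIdx + 1).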
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
MachineOperand &DefMO = getOperand(DefIdx);
MachineOperand &UseMO = getOperand(UseIdx);
assert(DefMO.isDef() && "DefIdx must be a def operand");
assert(UseMO.isUse() && "UseIdx must be a use operand");
assert(!DefMO.isTied() && "Def is already tied to another use");
assert(!UseMO.isTied() && "Use is already tied to another def");
if (DefIdx < TiedMax)
UseMO.TiedTo = DefIdx + 1;
else {
// Inline asm can use the group descriptors to find tied operands, but on
// normal instruction, the tied def must be within the first TiedMax
// operands.
assert(isInlineAsm() && "DefIdx out of range");
UseMO.TiedTo = TiedMax;
}
// UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
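///
/// As an illustration of the inline asm case handled below, with a
/// hypothetical operand layout: if a use group starting at operand 7 is tied
/// to a def group starting at operand 3, then Delta == 4, so the use at
/// operand 8 maps to the def at operand 4, and vice versa.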
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
const MachineOperand &MO = getOperand(OpIdx);
assert(MO.isTied() && "Operand isn't tied");
// Normally TiedTo is in range.
if (MO.TiedTo < TiedMax)
return MO.TiedTo - 1;
// Uses on normal instructions can be out of range.
if (!isInlineAsm()) {
// Normal tied defs must be in the 0..TiedMax-1 range.
if (MO.isUse())
return TiedMax - 1;
// MO is a def. Search for the tied use.
for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
const MachineOperand &UseMO = getOperand(i);
if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
return i;
}
llvm_unreachable("Can't find tied use");
}
// Now deal with inline asm by parsing the operand group descriptor flags.
// Find the beginning of each operand group.
SmallVector<unsigned, 8> GroupIdx;
unsigned OpIdxGroup = ~0u;
unsigned NumOps;
for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
i += NumOps) {
const MachineOperand &FlagMO = getOperand(i);
assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
unsigned CurGroup = GroupIdx.size();
GroupIdx.push_back(i);
NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
// OpIdx belongs to this operand group.
if (OpIdx > i && OpIdx < i + NumOps)
OpIdxGroup = CurGroup;
unsigned TiedGroup;
if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
continue;
// Operands in this group are tied to operands in TiedGroup which must be
// earlier. Find the number of operands between the two groups.
unsigned Delta = i - GroupIdx[TiedGroup];
// OpIdx is a use tied to TiedGroup.
if (OpIdxGroup == CurGroup)
return OpIdx - Delta;
// OpIdx is a def tied to this use group.
if (OpIdxGroup == TiedGroup)
return OpIdx + Delta;
}
llvm_unreachable("Invalid tied operand on inline asm");
}
/// clearKillInfo - Clears kill flags on all operands.
///
void MachineInstr::clearKillInfo() {
for (MachineOperand &MO : operands()) {
if (MO.isReg() && MO.isUse())
MO.setIsKill(false);
}
}
void MachineInstr::substituteRegister(unsigned FromReg,
unsigned ToReg,
unsigned SubIdx,
const TargetRegisterInfo &RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
if (SubIdx)
ToReg = RegInfo.getSubReg(ToReg, SubIdx);
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || MO.getReg() != FromReg)
continue;
MO.substPhysReg(ToReg, RegInfo);
}
} else {
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || MO.getReg() != FromReg)
continue;
MO.substVirtReg(ToReg, SubIdx, RegInfo);
}
}
}
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
// Ignore stuff that we obviously can't move.
//
// Treat volatile loads as stores. This is not strictly necessary for
// volatiles, but it is required for atomic loads. It is not allowed to move
// a load across an atomic load with Ordering > Monotonic.
if (mayStore() || isCall() ||
(mayLoad() && hasOrderedMemoryRef())) {
SawStore = true;
return false;
}
if (isPosition() || isDebugValue() || isTerminator() ||
hasUnmodeledSideEffects())
return false;
// See if this instruction does a load. If so, we have to guarantee that the
  // loaded value doesn't change between the load and its intended
  // destination. The check for isInvariantLoad gives the target the chance to
// classify the load as always returning a constant, e.g. a constant pool
// load.
if (mayLoad() && !isInvariantLoad(AA))
// Otherwise, this is a real load. If there is a store between the load and
// end of block, we can't move it.
return !SawStore;
return true;
}
/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
/// reference is not available. Return false if it is known to have no ordered
/// memory references.
bool MachineInstr::hasOrderedMemoryRef() const {
// An instruction known never to access memory won't have a volatile access.
if (!mayStore() &&
!mayLoad() &&
!isCall() &&
!hasUnmodeledSideEffects())
return false;
// Otherwise, if the instruction has no memory reference information,
// conservatively assume it wasn't preserved.
if (memoperands_empty())
return true;
// Check the memory reference information for ordered references.
for (mmo_iterator I = memoperands_begin(), E = memoperands_end(); I != E; ++I)
if (!(*I)->isUnordered())
return true;
return false;
}
/// isInvariantLoad - Return true if this instruction is loading from a
/// location whose value is invariant across the function. For example,
/// loading a value from the constant pool or from the argument area
/// of a function if it does not change. This should only return true if
/// *all* loads the instruction does are invariant (if it does multiple loads).
bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
// If the instruction doesn't load at all, it isn't an invariant load.
if (!mayLoad())
return false;
// If the instruction has lost its memoperands, conservatively assume that
// it may not be an invariant load.
if (memoperands_empty())
return false;
const MachineFrameInfo *MFI = getParent()->getParent()->getFrameInfo();
for (mmo_iterator I = memoperands_begin(),
E = memoperands_end(); I != E; ++I) {
if ((*I)->isVolatile()) return false;
if ((*I)->isStore()) return false;
if ((*I)->isInvariant()) return true;
// A load from a constant PseudoSourceValue is invariant.
if (const PseudoSourceValue *PSV = (*I)->getPseudoValue())
if (PSV->isConstant(MFI))
continue;
if (const Value *V = (*I)->getValue()) {
// If we have an AliasAnalysis, ask it whether the memory is constant.
if (AA &&
AA->pointsToConstantMemory(
MemoryLocation(V, (*I)->getSize(), (*I)->getAAInfo())))
continue;
}
// Otherwise assume conservatively.
return false;
}
// Everything checks out.
return true;
}
/// isConstantValuePHI - If the specified instruction is a PHI that always
/// merges together the same virtual register, return the register, otherwise
/// return 0.
unsigned MachineInstr::isConstantValuePHI() const {
if (!isPHI())
return 0;
assert(getNumOperands() >= 3 &&
"It's illegal to have a PHI without source operands");
unsigned Reg = getOperand(1).getReg();
for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
if (getOperand(i).getReg() != Reg)
return 0;
return Reg;
}
bool MachineInstr::hasUnmodeledSideEffects() const {
if (hasProperty(MCID::UnmodeledSideEffects))
return true;
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
return true;
}
return false;
}
/// allDefsAreDead - Return true if all the defs of this instruction are dead.
///
bool MachineInstr::allDefsAreDead() const {
for (const MachineOperand &MO : operands()) {
if (!MO.isReg() || MO.isUse())
continue;
if (!MO.isDead())
return false;
}
return true;
}
/// copyImplicitOps - Copy implicit register operands from specified
/// instruction to this instruction.
void MachineInstr::copyImplicitOps(MachineFunction &MF,
const MachineInstr *MI) {
for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
addOperand(MF, MO);
}
}
void MachineInstr::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dbgs() << " " << *this;
#endif
}
void MachineInstr::print(raw_ostream &OS, bool SkipOpers) const {
const Module *M = nullptr;
if (const MachineBasicBlock *MBB = getParent())
if (const MachineFunction *MF = MBB->getParent())
M = MF->getFunction()->getParent();
ModuleSlotTracker MST(M);
print(OS, MST, SkipOpers);
}
void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
bool SkipOpers) const {
// We can be a bit tidier if we know the MachineFunction.
const MachineFunction *MF = nullptr;
const TargetRegisterInfo *TRI = nullptr;
const MachineRegisterInfo *MRI = nullptr;
const TargetInstrInfo *TII = nullptr;
if (const MachineBasicBlock *MBB = getParent()) {
MF = MBB->getParent();
if (MF) {
MRI = &MF->getRegInfo();
TRI = MF->getSubtarget().getRegisterInfo();
TII = MF->getSubtarget().getInstrInfo();
}
}
// Save a list of virtual registers.
SmallVector<unsigned, 8> VirtRegs;
// Print explicitly defined operands on the left of an assignment syntax.
unsigned StartOp = 0, e = getNumOperands();
for (; StartOp < e && getOperand(StartOp).isReg() &&
getOperand(StartOp).isDef() &&
!getOperand(StartOp).isImplicit();
++StartOp) {
if (StartOp != 0) OS << ", ";
getOperand(StartOp).print(OS, MST, TRI);
unsigned Reg = getOperand(StartOp).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
VirtRegs.push_back(Reg);
}
if (StartOp != 0)
OS << " = ";
// Print the opcode name.
if (TII)
OS << TII->getName(getOpcode());
else
OS << "UNKNOWN";
if (SkipOpers)
return;
// Print the rest of the operands.
bool OmittedAnyCallClobbers = false;
bool FirstOp = true;
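  // For inline asm, AsmDescOp tracks the operand index of the next operand
  // descriptor word; ~0u means this is not an inline asm instruction.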
unsigned AsmDescOp = ~0u;
unsigned AsmOpCount = 0;
if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
// Print asm string.
OS << " ";
getOperand(InlineAsm::MIOp_AsmString).print(OS, MST, TRI);
// Print HasSideEffects, MayLoad, MayStore, IsAlignStack
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
OS << " [sideeffect]";
if (ExtraInfo & InlineAsm::Extra_MayLoad)
OS << " [mayload]";
if (ExtraInfo & InlineAsm::Extra_MayStore)
OS << " [maystore]";
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
OS << " [alignstack]";
if (getInlineAsmDialect() == InlineAsm::AD_ATT)
OS << " [attdialect]";
if (getInlineAsmDialect() == InlineAsm::AD_Intel)
OS << " [inteldialect]";
StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
FirstOp = false;
}
for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
VirtRegs.push_back(MO.getReg());
// Omit call-clobbered registers which aren't used anywhere. This makes
// call instructions much less noisy on targets where calls clobber lots
// of registers. Don't rely on MO.isDead() because we may be called before
// LiveVariables is run, or we may be looking at a non-allocatable reg.
if (MRI && isCall() &&
MO.isReg() && MO.isImplicit() && MO.isDef()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (MRI->use_empty(Reg)) {
bool HasAliasLive = false;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
unsigned AliasReg = *AI;
if (!MRI->use_empty(AliasReg)) {
HasAliasLive = true;
break;
}
}
if (!HasAliasLive) {
OmittedAnyCallClobbers = true;
continue;
}
}
}
}
if (FirstOp) FirstOp = false; else OS << ",";
OS << " ";
if (i < getDesc().NumOperands) {
const MCOperandInfo &MCOI = getDesc().OpInfo[i];
if (MCOI.isPredicate())
OS << "pred:";
if (MCOI.isOptionalDef())
OS << "opt:";
}
if (isDebugValue() && MO.isMetadata()) {
// Pretty print DBG_VALUE instructions.
auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
if (DIV && !DIV->getName().empty())
OS << "!\"" << DIV->getName() << '\"';
else
MO.print(OS, MST, TRI);
} else if (TRI && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
OS << TRI->getSubRegIndexName(MO.getImm());
} else if (i == AsmDescOp && MO.isImm()) {
// Pretty print the inline asm operand descriptor.
OS << '$' << AsmOpCount++;
unsigned Flag = MO.getImm();
switch (InlineAsm::getKind(Flag)) {
case InlineAsm::Kind_RegUse: OS << ":[reguse"; break;
case InlineAsm::Kind_RegDef: OS << ":[regdef"; break;
case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
case InlineAsm::Kind_Clobber: OS << ":[clobber"; break;
case InlineAsm::Kind_Imm: OS << ":[imm"; break;
case InlineAsm::Kind_Mem: OS << ":[mem"; break;
default: OS << ":[??" << InlineAsm::getKind(Flag); break;
}
unsigned RCID = 0;
if (InlineAsm::hasRegClassConstraint(Flag, RCID)) {
if (TRI) {
OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
} else
OS << ":RC" << RCID;
}
unsigned TiedTo = 0;
if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
OS << " tiedto:$" << TiedTo;
OS << ']';
// Compute the index of the next operand descriptor.
AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
} else
MO.print(OS, MST, TRI);
}
// Briefly indicate whether any call clobbers were omitted.
if (OmittedAnyCallClobbers) {
if (!FirstOp) OS << ",";
OS << " ...";
}
bool HaveSemi = false;
const unsigned PrintableFlags = FrameSetup;
if (Flags & PrintableFlags) {
if (!HaveSemi) OS << ";"; HaveSemi = true;
OS << " flags: ";
if (Flags & FrameSetup)
OS << "FrameSetup";
}
if (!memoperands_empty()) {
if (!HaveSemi) OS << ";"; HaveSemi = true;
OS << " mem:";
for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
i != e; ++i) {
(*i)->print(OS, MST);
if (std::next(i) != e)
OS << " ";
}
}
// Print the regclass of any virtual registers encountered.
if (MRI && !VirtRegs.empty()) {
if (!HaveSemi) OS << ";"; HaveSemi = true;
for (unsigned i = 0; i != VirtRegs.size(); ++i) {
const TargetRegisterClass *RC = MRI->getRegClass(VirtRegs[i]);
OS << " " << TRI->getRegClassName(RC)
<< ':' << PrintReg(VirtRegs[i]);
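      // Coalesce the remaining virtual registers with the same register class
      // into this entry, erasing each one as it is printed.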
for (unsigned j = i+1; j != VirtRegs.size();) {
if (MRI->getRegClass(VirtRegs[j]) != RC) {
++j;
continue;
}
if (VirtRegs[i] != VirtRegs[j])
OS << "," << PrintReg(VirtRegs[j]);
VirtRegs.erase(VirtRegs.begin()+j);
}
}
}
// Print debug location information.
if (isDebugValue() && getOperand(e - 2).isMetadata()) {
if (!HaveSemi) OS << ";";
auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
OS << " line no:" << DV->getLine();
if (auto *InlinedAt = debugLoc->getInlinedAt()) {
DebugLoc InlinedAtDL(InlinedAt);
if (InlinedAtDL && MF) {
OS << " inlined @[ ";
InlinedAtDL.print(OS);
OS << " ]";
}
}
if (isIndirectDebugValue())
OS << " indirect";
} else if (debugLoc && MF) {
if (!HaveSemi) OS << ";";
OS << " dbg:";
debugLoc.print(OS);
}
OS << '\n';
}
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
bool hasAliases = isPhysReg &&
MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
bool Found = false;
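  // Operand indices of sub-register kill flags that become redundant once the
  // containing register itself is marked killed.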
SmallVector<unsigned,4> DeadOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.isUndef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (Reg == IncomingReg) {
if (!Found) {
if (MO.isKill())
// The register is already marked kill.
return true;
if (isPhysReg && isRegTiedToDefOperand(i))
// Two-address uses of physregs must not be marked kill.
return true;
MO.setIsKill();
Found = true;
}
} else if (hasAliases && MO.isKill() &&
TargetRegisterInfo::isPhysicalRegister(Reg)) {
// A super-register kill already exists.
if (RegInfo->isSuperRegister(IncomingReg, Reg))
return true;
if (RegInfo->isSubRegister(IncomingReg, Reg))
DeadOps.push_back(i);
}
}
// Trim unneeded kill operands.
while (!DeadOps.empty()) {
unsigned OpIdx = DeadOps.back();
if (getOperand(OpIdx).isImplicit())
RemoveOperand(OpIdx);
else
getOperand(OpIdx).setIsKill(false);
DeadOps.pop_back();
}
// If not found, this means an alias of one of the operands is killed. Add a
// new implicit operand if required.
if (!Found && AddIfNotFound) {
addOperand(MachineOperand::CreateReg(IncomingReg,
false /*IsDef*/,
true /*IsImp*/,
true /*IsKill*/));
return true;
}
return Found;
}
void MachineInstr::clearRegisterKills(unsigned Reg,
const TargetRegisterInfo *RegInfo) {
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
RegInfo = nullptr;
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
continue;
unsigned OpReg = MO.getReg();
if (OpReg == Reg || (RegInfo && RegInfo->isSuperRegister(Reg, OpReg)))
MO.setIsKill(false);
}
}
bool MachineInstr::addRegisterDead(unsigned Reg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
bool hasAliases = isPhysReg &&
MCRegAliasIterator(Reg, RegInfo, false).isValid();
bool Found = false;
SmallVector<unsigned,4> DeadOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
if (!MOReg)
continue;
if (MOReg == Reg) {
MO.setIsDead();
Found = true;
} else if (hasAliases && MO.isDead() &&
TargetRegisterInfo::isPhysicalRegister(MOReg)) {
// There exists a super-register that's marked dead.
if (RegInfo->isSuperRegister(Reg, MOReg))
return true;
if (RegInfo->isSubRegister(Reg, MOReg))
DeadOps.push_back(i);
}
}
// Trim unneeded dead operands.
while (!DeadOps.empty()) {
unsigned OpIdx = DeadOps.back();
if (getOperand(OpIdx).isImplicit())
RemoveOperand(OpIdx);
else
getOperand(OpIdx).setIsDead(false);
DeadOps.pop_back();
}
// If not found, this means an alias of one of the operands is dead. Add a
// new implicit operand if required.
if (Found || !AddIfNotFound)
return Found;
addOperand(MachineOperand::CreateReg(Reg,
true /*IsDef*/,
true /*IsImp*/,
false /*IsKill*/,
true /*IsDead*/));
return true;
}
void MachineInstr::clearRegisterDeads(unsigned Reg) {
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
continue;
MO.setIsDead(false);
}
}
void MachineInstr::addRegisterDefReadUndef(unsigned Reg) {
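  // A def of a sub-register normally reads the remaining lanes of Reg; the
  // undef flag records that those lanes are not live-in here.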
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
continue;
MO.setIsUndef();
}
}
void MachineInstr::addRegisterDefined(unsigned Reg,
const TargetRegisterInfo *RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
if (MO)
return;
} else {
for (const MachineOperand &MO : operands()) {
if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
MO.getSubReg() == 0)
return;
}
}
addOperand(MachineOperand::CreateReg(Reg,
true /*IsDef*/,
true /*IsImp*/));
}
void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
const TargetRegisterInfo &TRI) {
bool HasRegMask = false;
for (MachineOperand &MO : operands()) {
if (MO.isRegMask()) {
HasRegMask = true;
continue;
}
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
// If there are no uses, including partial uses, the def is dead.
if (std::none_of(UsedRegs.begin(), UsedRegs.end(),
[&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
MO.setIsDead();
}
// This is a call with a register mask operand.
// Mask clobbers are always dead, so add defs for the non-dead defines.
if (HasRegMask)
for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
I != E; ++I)
addRegisterDefined(*I, &TRI);
}
unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
// Build up a buffer of hash code components.
SmallVector<size_t, 8> HashComponents;
HashComponents.reserve(MI->getNumOperands() + 1);
HashComponents.push_back(MI->getOpcode());
for (const MachineOperand &MO : MI->operands()) {
if (MO.isReg() && MO.isDef() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue; // Skip virtual register defs.
HashComponents.push_back(hash_value(MO));
}
return hash_combine_range(HashComponents.begin(), HashComponents.end());
}
void MachineInstr::emitError(StringRef Msg) const {
// Find the source location cookie.
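  // Inline asm attaches its !srcloc metadata as a trailing operand, and the
  // metadata's first operand holds the cookie, so scan the operands backwards.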
unsigned LocCookie = 0;
const MDNode *LocMD = nullptr;
for (unsigned i = getNumOperands(); i != 0; --i) {
if (getOperand(i-1).isMetadata() &&
(LocMD = getOperand(i-1).getMetadata()) &&
LocMD->getNumOperands() != 0) {
if (const ConstantInt *CI =
mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
LocCookie = CI->getZExtValue();
break;
}
}
}
if (const MachineBasicBlock *MBB = getParent())
if (const MachineFunction *MF = MBB->getParent())
return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
report_fatal_error(Msg);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/README.txt | //===---------------------------------------------------------------------===//
//===---------------------------------------------------------------------===//
Common register allocation / spilling problem:
mul lr, r4, lr
str lr, [sp, #+52]
ldr lr, [r1, #+32]
sxth r3, r3
ldr r4, [sp, #+52]
mla r4, r3, lr, r4
can be:
mul lr, r4, lr
mov r4, lr
str lr, [sp, #+52]
ldr lr, [r1, #+32]
sxth r3, r3
mla r4, r3, lr, r4
and then "merge" mul and mov:
mul r4, r4, lr
str r4, [sp, #+52]
ldr lr, [r1, #+32]
sxth r3, r3
mla r4, r3, lr, r4
It also increases the likelihood that the store may become dead.
//===---------------------------------------------------------------------===//
bb27 ...
...
%reg1037 = ADDri %reg1039, 1
%reg1038 = ADDrs %reg1032, %reg1039, %NOREG, 10
Successors according to CFG: 0x8b03bf0 (#5)
bb76 (0x8b03bf0, LLVM BB @0x8b032d0, ID#5):
Predecessors according to CFG: 0x8b0c5f0 (#3) 0x8b0a7c0 (#4)
%reg1039 = PHI %reg1070, mbb<bb76.outer,0x8b0c5f0>, %reg1037, mbb<bb27,0x8b0a7c0>
Note ADDri is not a two-address instruction. However, its result %reg1037 is an
operand of the PHI node in bb76 and its operand %reg1039 is the result of the
PHI node. We should treat it as two-address code and make sure the ADDri is
scheduled after any node that reads %reg1039.
//===---------------------------------------------------------------------===//
Use local info (i.e. the register scavenger) to assign it a free register to allow
reuse:
ldr r3, [sp, #+4]
add r3, r3, #3
ldr r2, [sp, #+8]
add r2, r2, #2
ldr r1, [sp, #+4] <==
add r1, r1, #1
ldr r0, [sp, #+4]
add r0, r0, #2
//===---------------------------------------------------------------------===//
LLVM aggressively lifts CSE out of loops. Sometimes this can have negative side
effects:
R1 = X + 4
R2 = X + 7
R3 = X + 15
loop:
load [i + R1]
...
load [i + R2]
...
load [i + R3]
Suppose there is high register pressure; R1, R2, and R3 can be spilled. We need
to implement proper re-materialization to handle this:
R1 = X + 4
R2 = X + 7
R3 = X + 15
loop:
R1 = X + 4 @ re-materialized
load [i + R1]
...
R2 = X + 7 @ re-materialized
load [i + R2]
...
R3 = X + 15 @ re-materialized
load [i + R3]
Furthermore, with re-association, we can enable sharing:
R1 = X + 4
R2 = X + 7
R3 = X + 15
loop:
T = i + X
load [T + 4]
...
load [T + 7]
...
load [T + 15]
//===---------------------------------------------------------------------===//
It's not always a good idea to choose rematerialization over spilling. If all
the load / store instructions would be folded then spilling is cheaper because
it won't require new live intervals / registers. See 2003-05-31-LongShifts for
an example.
//===---------------------------------------------------------------------===//
With a copying garbage collector, derived pointers must not be retained across
collector safe points; the collector could move the objects and invalidate the
derived pointer. This is bad enough in the first place, but safe points can
crop up unpredictably. Consider:
%array = load { i32, [0 x %obj] }** %array_addr
%nth_el = getelementptr { i32, [0 x %obj] }* %array, i32 0, i32 %n
%old = load %obj** %nth_el
%z = div i64 %x, %y
store %obj* %new, %obj** %nth_el
If the i64 division is lowered to a libcall, then a safe point will (must)
appear for the call site. If a collection occurs, %array and %nth_el no longer
point into the correct object.
The fix for this is to copy address calculations so that dependent pointers
are never live across safe point boundaries. But the loads cannot be copied
like this if there was an intervening store, so this may be hard to get right.
Only a concurrent mutator can trigger a collection at the libcall safe point.
So single-threaded programs do not have this requirement, even with a copying
collector. Still, LLVM optimizations would probably undo a front-end's careful
work.
//===---------------------------------------------------------------------===//
The ocaml frametable structure supports liveness information. It would be good
to support it.
//===---------------------------------------------------------------------===//
The FIXME in ComputeCommonTailLength in BranchFolding.cpp needs to be
revisited. The check is there to work around a misuse of directives in inline
assembly.
//===---------------------------------------------------------------------===//
It would be good to detect collector/target compatibility instead of silently
doing the wrong thing.
//===---------------------------------------------------------------------===//
It would be really nice to be able to write patterns in .td files for copies,
which would eliminate a bunch of explicit predicates on them (e.g. no side
effects). Once this is in place, it would be even better to have tblgen
synthesize the various copy insertion/inspection methods in TargetInstrInfo.
//===---------------------------------------------------------------------===//
Stack coloring improvements:
1. Do proper LiveStackAnalysis on all stack objects including those which are
not spill slots.
2. Reorder objects to fill in gaps between objects.
e.g. 4, 1, <gap>, 4, 1, 1, 1, <gap>, 4 => 4, 1, 1, 1, 1, 4, 4
//===---------------------------------------------------------------------===//
The scheduler should be able to sort nearby instructions by their address. For
example, in an expanded memset sequence it's not uncommon to see code like this:
movl $0, 4(%rdi)
movl $0, 8(%rdi)
movl $0, 12(%rdi)
movl $0, 0(%rdi)
Each of the stores is independent, and the scheduler is currently making an
arbitrary decision about the order.
//===---------------------------------------------------------------------===//
Another opportunity in this code is that the $0 could be moved to a register:
movl $0, 4(%rdi)
movl $0, 8(%rdi)
movl $0, 12(%rdi)
movl $0, 0(%rdi)
This would save substantial code size, especially for longer sequences like
this. It would be easy to have a rule telling isel to avoid matching MOV32mi
if the immediate has more than some fixed number of uses. It's more involved
to teach the register allocator how to do late folding to recover from
excessive register pressure.
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/SjLjEHPrepare.cpp | //===- SjLjEHPrepare.cpp - Eliminate Invoke & Unwind instructions ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which use SjLj
// based exception handling.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <set>
using namespace llvm;
#define DEBUG_TYPE "sjljehprepare"
STATISTIC(NumInvokes, "Number of invokes replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");
namespace {
class SjLjEHPrepare : public FunctionPass {
Type *doubleUnderDataTy;
Type *doubleUnderJBufTy;
Type *FunctionContextTy;
Constant *RegisterFn;
Constant *UnregisterFn;
Constant *BuiltinSetjmpFn;
Constant *FrameAddrFn;
Constant *StackAddrFn;
Constant *StackRestoreFn;
Constant *LSDAAddrFn;
Value *PersonalityFn;
Constant *CallSiteFn;
Constant *FuncCtxFn;
AllocaInst *FuncCtx;
public:
static char ID; // Pass identification, replacement for typeid
explicit SjLjEHPrepare() : FunctionPass(ID) {}
bool doInitialization(Module &M) override;
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {}
StringRef getPassName() const override {
return "SJLJ Exception Handling preparation";
}
private:
bool setupEntryBlockAndCallSites(Function &F);
void substituteLPadValues(LandingPadInst *LPI, Value *ExnVal, Value *SelVal);
Value *setupFunctionContext(Function &F, ArrayRef<LandingPadInst *> LPads);
void lowerIncomingArguments(Function &F);
void lowerAcrossUnwindEdges(Function &F, ArrayRef<InvokeInst *> Invokes);
void insertCallSiteStore(Instruction *I, int Number);
};
} // end anonymous namespace
char SjLjEHPrepare::ID = 0;
INITIALIZE_PASS(SjLjEHPrepare, "sjljehprepare", "Prepare SjLj exceptions",
false, false)
// Public Interface To the SjLjEHPrepare pass.
FunctionPass *llvm::createSjLjEHPreparePass() { return new SjLjEHPrepare(); }
// doInitialization - Set up declarations and types needed to process
// exceptions.
bool SjLjEHPrepare::doInitialization(Module &M) {
// Build the function context structure.
// builtin_setjmp uses a five-word jbuf
Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
Type *Int32Ty = Type::getInt32Ty(M.getContext());
doubleUnderDataTy = ArrayType::get(Int32Ty, 4);
doubleUnderJBufTy = ArrayType::get(VoidPtrTy, 5);
FunctionContextTy = StructType::get(VoidPtrTy, // __prev
Int32Ty, // call_site
doubleUnderDataTy, // __data
VoidPtrTy, // __personality
VoidPtrTy, // __lsda
doubleUnderJBufTy, // __jbuf
nullptr);
RegisterFn = M.getOrInsertFunction(
"_Unwind_SjLj_Register", Type::getVoidTy(M.getContext()),
PointerType::getUnqual(FunctionContextTy), (Type *)nullptr);
UnregisterFn = M.getOrInsertFunction(
"_Unwind_SjLj_Unregister", Type::getVoidTy(M.getContext()),
PointerType::getUnqual(FunctionContextTy), (Type *)nullptr);
FrameAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::frameaddress);
StackAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
BuiltinSetjmpFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_setjmp);
LSDAAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_lsda);
CallSiteFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_callsite);
FuncCtxFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_functioncontext);
PersonalityFn = nullptr;
return true;
}
/// insertCallSiteStore - Insert a store of the call-site value to the
/// function context
void SjLjEHPrepare::insertCallSiteStore(Instruction *I, int Number) {
IRBuilder<> Builder(I);
// Get a reference to the call_site field.
Type *Int32Ty = Type::getInt32Ty(I->getContext());
Value *Zero = ConstantInt::get(Int32Ty, 0);
Value *One = ConstantInt::get(Int32Ty, 1);
Value *Idxs[2] = { Zero, One };
Value *CallSite =
Builder.CreateGEP(FunctionContextTy, FuncCtx, Idxs, "call_site");
// Insert a store of the call-site number
ConstantInt *CallSiteNoC =
ConstantInt::get(Type::getInt32Ty(I->getContext()), Number);
Builder.CreateStore(CallSiteNoC, CallSite, true /*volatile*/);
}
/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB,
SmallPtrSetImpl<BasicBlock *> &LiveBBs) {
if (!LiveBBs.insert(BB).second)
return; // already been here.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
MarkBlocksLiveIn(*PI, LiveBBs);
}
/// substituteLPadValues - Substitute the values returned by the landingpad
/// instruction with those returned by the personality function.
void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
Value *SelVal) {
SmallVector<Value *, 8> UseWorkList(LPI->user_begin(), LPI->user_end());
while (!UseWorkList.empty()) {
Value *Val = UseWorkList.pop_back_val();
ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val);
if (!EVI)
continue;
if (EVI->getNumIndices() != 1)
continue;
if (*EVI->idx_begin() == 0)
EVI->replaceAllUsesWith(ExnVal);
else if (*EVI->idx_begin() == 1)
EVI->replaceAllUsesWith(SelVal);
if (EVI->getNumUses() == 0)
EVI->eraseFromParent();
}
if (LPI->getNumUses() == 0)
return;
// There are still some uses of LPI. Construct an aggregate with the exception
// values and replace the LPI with that aggregate.
Type *LPadType = LPI->getType();
Value *LPadVal = UndefValue::get(LPadType);
IRBuilder<> Builder(
std::next(BasicBlock::iterator(cast<Instruction>(SelVal))));
LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");
LPI->replaceAllUsesWith(LPadVal);
}
/// setupFunctionContext - Allocate the function context on the stack and fill
/// it with all of the data that we know at this point.
Value *SjLjEHPrepare::setupFunctionContext(Function &F,
ArrayRef<LandingPadInst *> LPads) {
BasicBlock *EntryBB = F.begin();
// Create an alloca for the incoming jump buffer ptr and the new jump buffer
// that needs to be restored on all exits from the function. This is an alloca
// because the value needs to be added to the global context list.
auto &DL = F.getParent()->getDataLayout();
unsigned Align = DL.getPrefTypeAlignment(FunctionContextTy);
FuncCtx = new AllocaInst(FunctionContextTy, nullptr, Align, "fn_context",
EntryBB->begin());
// Fill in the function context structure.
for (unsigned I = 0, E = LPads.size(); I != E; ++I) {
LandingPadInst *LPI = LPads[I];
IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt());
// Reference the __data field.
Value *FCData =
Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 2, "__data");
// The exception values come back in context->__data[0].
Value *ExceptionAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData,
0, 0, "exception_gep");
Value *ExnVal = Builder.CreateLoad(ExceptionAddr, true, "exn_val");
ExnVal = Builder.CreateIntToPtr(ExnVal, Builder.getInt8PtrTy());
Value *SelectorAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData,
0, 1, "exn_selector_gep");
Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val");
substituteLPadValues(LPI, ExnVal, SelVal);
}
// Personality function
IRBuilder<> Builder(EntryBB->getTerminator());
if (!PersonalityFn)
PersonalityFn = F.getPersonalityFn();
Value *PersonalityFieldPtr = Builder.CreateConstGEP2_32(
FunctionContextTy, FuncCtx, 0, 3, "pers_fn_gep");
Builder.CreateStore(
Builder.CreateBitCast(PersonalityFn, Builder.getInt8PtrTy()),
PersonalityFieldPtr, /*isVolatile=*/true);
// LSDA address
Value *LSDA = Builder.CreateCall(LSDAAddrFn, {}, "lsda_addr");
Value *LSDAFieldPtr =
Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 4, "lsda_gep");
Builder.CreateStore(LSDA, LSDAFieldPtr, /*isVolatile=*/true);
return FuncCtx;
}
/// lowerIncomingArguments - To avoid having to handle incoming arguments
/// specially, we lower each arg to a copy instruction in the entry block. This
/// ensures that the argument value itself cannot be live out of the entry
/// block.
void SjLjEHPrepare::lowerIncomingArguments(Function &F) {
BasicBlock::iterator AfterAllocaInsPt = F.begin()->begin();
while (isa<AllocaInst>(AfterAllocaInsPt) &&
isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsPt)->getArraySize()))
++AfterAllocaInsPt;
for (Function::arg_iterator AI = F.arg_begin(), AE = F.arg_end(); AI != AE;
++AI) {
Type *Ty = AI->getType();
// Use 'select i8 true, %arg, undef' to simulate a 'no-op' instruction.
Value *TrueValue = ConstantInt::getTrue(F.getContext());
Value *UndefValue = UndefValue::get(Ty);
Instruction *SI = SelectInst::Create(TrueValue, AI, UndefValue,
AI->getName() + ".tmp",
AfterAllocaInsPt);
AI->replaceAllUsesWith(SI);
// Reset the operand, because it was clobbered by the RAUW above.
SI->setOperand(1, AI);
}
}
/// lowerAcrossUnwindEdges - Find all variables which are alive across an unwind
/// edge and spill them.
void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
ArrayRef<InvokeInst *> Invokes) {
// Finally, scan the code looking for instructions with bad live ranges.
for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
for (BasicBlock::iterator II = BB->begin(), IIE = BB->end(); II != IIE;
++II) {
// Ignore obvious cases we don't have to handle. In particular, most
// instructions either have no uses or only have a single use inside the
// current block. Ignore them quickly.
Instruction *Inst = II;
if (Inst->use_empty())
continue;
if (Inst->hasOneUse() &&
cast<Instruction>(Inst->user_back())->getParent() == BB &&
!isa<PHINode>(Inst->user_back()))
continue;
// If this is an alloca in the entry block, it's not a real register
// value.
if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
if (isa<ConstantInt>(AI->getArraySize()) && BB == F.begin())
continue;
// Avoid iterator invalidation by copying users to a temporary vector.
SmallVector<Instruction *, 16> Users;
for (User *U : Inst->users()) {
Instruction *UI = cast<Instruction>(U);
if (UI->getParent() != BB || isa<PHINode>(UI))
Users.push_back(UI);
}
// Find all of the blocks that this value is live in.
SmallPtrSet<BasicBlock *, 64> LiveBBs;
LiveBBs.insert(Inst->getParent());
while (!Users.empty()) {
Instruction *U = Users.back();
Users.pop_back();
if (!isa<PHINode>(U)) {
MarkBlocksLiveIn(U->getParent(), LiveBBs);
} else {
// Uses for a PHI node occur in their predecessor block.
PHINode *PN = cast<PHINode>(U);
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) == Inst)
MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
}
}
// Now that we know all of the blocks that this thing is live in, see if
// it includes any of the unwind locations.
bool NeedsSpill = false;
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
DEBUG(dbgs() << "SJLJ Spill: " << *Inst << " around "
<< UnwindBlock->getName() << "\n");
NeedsSpill = true;
break;
}
}
// If we decided we need a spill, do it.
// FIXME: Spilling this way is overkill, as it forces all uses of
// the value to be reloaded from the stack slot, even those that aren't
// in the unwind blocks. We should be more selective.
if (NeedsSpill) {
DemoteRegToStack(*Inst, true);
++NumSpilled;
}
}
}
// Go through the landing pads and remove any PHIs there.
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
LandingPadInst *LPI = UnwindBlock->getLandingPadInst();
// Place PHIs into a set to avoid invalidating the iterator.
SmallPtrSet<PHINode *, 8> PHIsToDemote;
for (BasicBlock::iterator PN = UnwindBlock->begin(); isa<PHINode>(PN); ++PN)
PHIsToDemote.insert(cast<PHINode>(PN));
if (PHIsToDemote.empty())
continue;
// Demote the PHIs to the stack.
for (PHINode *PN : PHIsToDemote)
DemotePHIToStack(PN);
// Move the landingpad instruction back to the top of the landing pad block.
LPI->moveBefore(UnwindBlock->begin());
}
}
/// setupEntryBlockAndCallSites - Set up the entry block by creating and filling
/// the function context and marking the call sites with the appropriate
/// values. These values are used by the DWARF EH emitter.
bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
SmallVector<ReturnInst *, 16> Returns;
SmallVector<InvokeInst *, 16> Invokes;
SmallSetVector<LandingPadInst *, 16> LPads;
// Look through the terminators of the basic blocks to find invokes.
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
if (Function *Callee = II->getCalledFunction())
if (Callee->isIntrinsic() &&
Callee->getIntrinsicID() == Intrinsic::donothing) {
// Remove the NOP invoke.
BranchInst::Create(II->getNormalDest(), II);
II->eraseFromParent();
continue;
}
Invokes.push_back(II);
LPads.insert(II->getUnwindDest()->getLandingPadInst());
} else if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
Returns.push_back(RI);
}
if (Invokes.empty())
return false;
NumInvokes += Invokes.size();
lowerIncomingArguments(F);
lowerAcrossUnwindEdges(F, Invokes);
Value *FuncCtx =
setupFunctionContext(F, makeArrayRef(LPads.begin(), LPads.end()));
BasicBlock *EntryBB = F.begin();
IRBuilder<> Builder(EntryBB->getTerminator());
// Get a reference to the jump buffer.
Value *JBufPtr =
Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 5, "jbuf_gep");
// Save the frame pointer.
Value *FramePtr = Builder.CreateConstGEP2_32(doubleUnderJBufTy, JBufPtr, 0, 0,
"jbuf_fp_gep");
Value *Val = Builder.CreateCall(FrameAddrFn, Builder.getInt32(0), "fp");
Builder.CreateStore(Val, FramePtr, /*isVolatile=*/true);
// Save the stack pointer.
Value *StackPtr = Builder.CreateConstGEP2_32(doubleUnderJBufTy, JBufPtr, 0, 2,
"jbuf_sp_gep");
Val = Builder.CreateCall(StackAddrFn, {}, "sp");
Builder.CreateStore(Val, StackPtr, /*isVolatile=*/true);
  // Call the setjmp intrinsic. It fills in the rest of the jmpbuf.
Value *SetjmpArg = Builder.CreateBitCast(JBufPtr, Builder.getInt8PtrTy());
Builder.CreateCall(BuiltinSetjmpFn, SetjmpArg);
// Store a pointer to the function context so that the back-end will know
// where to look for it.
Value *FuncCtxArg = Builder.CreateBitCast(FuncCtx, Builder.getInt8PtrTy());
Builder.CreateCall(FuncCtxFn, FuncCtxArg);
  // At this point, we are all set up; update the invoke instructions to mark
// their call_site values.
for (unsigned I = 0, E = Invokes.size(); I != E; ++I) {
insertCallSiteStore(Invokes[I], I + 1);
ConstantInt *CallSiteNum =
ConstantInt::get(Type::getInt32Ty(F.getContext()), I + 1);
// Record the call site value for the back end so it stays associated with
// the invoke.
CallInst::Create(CallSiteFn, CallSiteNum, "", Invokes[I]);
}
// Mark call instructions that aren't nounwind as no-action (call_site ==
// -1). Skip the entry block, as prior to then, no function context has been
// created for this function and any unexpected exceptions thrown will go
// directly to the caller's context, which is what we want anyway, so no need
// to do anything here.
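  // Note that the pre-increment in the loop condition skips the entry block.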
for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;)
for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
if (CallInst *CI = dyn_cast<CallInst>(I)) {
if (!CI->doesNotThrow())
insertCallSiteStore(CI, -1);
} else if (ResumeInst *RI = dyn_cast<ResumeInst>(I)) {
insertCallSiteStore(RI, -1);
}
// Register the function context and make sure it's known to not throw
CallInst *Register =
CallInst::Create(RegisterFn, FuncCtx, "", EntryBB->getTerminator());
Register->setDoesNotThrow();
// Following any allocas not in the entry block, update the saved SP in the
// jmpbuf to the new value.
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (BB == F.begin())
continue;
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
if (CallInst *CI = dyn_cast<CallInst>(I)) {
if (CI->getCalledFunction() != StackRestoreFn)
continue;
} else if (!isa<AllocaInst>(I)) {
continue;
}
Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
StackAddr->insertAfter(I);
Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
StoreStackAddr->insertAfter(StackAddr);
}
}
// Finally, for any returns from this function, if this function contains an
// invoke, add a call to unregister the function context.
for (unsigned I = 0, E = Returns.size(); I != E; ++I)
CallInst::Create(UnregisterFn, FuncCtx, "", Returns[I]);
return true;
}
bool SjLjEHPrepare::runOnFunction(Function &F) {
bool Res = setupEntryBlockAndCallSites(F);
return Res;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/SplitKit.h | //===-------- SplitKit.h - Toolkit for splitting live ranges ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SplitAnalysis class as well as mutator functions for
// live range splitting.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SPLITKIT_H
#define LLVM_LIB_CODEGEN_SPLITKIT_H
#include "LiveRangeCalc.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/SmallPtrSet.h"
namespace llvm {
class ConnectedVNInfoEqClasses;
class LiveInterval;
class LiveIntervals;
class LiveRangeEdit;
class MachineBlockFrequencyInfo;
class MachineInstr;
class MachineLoopInfo;
class MachineRegisterInfo;
class TargetInstrInfo;
class TargetRegisterInfo;
class VirtRegMap;
class VNInfo;
class raw_ostream;
/// SplitAnalysis - Analyze a LiveInterval, looking for live range splitting
/// opportunities.
class LLVM_LIBRARY_VISIBILITY SplitAnalysis {
public:
const MachineFunction &MF;
const VirtRegMap &VRM;
const LiveIntervals &LIS;
const MachineLoopInfo &Loops;
const TargetInstrInfo &TII;
/// Additional information about basic blocks where the current variable is
/// live. Such a block will look like one of these templates:
///
/// 1. | o---x | Internal to block. Variable is only live in this block.
/// 2. |---x | Live-in, kill.
/// 3. | o---| Def, live-out.
/// 4. |---x o---| Live-in, kill, def, live-out. Counted by NumGapBlocks.
/// 5. |---o---o---| Live-through with uses or defs.
/// 6. |-----------| Live-through without uses. Counted by NumThroughBlocks.
///
/// Two BlockInfo entries are created for template 4. One for the live-in
/// segment, and one for the live-out segment. These entries look as if the
/// block were split in the middle where the live range isn't live.
///
/// Live-through blocks without any uses don't get BlockInfo entries. They
/// are simply listed in ThroughBlocks instead.
///
struct BlockInfo {
MachineBasicBlock *MBB;
SlotIndex FirstInstr; ///< First instr accessing current reg.
SlotIndex LastInstr; ///< Last instr accessing current reg.
SlotIndex FirstDef; ///< First non-phi valno->def, or SlotIndex().
bool LiveIn; ///< Current reg is live in.
bool LiveOut; ///< Current reg is live out.
/// isOneInstr - Returns true when this BlockInfo describes a single
/// instruction.
bool isOneInstr() const {
return SlotIndex::isSameInstr(FirstInstr, LastInstr);
}
};
private:
// Current live interval.
const LiveInterval *CurLI;
// Sorted slot indexes of using instructions.
SmallVector<SlotIndex, 8> UseSlots;
/// LastSplitPoint - Last legal split point in each basic block in the current
/// function. The first entry is the first terminator, the second entry is the
/// last valid split point for a variable that is live in to a landing pad
/// successor.
SmallVector<std::pair<SlotIndex, SlotIndex>, 8> LastSplitPoint;
/// UseBlocks - Blocks where CurLI has uses.
SmallVector<BlockInfo, 8> UseBlocks;
/// NumGapBlocks - Number of duplicate entries in UseBlocks for blocks where
/// the live range has a gap.
unsigned NumGapBlocks;
/// ThroughBlocks - Block numbers where CurLI is live through without uses.
BitVector ThroughBlocks;
/// NumThroughBlocks - Number of live-through blocks.
unsigned NumThroughBlocks;
/// DidRepairRange - analyze was forced to shrinkToUses().
bool DidRepairRange;
SlotIndex computeLastSplitPoint(unsigned Num);
  // Summarize statistics by counting instructions using CurLI.
void analyzeUses();
/// calcLiveBlockInfo - Compute per-block information about CurLI.
bool calcLiveBlockInfo();
public:
SplitAnalysis(const VirtRegMap &vrm, const LiveIntervals &lis,
const MachineLoopInfo &mli);
/// analyze - set CurLI to the specified interval, and analyze how it may be
/// split.
void analyze(const LiveInterval *li);
/// didRepairRange() - Returns true if CurLI was invalid and has been repaired
/// by analyze(). This really shouldn't happen, but sometimes the coalescer
/// can create live ranges that end in mid-air.
bool didRepairRange() const { return DidRepairRange; }
/// clear - clear all data structures so SplitAnalysis is ready to analyze a
/// new interval.
void clear();
/// getParent - Return the last analyzed interval.
const LiveInterval &getParent() const { return *CurLI; }
/// getLastSplitPoint - Return the base index of the last valid split point
/// in the basic block numbered Num.
SlotIndex getLastSplitPoint(unsigned Num) {
// Inline the common simple case.
if (LastSplitPoint[Num].first.isValid() &&
!LastSplitPoint[Num].second.isValid())
return LastSplitPoint[Num].first;
return computeLastSplitPoint(Num);
}
/// getLastSplitPointIter - Returns the last split point as an iterator.
MachineBasicBlock::iterator getLastSplitPointIter(MachineBasicBlock*);
/// isOriginalEndpoint - Return true if the original live range was killed or
/// (re-)defined at Idx. Idx should be the 'def' slot for a normal kill/def,
/// and 'use' for an early-clobber def.
/// This can be used to recognize code inserted by earlier live range
/// splitting.
bool isOriginalEndpoint(SlotIndex Idx) const;
/// getUseSlots - Return an array of SlotIndexes of instructions using CurLI.
  /// This includes both use and def operands, at most one entry per instruction.
ArrayRef<SlotIndex> getUseSlots() const { return UseSlots; }
/// getUseBlocks - Return an array of BlockInfo objects for the basic blocks
/// where CurLI has uses.
ArrayRef<BlockInfo> getUseBlocks() const { return UseBlocks; }
/// getNumThroughBlocks - Return the number of through blocks.
unsigned getNumThroughBlocks() const { return NumThroughBlocks; }
/// isThroughBlock - Return true if CurLI is live through MBB without uses.
bool isThroughBlock(unsigned MBB) const { return ThroughBlocks.test(MBB); }
/// getThroughBlocks - Return the set of through blocks.
const BitVector &getThroughBlocks() const { return ThroughBlocks; }
/// getNumLiveBlocks - Return the number of blocks where CurLI is live.
unsigned getNumLiveBlocks() const {
return getUseBlocks().size() - NumGapBlocks + getNumThroughBlocks();
}
/// countLiveBlocks - Return the number of blocks where li is live. This is
/// guaranteed to return the same number as getNumLiveBlocks() after calling
/// analyze(li).
unsigned countLiveBlocks(const LiveInterval *li) const;
typedef SmallPtrSet<const MachineBasicBlock*, 16> BlockPtrSet;
/// shouldSplitSingleBlock - Returns true if it would help to create a local
/// live range for the instructions in BI. There is normally no benefit to
/// creating a live range for a single instruction, but it does enable
/// register class inflation if the instruction has a restricted register
/// class.
///
/// @param BI The block to be isolated.
/// @param SingleInstrs True when single instructions should be isolated.
bool shouldSplitSingleBlock(const BlockInfo &BI, bool SingleInstrs) const;
};
/// SplitEditor - Edit machine code and LiveIntervals for live range
/// splitting.
///
/// - Create a SplitEditor from a SplitAnalysis.
/// - Start a new live interval with openIntv.
/// - Mark the places where the new interval is entered using enterIntv*
/// - Mark the ranges where the new interval is used with useIntv*
/// - Mark the places where the interval is exited with exitIntv*.
/// - Finish the current interval with closeIntv and repeat from 2.
/// - Rewrite instructions with finish().
///
class LLVM_LIBRARY_VISIBILITY SplitEditor {
SplitAnalysis &SA;
LiveIntervals &LIS;
VirtRegMap &VRM;
MachineRegisterInfo &MRI;
MachineDominatorTree &MDT;
const TargetInstrInfo &TII;
const TargetRegisterInfo &TRI;
const MachineBlockFrequencyInfo &MBFI;
public:
/// ComplementSpillMode - Select how the complement live range should be
/// created. SplitEditor automatically creates interval 0 to contain
/// anything that isn't added to another interval. This complement interval
/// can get quite complicated, and it can sometimes be an advantage to allow
/// it to overlap the other intervals. If it is going to spill anyway, no
/// registers are wasted by keeping a value in two places at the same time.
enum ComplementSpillMode {
/// SM_Partition(Default) - Try to create the complement interval so it
/// doesn't overlap any other intervals, and the original interval is
/// partitioned. This may require a large number of back copies and extra
/// PHI-defs. Only segments marked with overlapIntv will be overlapping.
SM_Partition,
/// SM_Size - Overlap intervals to minimize the number of inserted COPY
/// instructions. Copies to the complement interval are hoisted to their
/// common dominator, so only one COPY is required per value in the
/// complement interval. This also means that no extra PHI-defs need to be
/// inserted in the complement interval.
SM_Size,
/// SM_Speed - Overlap intervals to minimize the expected execution
/// frequency of the inserted copies. This is very similar to SM_Size, but
/// the complement interval may get some extra PHI-defs.
SM_Speed
};
private:
/// Edit - The current parent register and new intervals created.
LiveRangeEdit *Edit;
/// Index into Edit of the currently open interval.
/// The index 0 is used for the complement, so the first interval started by
/// openIntv will be 1.
unsigned OpenIdx;
/// The current spill mode, selected by reset().
ComplementSpillMode SpillMode;
typedef IntervalMap<SlotIndex, unsigned> RegAssignMap;
/// Allocator for the interval map. This will eventually be shared with
/// SlotIndexes and LiveIntervals.
RegAssignMap::Allocator Allocator;
/// RegAssign - Map of the assigned register indexes.
/// Edit.get(RegAssign.lookup(Idx)) is the register that should be live at
/// Idx.
RegAssignMap RegAssign;
typedef PointerIntPair<VNInfo*, 1> ValueForcePair;
typedef DenseMap<std::pair<unsigned, unsigned>, ValueForcePair> ValueMap;
/// Values - keep track of the mapping from parent values to values in the new
/// intervals. Given a pair (RegIdx, ParentVNI->id), Values contains:
///
/// 1. No entry - the value is not mapped to Edit.get(RegIdx).
/// 2. (Null, false) - the value is mapped to multiple values in
/// Edit.get(RegIdx). Each value is represented by a minimal live range at
/// its def. The full live range can be inferred exactly from the range
/// of RegIdx in RegAssign.
/// 3. (Null, true). As above, but the ranges in RegAssign are too large, and
/// the live range must be recomputed using LiveRangeCalc::extend().
/// 4. (VNI, false) The value is mapped to a single new value.
/// The new value has no live ranges anywhere.
ValueMap Values;
/// LRCalc - Cache for computing live ranges and SSA update. Each instance
/// can only handle non-overlapping live ranges, so use a separate
/// LiveRangeCalc instance for the complement interval when in spill mode.
LiveRangeCalc LRCalc[2];
/// getLRCalc - Return the LRCalc to use for RegIdx. In spill mode, the
/// complement interval can overlap the other intervals, so it gets its own
/// LRCalc instance. When not in spill mode, all intervals can share one.
LiveRangeCalc &getLRCalc(unsigned RegIdx) {
return LRCalc[SpillMode != SM_Partition && RegIdx != 0];
}
/// defValue - define a value in RegIdx from ParentVNI at Idx.
/// Idx does not have to be ParentVNI->def, but it must be contained within
/// ParentVNI's live range in ParentLI. The new value is added to the value
/// map.
/// Return the new LI value.
VNInfo *defValue(unsigned RegIdx, const VNInfo *ParentVNI, SlotIndex Idx);
/// forceRecompute - Force the live range of ParentVNI in RegIdx to be
/// recomputed by LiveRangeCalc::extend regardless of the number of defs.
/// This is used for values whose live range doesn't match RegAssign exactly.
/// They could have rematerialized, or back-copies may have been moved.
void forceRecompute(unsigned RegIdx, const VNInfo *ParentVNI);
/// defFromParent - Define Reg from ParentVNI at UseIdx using either
/// rematerialization or a COPY from parent. Return the new value.
VNInfo *defFromParent(unsigned RegIdx,
VNInfo *ParentVNI,
SlotIndex UseIdx,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I);
  /// removeBackCopies - Remove the copy instructions that define the values
/// in the vector in the complement interval.
void removeBackCopies(SmallVectorImpl<VNInfo*> &Copies);
  /// findShallowDominator - Returns the least busy dominator of MBB that is
/// also dominated by DefMBB. Busy is measured by loop depth.
MachineBasicBlock *findShallowDominator(MachineBasicBlock *MBB,
MachineBasicBlock *DefMBB);
/// hoistCopiesForSize - Hoist back-copies to the complement interval in a
/// way that minimizes code size. This implements the SM_Size spill mode.
void hoistCopiesForSize();
/// transferValues - Transfer values to the new ranges.
/// Return true if any ranges were skipped.
bool transferValues();
/// extendPHIKillRanges - Extend the ranges of all values killed by original
/// parent PHIDefs.
void extendPHIKillRanges();
/// rewriteAssigned - Rewrite all uses of Edit.getReg() to assigned registers.
void rewriteAssigned(bool ExtendRanges);
/// deleteRematVictims - Delete defs that are dead after rematerializing.
void deleteRematVictims();
public:
/// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
/// Newly created intervals will be appended to newIntervals.
SplitEditor(SplitAnalysis &SA, LiveIntervals&, VirtRegMap&,
MachineDominatorTree&, MachineBlockFrequencyInfo &);
/// reset - Prepare for a new split.
void reset(LiveRangeEdit&, ComplementSpillMode = SM_Partition);
/// Create a new virtual register and live interval.
/// Return the interval index, starting from 1. Interval index 0 is the
/// implicit complement interval.
unsigned openIntv();
/// currentIntv - Return the current interval index.
unsigned currentIntv() const { return OpenIdx; }
/// selectIntv - Select a previously opened interval index.
void selectIntv(unsigned Idx);
/// enterIntvBefore - Enter the open interval before the instruction at Idx.
/// If the parent interval is not live before Idx, a COPY is not inserted.
/// Return the beginning of the new live range.
SlotIndex enterIntvBefore(SlotIndex Idx);
/// enterIntvAfter - Enter the open interval after the instruction at Idx.
/// Return the beginning of the new live range.
SlotIndex enterIntvAfter(SlotIndex Idx);
/// enterIntvAtEnd - Enter the open interval at the end of MBB.
/// Use the open interval from the inserted copy to the MBB end.
/// Return the beginning of the new live range.
SlotIndex enterIntvAtEnd(MachineBasicBlock &MBB);
/// useIntv - indicate that all instructions in MBB should use OpenLI.
void useIntv(const MachineBasicBlock &MBB);
/// useIntv - indicate that all instructions in range should use OpenLI.
void useIntv(SlotIndex Start, SlotIndex End);
/// leaveIntvAfter - Leave the open interval after the instruction at Idx.
/// Return the end of the live range.
SlotIndex leaveIntvAfter(SlotIndex Idx);
/// leaveIntvBefore - Leave the open interval before the instruction at Idx.
/// Return the end of the live range.
SlotIndex leaveIntvBefore(SlotIndex Idx);
/// leaveIntvAtTop - Leave the interval at the top of MBB.
/// Add liveness from the MBB top to the copy.
/// Return the end of the live range.
SlotIndex leaveIntvAtTop(MachineBasicBlock &MBB);
/// overlapIntv - Indicate that all instructions in range should use the open
/// interval, but also let the complement interval be live.
///
/// This doubles the register pressure, but is sometimes required to deal with
/// register uses after the last valid split point.
///
/// The Start index should be a return value from a leaveIntv* call, and End
/// should be in the same basic block. The parent interval must have the same
/// value across the range.
///
void overlapIntv(SlotIndex Start, SlotIndex End);
/// finish - after all the new live ranges have been created, compute the
/// remaining live range, and rewrite instructions to use the new registers.
/// @param LRMap When not null, this vector will map each live range in Edit
/// back to the indices returned by openIntv.
/// There may be extra indices created by dead code elimination.
void finish(SmallVectorImpl<unsigned> *LRMap = nullptr);
/// dump - print the current interval mapping to dbgs().
void dump() const;
// ===--- High level methods ---===
/// splitSingleBlock - Split CurLI into a separate live interval around the
/// uses in a single block. This is intended to be used as part of a larger
/// split, and doesn't call finish().
void splitSingleBlock(const SplitAnalysis::BlockInfo &BI);
/// splitLiveThroughBlock - Split CurLI in the given block such that it
/// enters the block in IntvIn and leaves it in IntvOut. There may be uses in
/// the block, but they will be ignored when placing split points.
///
/// @param MBBNum Block number.
/// @param IntvIn Interval index entering the block.
/// @param LeaveBefore When set, leave IntvIn before this point.
/// @param IntvOut Interval index leaving the block.
/// @param EnterAfter When set, enter IntvOut after this point.
void splitLiveThroughBlock(unsigned MBBNum,
unsigned IntvIn, SlotIndex LeaveBefore,
unsigned IntvOut, SlotIndex EnterAfter);
/// splitRegInBlock - Split CurLI in the given block such that it enters the
/// block in IntvIn and leaves it on the stack (or not at all). Split points
/// are placed in a way that avoids putting uses in the stack interval. This
/// may require creating a local interval when there is interference.
///
/// @param BI Block descriptor.
/// @param IntvIn Interval index entering the block. Not 0.
/// @param LeaveBefore When set, leave IntvIn before this point.
void splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
unsigned IntvIn, SlotIndex LeaveBefore);
/// splitRegOutBlock - Split CurLI in the given block such that it enters the
/// block on the stack (or isn't live-in at all) and leaves it in IntvOut.
/// Split points are placed to avoid interference and such that the uses are
/// not in the stack interval. This may require creating a local interval
/// when there is interference.
///
/// @param BI Block descriptor.
/// @param IntvOut Interval index leaving the block.
/// @param EnterAfter When set, enter IntvOut after this point.
void splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
unsigned IntvOut, SlotIndex EnterAfter);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ScheduleDAGPrinter.cpp | //===-- ScheduleDAGPrinter.cpp - Implement ScheduleDAG::viewGraph() -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG::viewGraph method.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <fstream>
using namespace llvm;
namespace llvm {
template<>
struct DOTGraphTraits<ScheduleDAG*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const ScheduleDAG *G) {
return G->MF.getName();
}
static bool renderGraphFromBottomUp() {
return true;
}
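  // Hide very busy nodes to keep the rendered graph readable.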
static bool isNodeHidden(const SUnit *Node) {
return (Node->NumPreds > 10 || Node->NumSuccs > 10);
}
static bool hasNodeAddressLabel(const SUnit *Node,
const ScheduleDAG *Graph) {
return true;
}
/// If you want to override the dot attributes printed for a particular
/// edge, override this method.
static std::string getEdgeAttributes(const SUnit *Node,
SUnitIterator EI,
const ScheduleDAG *Graph) {
if (EI.isArtificialDep())
return "color=cyan,style=dashed";
if (EI.isCtrlDep())
return "color=blue,style=dashed";
return "";
}
std::string getNodeLabel(const SUnit *Node, const ScheduleDAG *Graph);
static std::string getNodeAttributes(const SUnit *N,
const ScheduleDAG *Graph) {
return "shape=Mrecord";
}
static void addCustomGraphFeatures(ScheduleDAG *G,
GraphWriter<ScheduleDAG*> &GW) {
return G->addCustomGraphFeatures(GW);
}
};
}
std::string DOTGraphTraits<ScheduleDAG*>::getNodeLabel(const SUnit *SU,
const ScheduleDAG *G) {
return G->getGraphNodeLabel(SU);
}
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAG::viewGraph(const Twine &Name, const Twine &Title) {
// This code is only for debugging!
#ifndef NDEBUG
ViewGraph(this, Name, false, Title);
#else
errs() << "ScheduleDAG::viewGraph is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAG::viewGraph() {
viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/PeepholeOptimizer.cpp | //===-- PeepholeOptimizer.cpp - Peephole Optimizations --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
// Optimization of sign / zero extension instructions. It may be extended to
// handle other instructions with similar properties.
//
// On some targets, some instructions, e.g. X86 sign / zero extension, may
// leave the source value in the lower part of the result. This optimization
// will replace some uses of the pre-extension value with uses of the
// sub-register of the results.
//
// - Optimize Comparisons
//
// Optimization of comparison instructions. For instance, in this code:
//
// sub r1, 1
// cmp r1, 0
// bz L1
//
// If the "sub" instruction all ready sets (or could be modified to set) the
// same flag that the "cmp" instruction sets and that "bz" uses, then we can
// eliminate the "cmp" instruction.
//
// Another instance, in this code:
//
// sub r1, r3 | sub r1, imm
// cmp r3, r1 or cmp r1, r3 | cmp r1, imm
// bge L1
//
// If the branch instruction can use flag from "sub", then we can replace
// "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Loads:
//
// Loads that can be folded into a later instruction. A load is foldable
// if it loads to a virtual register and the virtual register defined
// has a single use.
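//
// For instance (an illustrative sketch; the opcodes and addressing mode
// are hypothetical, not tied to a specific target):
//
// r1 = LOAD <mem>      <-- r1 has a single use
// r2 = ADD r0, r1
// =>
// r2 = ADD r0, <mem>   <-- load folded into its only user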
//
// - Optimize Copies and Bitcast (more generally, target specific copies):
//
// Rewrite copies and bitcasts to avoid cross register bank copies
// when possible.
// E.g., Consider the following example, where capital and lower
// letters denote different register file:
// b = copy A <-- cross-bank copy
// C = copy b <-- cross-bank copy
// =>
// b = copy A <-- cross-bank copy
// C = copy A <-- same-bank copy
//
// E.g., for bitcast:
// b = bitcast A <-- cross-bank copy
// C = bitcast b <-- cross-bank copy
// =>
// b = bitcast A <-- cross-bank copy
// C = copy A <-- same-bank copy
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "peephole-opt"
// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
cl::desc("Aggressive extension optimization"));
static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
cl::desc("Disable the peephole optimizer"));
static cl::opt<bool>
DisableAdvCopyOpt("disable-adv-copy-opt", cl::Hidden, cl::init(false),
cl::desc("Disable advanced copy optimization"));
STATISTIC(NumReuse, "Number of extension results reused");
STATISTIC(NumCmps, "Number of compares eliminated");
STATISTIC(NumImmFold, "Number of move immediate folded");
STATISTIC(NumLoadFold, "Number of loads folded");
STATISTIC(NumSelects, "Number of selects optimized");
STATISTIC(NumUncoalescableCopies, "Number of uncoalescable copies optimized");
STATISTIC(NumRewrittenCopies, "Number of copies rewritten");
namespace {
class PeepholeOptimizer : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI;
MachineDominatorTree *DT; // Machine dominator tree
public:
static char ID; // Pass identification
PeepholeOptimizer() : MachineFunctionPass(ID) {
initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
if (Aggressive) {
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
}
}
private:
bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSetImpl<MachineInstr*> &LocalMIs);
bool optimizeSelect(MachineInstr *MI,
SmallPtrSetImpl<MachineInstr *> &LocalMIs);
bool optimizeCondBranch(MachineInstr *MI);
bool optimizeCopyOrBitcast(MachineInstr *MI);
bool optimizeCoalescableCopy(MachineInstr *MI);
bool optimizeUncoalescableCopy(MachineInstr *MI,
SmallPtrSetImpl<MachineInstr *> &LocalMIs);
bool findNextSource(unsigned &Reg, unsigned &SubReg);
bool isMoveImmediate(MachineInstr *MI,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
bool isLoadFoldable(MachineInstr *MI,
SmallSet<unsigned, 16> &FoldAsLoadDefCandidates);
/// \brief Check whether \p MI is understood by the register coalescer
/// but may require some rewriting.
bool isCoalescableCopy(const MachineInstr &MI) {
// SubregToRegs are not interesting, because they are already register
// coalescer friendly.
return MI.isCopy() || (!DisableAdvCopyOpt &&
(MI.isRegSequence() || MI.isInsertSubreg() ||
MI.isExtractSubreg()));
}
/// \brief Check whether \p MI is a copy like instruction that is
/// not recognized by the register coalescer.
bool isUncoalescableCopy(const MachineInstr &MI) {
return MI.isBitcast() ||
(!DisableAdvCopyOpt &&
(MI.isRegSequenceLike() || MI.isInsertSubregLike() ||
MI.isExtractSubregLike()));
}
};
/// \brief Helper class to track the possible sources of a value defined by
/// a (chain of) copy related instructions.
/// Given a definition (instruction and definition index), this class
/// follows the use-def chain to find successive suitable sources.
/// The given source can be used to rewrite the definition into
/// def = COPY src.
///
/// For instance, let us consider the following snippet:
/// v0 =
/// v2 = INSERT_SUBREG v1, v0, sub0
/// def = COPY v2.sub0
///
/// Using a ValueTracker for def = COPY v2.sub0 will give the following
/// suitable sources:
/// v2.sub0 and v0.
/// Then, def can be rewritten into def = COPY v0.
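///
/// A minimal usage sketch (illustrative; assumes a virtual register Reg,
/// a sub-register index SubReg, and MRI/TII in scope):
/// \code
///   ValueTracker VT(Reg, SubReg, *MRI, /*UseAdvancedTracking=*/true, TII);
///   unsigned SrcReg, SrcSubReg;
///   while (const MachineInstr *Src = VT.getNextSource(SrcReg, SrcSubReg)) {
///     // SrcReg:SrcSubReg holds the tracked value; stop whenever it is
///     // suitable, e.g., when it avoids a cross register bank copy.
///   }
/// \endcode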
class ValueTracker {
private:
/// The current point into the use-def chain.
const MachineInstr *Def;
/// The index of the definition in Def.
unsigned DefIdx;
/// The sub register index of the definition.
unsigned DefSubReg;
/// The register where the value can be found.
unsigned Reg;
/// Specify whether or not the value tracking looks through
/// complex instructions. When this is false, the value tracker
/// bails on everything that is not a copy or a bitcast.
///
/// Note: This could have been implemented as a specialized version of
/// the ValueTracker class but that would have complicated the code of
/// the users of this class.
bool UseAdvancedTracking;
/// MachineRegisterInfo used to perform tracking.
const MachineRegisterInfo &MRI;
/// Optional TargetInstrInfo used to perform some complex
/// tracking.
const TargetInstrInfo *TII;
/// \brief Dispatcher to the right underlying implementation of
/// getNextSource.
bool getNextSourceImpl(unsigned &SrcReg, unsigned &SrcSubReg);
/// \brief Specialized version of getNextSource for Copy instructions.
bool getNextSourceFromCopy(unsigned &SrcReg, unsigned &SrcSubReg);
/// \brief Specialized version of getNextSource for Bitcast instructions.
bool getNextSourceFromBitcast(unsigned &SrcReg, unsigned &SrcSubReg);
/// \brief Specialized version of getNextSource for RegSequence
/// instructions.
bool getNextSourceFromRegSequence(unsigned &SrcReg, unsigned &SrcSubReg);
/// \brief Specialized version of getNextSource for InsertSubreg
/// instructions.
bool getNextSourceFromInsertSubreg(unsigned &SrcReg, unsigned &SrcSubReg);
/// \brief Specialized version of getNextSource for ExtractSubreg
/// instructions.
bool getNextSourceFromExtractSubreg(unsigned &SrcReg, unsigned &SrcSubReg);
/// \brief Specialized version of getNextSource for SubregToReg
/// instructions.
bool getNextSourceFromSubregToReg(unsigned &SrcReg, unsigned &SrcSubReg);
public:
/// \brief Create a ValueTracker instance for the value defined by \p Reg.
/// \p DefSubReg represents the sub register index the value tracker will
/// track. It does not need to match the sub register index used in the
/// definition of \p Reg.
/// \p UseAdvancedTracking specifies whether or not the value tracker looks
/// through complex instructions. By default (false), it handles only copy
/// and bitcast instructions.
/// If \p Reg is a physical register, a value tracker constructed with
/// this constructor will not find any alternative source.
/// Indeed, when \p Reg is a physical register that constructor does not
/// know which definition of \p Reg it should track.
/// Use the next constructor to track a physical register.
ValueTracker(unsigned Reg, unsigned DefSubReg,
const MachineRegisterInfo &MRI,
bool UseAdvancedTracking = false,
const TargetInstrInfo *TII = nullptr)
: Def(nullptr), DefIdx(0), DefSubReg(DefSubReg), Reg(Reg),
UseAdvancedTracking(UseAdvancedTracking), MRI(MRI), TII(TII) {
if (!TargetRegisterInfo::isPhysicalRegister(Reg)) {
Def = MRI.getVRegDef(Reg);
DefIdx = MRI.def_begin(Reg).getOperandNo();
}
}
/// \brief Create a ValueTracker instance for the value defined by
/// the pair \p MI, \p DefIdx.
/// Unlike the other constructor, the value tracker produced by this one
/// may be able to find a new source when the definition is a physical
/// register.
/// This could be useful to rewrite target specific instructions into
/// generic copy instructions.
ValueTracker(const MachineInstr &MI, unsigned DefIdx, unsigned DefSubReg,
const MachineRegisterInfo &MRI,
bool UseAdvancedTracking = false,
const TargetInstrInfo *TII = nullptr)
: Def(&MI), DefIdx(DefIdx), DefSubReg(DefSubReg),
UseAdvancedTracking(UseAdvancedTracking), MRI(MRI), TII(TII) {
assert(DefIdx < Def->getDesc().getNumDefs() &&
Def->getOperand(DefIdx).isReg() && "Invalid definition");
Reg = Def->getOperand(DefIdx).getReg();
}
/// \brief Following the use-def chain, get the next available source
/// for the tracked value.
/// When the returned value is not nullptr, \p SrcReg gives the register
/// that contains the tracked value.
/// \note The sub register index returned in \p SrcSubReg must be used
/// on \p SrcReg to access the actual value.
/// \return Unless the returned value is nullptr (i.e., no source found),
/// \p SrcReg gives the register of the next source used in the returned
/// instruction and \p SrcSubReg the sub-register index to be used on that
/// source to get the tracked value. When nullptr is returned, no
/// alternative source has been found.
const MachineInstr *getNextSource(unsigned &SrcReg, unsigned &SrcSubReg);
/// \brief Get the last register where the initial value can be found.
/// Initially this is the register of the definition.
/// Then, after each successful call to getNextSource, this is the
/// register of the last source.
unsigned getReg() const { return Reg; }
};
}
char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
"Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
"Peephole Optimizations", false, false)
/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
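///
/// For instance (a hypothetical sketch in the notation used below; the
/// opcode and sub-register name are illustrative):
///   %reg1025 = <sext> %reg1024    ; source value preserved in sub_16bit
///   ...
///   use %reg1024                  ; use of the pre-extension value
/// becomes:
///   %reg1025 = <sext> %reg1024
///   %reg1026 = COPY %reg1025:sub_16bit
///   use %reg1026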
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSetImpl<MachineInstr*> &LocalMIs) {
unsigned SrcReg, DstReg, SubIdx;
if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
return false;
if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
TargetRegisterInfo::isPhysicalRegister(SrcReg))
return false;
if (MRI->hasOneNonDBGUse(SrcReg))
// No other uses.
return false;
// Ensure DstReg can get a register class that actually supports
// sub-registers. Don't change the class until we commit.
const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
DstRC = TRI->getSubClassWithSubReg(DstRC, SubIdx);
if (!DstRC)
return false;
// The ext instr may be operating on a sub-register of SrcReg as well.
// PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
// register.
// If UseSrcSubIdx is Set, SubIdx also applies to SrcReg, and only uses of
// SrcReg:SubIdx should be replaced.
bool UseSrcSubIdx =
TRI->getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != nullptr;
// The source has other uses. See if we can replace the other uses with use of
// the result of the extension.
SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
ReachedBBs.insert(UI.getParent());
// Uses that are in the same BB as uses of the result of the instruction.
SmallVector<MachineOperand*, 8> Uses;
// Uses that the result of the instruction can reach.
SmallVector<MachineOperand*, 8> ExtendedUses;
bool ExtendLife = true;
for (MachineOperand &UseMO : MRI->use_nodbg_operands(SrcReg)) {
MachineInstr *UseMI = UseMO.getParent();
if (UseMI == MI)
continue;
if (UseMI->isPHI()) {
ExtendLife = false;
continue;
}
// Only accept uses of SrcReg:SubIdx.
if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
continue;
// It's an error to translate this:
//
// %reg1025 = <sext> %reg1024
// ...
// %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
//
// into this:
//
// %reg1025 = <sext> %reg1024
// ...
// %reg1027 = COPY %reg1025:4
// %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
//
// The problem here is that SUBREG_TO_REG is there to assert that an
// implicit zext occurs. It doesn't insert a zext instruction. If we allow
// the COPY here, it will give us the value after the <sext>, not the
// original value of %reg1024 before <sext>.
if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
continue;
MachineBasicBlock *UseMBB = UseMI->getParent();
if (UseMBB == MBB) {
// Local uses that come after the extension.
if (!LocalMIs.count(UseMI))
Uses.push_back(&UseMO);
} else if (ReachedBBs.count(UseMBB)) {
// Non-local uses where the result of the extension is used. Always
// replace these unless it's a PHI.
Uses.push_back(&UseMO);
} else if (Aggressive && DT->dominates(MBB, UseMBB)) {
// We may want to extend the live range of the extension result in order
// to replace these uses.
ExtendedUses.push_back(&UseMO);
} else {
// Both will be live out of the def MBB anyway. Don't extend live range of
// the extension result.
ExtendLife = false;
break;
}
}
if (ExtendLife && !ExtendedUses.empty())
// Extend the liveness of the extension result.
Uses.append(ExtendedUses.begin(), ExtendedUses.end());
// Now replace all uses.
bool Changed = false;
if (!Uses.empty()) {
SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;
// Look for PHI uses of the extended result, we don't want to extend the
// liveness of a PHI input. It breaks all kinds of assumptions downstream.
// A PHI use is expected to be the kill of its source values.
for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
if (UI.isPHI())
PHIBBs.insert(UI.getParent());
const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
MachineOperand *UseMO = Uses[i];
MachineInstr *UseMI = UseMO->getParent();
MachineBasicBlock *UseMBB = UseMI->getParent();
if (PHIBBs.count(UseMBB))
continue;
// About to add uses of DstReg, clear DstReg's kill flags.
if (!Changed) {
MRI->clearKillFlags(DstReg);
MRI->constrainRegClass(DstReg, DstRC);
}
unsigned NewVR = MRI->createVirtualRegister(RC);
MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
TII->get(TargetOpcode::COPY), NewVR)
.addReg(DstReg, 0, SubIdx);
// SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
if (UseSrcSubIdx) {
Copy->getOperand(0).setSubReg(SubIdx);
Copy->getOperand(0).setIsUndef();
}
UseMO->setReg(NewVR);
++NumReuse;
Changed = true;
}
}
return Changed;
}
/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and use
/// the flag from the previous instruction.
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
MachineBasicBlock *MBB) {
// If this instruction is a comparison against zero and isn't comparing a
// physical register, we can try to optimize it.
unsigned SrcReg, SrcReg2;
int CmpMask, CmpValue;
if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
(SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
return false;
// Attempt to optimize the comparison instruction.
if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
++NumCmps;
return true;
}
return false;
}
/// Optimize a select instruction.
bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI,
SmallPtrSetImpl<MachineInstr *> &LocalMIs) {
unsigned TrueOp = 0;
unsigned FalseOp = 0;
bool Optimizable = false;
SmallVector<MachineOperand, 4> Cond;
if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
return false;
if (!Optimizable)
return false;
if (!TII->optimizeSelect(MI, LocalMIs))
return false;
MI->eraseFromParent();
++NumSelects;
return true;
}
/// \brief Check if a simpler conditional branch can be
// generated
bool PeepholeOptimizer::optimizeCondBranch(MachineInstr *MI) {
return TII->optimizeCondBranch(MI);
}
/// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
const TargetRegisterClass *DefRC,
unsigned DefSubReg,
const TargetRegisterClass *SrcRC,
unsigned SrcSubReg) {
// Same register class.
if (DefRC == SrcRC)
return true;
// Both operands are sub registers. Check if they share a register class.
unsigned SrcIdx, DefIdx;
if (SrcSubReg && DefSubReg)
return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
SrcIdx, DefIdx) != nullptr;
// At most one of the registers is a sub-register; make it Src to avoid
// duplicating the test.
if (!SrcSubReg) {
std::swap(DefSubReg, SrcSubReg);
std::swap(DefRC, SrcRC);
}
// One of the registers is a sub-register; check if we can get a superclass.
if (SrcSubReg)
return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;
// Plain copy.
return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}
/// \brief Try to find the next source that shares the same register file
/// for the value defined by \p Reg and \p SubReg.
/// When true is returned, \p Reg and \p SubReg are updated with the
/// register number and sub-register index of the new source.
/// \return False if no alternative sources are available. True otherwise.
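///
/// For instance, reusing the example from the file header (capital and
/// lower-case letters denote different register banks):
///   b = copy A   <-- cross-bank copy
///   C = copy b   <-- cross-bank copy
/// Starting from source b of the second copy, the tracker walks up to A,
/// which shares C's register bank, so \p Reg becomes A and the copy can
/// then be rewritten as C = copy A.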
bool PeepholeOptimizer::findNextSource(unsigned &Reg, unsigned &SubReg) {
// Do not try to find a new source for a physical register.
// So far we do not have any motivating example for doing that.
// Thus, instead of maintaining untested code, we will revisit that if
// that changes at some point.
if (TargetRegisterInfo::isPhysicalRegister(Reg))
return false;
const TargetRegisterClass *DefRC = MRI->getRegClass(Reg);
unsigned DefSubReg = SubReg;
unsigned Src;
unsigned SrcSubReg;
bool ShouldRewrite = false;
// Follow the chain of copies until we reach the top of the use-def chain
// or find a more suitable source.
ValueTracker ValTracker(Reg, DefSubReg, *MRI, !DisableAdvCopyOpt, TII);
do {
unsigned CopySrcReg, CopySrcSubReg;
if (!ValTracker.getNextSource(CopySrcReg, CopySrcSubReg))
break;
Src = CopySrcReg;
SrcSubReg = CopySrcSubReg;
// Do not extend the live-ranges of physical registers as they add
// constraints to the register allocator.
// Moreover, if we want to extend the live-range of a physical register,
// unlike an SSA virtual register, we would have to check that it is not
// redefined before the related use.
if (TargetRegisterInfo::isPhysicalRegister(Src))
break;
const TargetRegisterClass *SrcRC = MRI->getRegClass(Src);
// If this source does not incur a cross register bank copy, use it.
ShouldRewrite = shareSameRegisterFile(*TRI, DefRC, DefSubReg, SrcRC,
SrcSubReg);
} while (!ShouldRewrite);
// If we did not find a more suitable source, there is nothing to optimize.
if (!ShouldRewrite || Src == Reg)
return false;
Reg = Src;
SubReg = SrcSubReg;
return true;
}
namespace {
/// \brief Helper class to rewrite the arguments of a copy-like instruction.
class CopyRewriter {
protected:
/// The copy-like instruction.
MachineInstr &CopyLike;
/// The index of the source being rewritten.
unsigned CurrentSrcIdx;
public:
CopyRewriter(MachineInstr &MI) : CopyLike(MI), CurrentSrcIdx(0) {}
virtual ~CopyRewriter() {}
/// \brief Get the next rewritable source (SrcReg, SrcSubReg) and
/// the related value that it affects (TrackReg, TrackSubReg).
/// A source is considered rewritable if its register class and the
/// register class of the related TrackReg may not be register
/// coalescer friendly. In other words, given a copy-like instruction
/// not all the arguments may be returned as rewritable sources, since
/// some arguments are already register coalescer friendly.
///
/// Each call of this method moves the current source to the next
/// rewritable source.
/// For instance, let CopyLike be the instruction to rewrite.
/// CopyLike has one definition and one source:
/// dst.dstSubIdx = CopyLike src.srcSubIdx.
///
/// The first call will give the first rewritable source, i.e.,
/// the only source this instruction has:
/// (SrcReg, SrcSubReg) = (src, srcSubIdx).
/// This source defines the whole definition, i.e.,
/// (TrackReg, TrackSubReg) = (dst, dstSubIdx).
///
/// The second and subsequent calls will return false, as there is only one
/// rewritable source.
///
/// \return True if a rewritable source has been found, false otherwise.
/// The output arguments are valid if and only if true is returned.
virtual bool getNextRewritableSource(unsigned &SrcReg, unsigned &SrcSubReg,
unsigned &TrackReg,
unsigned &TrackSubReg) {
// If CurrentSrcIdx == 1, this means this function has already been
// called once. CopyLike has one definition and one argument, thus,
// there is nothing else to rewrite.
if (!CopyLike.isCopy() || CurrentSrcIdx == 1)
return false;
// This is the first call to getNextRewritableSource.
// Move the CurrentSrcIdx to remember that we made that call.
CurrentSrcIdx = 1;
// The rewritable source is the argument.
const MachineOperand &MOSrc = CopyLike.getOperand(1);
SrcReg = MOSrc.getReg();
SrcSubReg = MOSrc.getSubReg();
// What we track are the alternative sources of the definition.
const MachineOperand &MODef = CopyLike.getOperand(0);
TrackReg = MODef.getReg();
TrackSubReg = MODef.getSubReg();
return true;
}
/// \brief Rewrite the current source with \p NewReg and \p NewSubReg
/// if possible.
/// \return True if the rewriting was possible, false otherwise.
virtual bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) {
if (!CopyLike.isCopy() || CurrentSrcIdx != 1)
return false;
MachineOperand &MOSrc = CopyLike.getOperand(CurrentSrcIdx);
MOSrc.setReg(NewReg);
MOSrc.setSubReg(NewSubReg);
return true;
}
};
/// \brief Specialized rewriter for INSERT_SUBREG instruction.
class InsertSubregRewriter : public CopyRewriter {
public:
InsertSubregRewriter(MachineInstr &MI) : CopyRewriter(MI) {
assert(MI.isInsertSubreg() && "Invalid instruction");
}
/// \brief See CopyRewriter::getNextRewritableSource.
/// Here CopyLike has the following form:
/// dst = INSERT_SUBREG Src1, Src2.src2SubIdx, subIdx.
/// Src1 has the same register class as dst, hence, there is
/// nothing to rewrite.
/// Src2.src2SubIdx, may not be register coalescer friendly.
/// Therefore, the first call to this method returns:
/// (SrcReg, SrcSubReg) = (Src2, src2SubIdx).
/// (TrackReg, TrackSubReg) = (dst, subIdx).
///
/// Subsequent calls will return false.
bool getNextRewritableSource(unsigned &SrcReg, unsigned &SrcSubReg,
unsigned &TrackReg,
unsigned &TrackSubReg) override {
// If we already get the only source we can rewrite, return false.
if (CurrentSrcIdx == 2)
return false;
// We are looking at v2 = INSERT_SUBREG v0, v1, sub0.
CurrentSrcIdx = 2;
const MachineOperand &MOInsertedReg = CopyLike.getOperand(2);
SrcReg = MOInsertedReg.getReg();
SrcSubReg = MOInsertedReg.getSubReg();
const MachineOperand &MODef = CopyLike.getOperand(0);
// We want to track something that is compatible with the
// partial definition.
TrackReg = MODef.getReg();
if (MODef.getSubReg())
// Bails if we have to compose sub-register indices.
return false;
TrackSubReg = (unsigned)CopyLike.getOperand(3).getImm();
return true;
}
bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override {
if (CurrentSrcIdx != 2)
return false;
// We are rewriting the inserted reg.
MachineOperand &MO = CopyLike.getOperand(CurrentSrcIdx);
MO.setReg(NewReg);
MO.setSubReg(NewSubReg);
return true;
}
};
/// \brief Specialized rewriter for EXTRACT_SUBREG instruction.
class ExtractSubregRewriter : public CopyRewriter {
const TargetInstrInfo &TII;
public:
ExtractSubregRewriter(MachineInstr &MI, const TargetInstrInfo &TII)
: CopyRewriter(MI), TII(TII) {
assert(MI.isExtractSubreg() && "Invalid instruction");
}
/// \brief See CopyRewriter::getNextRewritableSource.
/// Here CopyLike has the following form:
/// dst.dstSubIdx = EXTRACT_SUBREG Src, subIdx.
/// There is only one rewritable source: Src.subIdx,
/// which defines dst.dstSubIdx.
bool getNextRewritableSource(unsigned &SrcReg, unsigned &SrcSubReg,
unsigned &TrackReg,
unsigned &TrackSubReg) override {
// If we already get the only source we can rewrite, return false.
if (CurrentSrcIdx == 1)
return false;
// We are looking at v1 = EXTRACT_SUBREG v0, sub0.
CurrentSrcIdx = 1;
const MachineOperand &MOExtractedReg = CopyLike.getOperand(1);
SrcReg = MOExtractedReg.getReg();
// If we have to compose sub-register indices, bails out.
if (MOExtractedReg.getSubReg())
return false;
SrcSubReg = CopyLike.getOperand(2).getImm();
// We want to track something that is compatible with the definition.
const MachineOperand &MODef = CopyLike.getOperand(0);
TrackReg = MODef.getReg();
TrackSubReg = MODef.getSubReg();
return true;
}
bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override {
// The only source we can rewrite is the input register.
if (CurrentSrcIdx != 1)
return false;
CopyLike.getOperand(CurrentSrcIdx).setReg(NewReg);
// If we find a source that does not require to extract something,
// rewrite the operation with a copy.
if (!NewSubReg) {
// Move the current index to an invalid position.
// We do not want another call to this method to be able
// to do any change.
CurrentSrcIdx = -1;
// Rewrite the operation as a COPY.
// Get rid of the sub-register index.
CopyLike.RemoveOperand(2);
// Morph the operation into a COPY.
CopyLike.setDesc(TII.get(TargetOpcode::COPY));
return true;
}
CopyLike.getOperand(CurrentSrcIdx + 1).setImm(NewSubReg);
return true;
}
};
/// \brief Specialized rewriter for REG_SEQUENCE instruction.
class RegSequenceRewriter : public CopyRewriter {
public:
RegSequenceRewriter(MachineInstr &MI) : CopyRewriter(MI) {
assert(MI.isRegSequence() && "Invalid instruction");
}
/// \brief See CopyRewriter::getNextRewritableSource.
/// Here CopyLike has the following form:
/// dst = REG_SEQUENCE Src1.src1SubIdx, subIdx1, Src2.src2SubIdx, subIdx2.
/// Each call will return a different source, walking all the available
/// sources.
///
/// The first call returns:
/// (SrcReg, SrcSubReg) = (Src1, src1SubIdx).
/// (TrackReg, TrackSubReg) = (dst, subIdx1).
///
/// The second call returns:
/// (SrcReg, SrcSubReg) = (Src2, src2SubIdx).
/// (TrackReg, TrackSubReg) = (dst, subIdx2).
///
/// And so on, until all the sources have been traversed, then
/// it returns false.
bool getNextRewritableSource(unsigned &SrcReg, unsigned &SrcSubReg,
unsigned &TrackReg,
unsigned &TrackSubReg) override {
// We are looking at v0 = REG_SEQUENCE v1, sub1, v2, sub2, etc.
// If this is the first call, move to the first argument.
if (CurrentSrcIdx == 0) {
CurrentSrcIdx = 1;
} else {
// Otherwise, move to the next argument and check that it is valid.
CurrentSrcIdx += 2;
if (CurrentSrcIdx >= CopyLike.getNumOperands())
return false;
}
const MachineOperand &MOInsertedReg = CopyLike.getOperand(CurrentSrcIdx);
SrcReg = MOInsertedReg.getReg();
// If we have to compose sub-register indices, bails out.
if ((SrcSubReg = MOInsertedReg.getSubReg()))
return false;
// We want to track something that is compatible with the related
// partial definition.
TrackSubReg = CopyLike.getOperand(CurrentSrcIdx + 1).getImm();
const MachineOperand &MODef = CopyLike.getOperand(0);
TrackReg = MODef.getReg();
// If we have to compose sub-registers, bails.
return MODef.getSubReg() == 0;
}
bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override {
// We cannot rewrite out of bound operands.
// Moreover, rewritable sources are at odd positions.
if ((CurrentSrcIdx & 1) != 1 || CurrentSrcIdx > CopyLike.getNumOperands())
return false;
MachineOperand &MO = CopyLike.getOperand(CurrentSrcIdx);
MO.setReg(NewReg);
MO.setSubReg(NewSubReg);
return true;
}
};
} // End namespace.
/// \brief Get the appropriated CopyRewriter for \p MI.
/// \return A pointer to a dynamically allocated CopyRewriter or nullptr
/// if no rewriter works for \p MI.
static CopyRewriter *getCopyRewriter(MachineInstr &MI,
const TargetInstrInfo &TII) {
switch (MI.getOpcode()) {
default:
return nullptr;
case TargetOpcode::COPY:
return new CopyRewriter(MI);
case TargetOpcode::INSERT_SUBREG:
return new InsertSubregRewriter(MI);
case TargetOpcode::EXTRACT_SUBREG:
return new ExtractSubregRewriter(MI, TII);
case TargetOpcode::REG_SEQUENCE:
return new RegSequenceRewriter(MI);
}
llvm_unreachable(nullptr);
}
/// \brief Optimize generic copy instructions to avoid cross
/// register bank copy. The optimization looks through a chain of
/// copies and tries to find a source that has a compatible register
/// class.
/// Two register classes are considered to be compatible if they share
/// the same register bank.
/// New copies issued by this optimization are register allocator
/// friendly. This optimization does not remove any copy as it may
/// overconstrain the register allocator, but replaces some operands
/// when possible.
/// \pre isCoalescableCopy(*MI) is true.
/// \return True, when \p MI has been rewritten. False otherwise.
bool PeepholeOptimizer::optimizeCoalescableCopy(MachineInstr *MI) {
assert(MI && isCoalescableCopy(*MI) && "Invalid argument");
assert(MI->getDesc().getNumDefs() == 1 &&
"Coalescer can understand multiple defs?!");
const MachineOperand &MODef = MI->getOperand(0);
// Do not rewrite physical definitions.
if (TargetRegisterInfo::isPhysicalRegister(MODef.getReg()))
return false;
bool Changed = false;
// Get the right rewriter for the current copy.
std::unique_ptr<CopyRewriter> CpyRewriter(getCopyRewriter(*MI, *TII));
// If none exists, bails out.
if (!CpyRewriter)
return false;
// Rewrite each rewritable source.
unsigned SrcReg, SrcSubReg, TrackReg, TrackSubReg;
while (CpyRewriter->getNextRewritableSource(SrcReg, SrcSubReg, TrackReg,
TrackSubReg)) {
unsigned NewSrc = TrackReg;
unsigned NewSubReg = TrackSubReg;
// Try to find a more suitable source.
// If we failed to do so, or get the actual source,
// move to the next source.
if (!findNextSource(NewSrc, NewSubReg) || SrcReg == NewSrc)
continue;
// Rewrite source.
if (CpyRewriter->RewriteCurrentSource(NewSrc, NewSubReg)) {
// We may have extended the live-range of NewSrc, account for that.
MRI->clearKillFlags(NewSrc);
Changed = true;
}
}
// TODO: We could have a clean-up method to tidy the instruction.
// E.g., v0 = INSERT_SUBREG v1, v1.sub0, sub0
// => v0 = COPY v1
// Currently we haven't seen motivating example for that and we
// want to avoid untested code.
NumRewrittenCopies += Changed;
return Changed;
}
/// \brief Optimize copy-like instructions to create
/// register coalescer friendly instruction.
/// The optimization tries to kill-off the \p MI by looking
/// through a chain of copies to find a source that has a compatible
/// register class.
/// If such a source is found, it replaces \p MI by a generic COPY
/// operation.
/// \pre isUncoalescableCopy(*MI) is true.
/// \return True, when \p MI has been optimized. In that case, \p MI has
/// been removed from its parent.
/// All COPY instructions created are inserted in \p LocalMIs.
bool PeepholeOptimizer::optimizeUncoalescableCopy(
MachineInstr *MI, SmallPtrSetImpl<MachineInstr *> &LocalMIs) {
assert(MI && isUncoalescableCopy(*MI) && "Invalid argument");
// Check if we can rewrite all the values defined by this instruction.
SmallVector<
std::pair<TargetInstrInfo::RegSubRegPair, TargetInstrInfo::RegSubRegPair>,
4> RewritePairs;
for (const MachineOperand &MODef : MI->defs()) {
if (MODef.isDead())
// We can ignore those.
continue;
// If a physical register is here, this is probably for a good reason.
// Do not rewrite that.
if (TargetRegisterInfo::isPhysicalRegister(MODef.getReg()))
return false;
// If we do not know how to rewrite this definition, there is no point
// in trying to kill this instruction.
TargetInstrInfo::RegSubRegPair Def(MODef.getReg(), MODef.getSubReg());
TargetInstrInfo::RegSubRegPair Src = Def;
if (!findNextSource(Src.Reg, Src.SubReg))
return false;
RewritePairs.push_back(std::make_pair(Def, Src));
}
// The change is possible for all defs, do it.
for (const auto &PairDefSrc : RewritePairs) {
const auto &Def = PairDefSrc.first;
const auto &Src = PairDefSrc.second;
// Rewrite the "copy" in a way the register coalescer understands.
assert(!TargetRegisterInfo::isPhysicalRegister(Def.Reg) &&
"We do not rewrite physical registers");
const TargetRegisterClass *DefRC = MRI->getRegClass(Def.Reg);
unsigned NewVR = MRI->createVirtualRegister(DefRC);
MachineInstr *NewCopy = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(TargetOpcode::COPY),
NewVR).addReg(Src.Reg, 0, Src.SubReg);
NewCopy->getOperand(0).setSubReg(Def.SubReg);
if (Def.SubReg)
NewCopy->getOperand(0).setIsUndef();
LocalMIs.insert(NewCopy);
MRI->replaceRegWith(Def.Reg, NewVR);
MRI->clearKillFlags(NewVR);
// We extended the lifetime of Src.
// Clear the kill flags to account for that.
MRI->clearKillFlags(Src.Reg);
}
// MI is now dead.
MI->eraseFromParent();
++NumUncoalescableCopies;
return true;
}
/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads to virtual registers where the virtual
/// register defined has a single use.
bool PeepholeOptimizer::isLoadFoldable(
MachineInstr *MI,
SmallSet<unsigned, 16> &FoldAsLoadDefCandidates) {
if (!MI->canFoldAsLoad() || !MI->mayLoad())
return false;
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.getNumDefs() != 1)
return false;
unsigned Reg = MI->getOperand(0).getReg();
// To reduce compilation time, we check MRI->hasOneNonDBGUse when inserting
// loads. It should be checked when processing uses of the load, since
// uses can be removed during peephole.
if (!MI->getOperand(0).getSubReg() &&
TargetRegisterInfo::isVirtualRegister(Reg) &&
MRI->hasOneNonDBGUse(Reg)) {
FoldAsLoadDefCandidates.insert(Reg);
return true;
}
return false;
}
bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
const MCInstrDesc &MCID = MI->getDesc();
if (!MI->isMoveImmediate())
return false;
if (MCID.getNumDefs() != 1)
return false;
unsigned Reg = MI->getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
ImmDefMIs.insert(std::make_pair(Reg, MI));
ImmDefRegs.insert(Reg);
return true;
}
return false;
}
/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
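///
/// For instance (an illustrative sketch; the opcodes are hypothetical and
/// whether the fold happens is up to the target's FoldImmediate hook):
///   %v0 = MOVi 42
///   %v1 = ADDrr %v2, %v0
/// may become:
///   %v1 = ADDri %v2, 42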
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (ImmDefRegs.count(Reg) == 0)
continue;
DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
assert(II != ImmDefMIs.end());
if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
++NumImmFold;
return true;
}
}
return false;
}
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');
if (DisablePeephole)
return false;
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : nullptr;
bool Changed = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
bool SeenMoveImm = false;
// During this forward scan, at some point it needs to answer the question
// "given a pointer to an MI in the current BB, is it located before or
// after the current instruction".
// To perform this, the following set keeps track of the MIs already seen
// during the scan; if an MI is not in the set, it is assumed to be located
// after. Newly created MIs have to be inserted in the set as well.
SmallPtrSet<MachineInstr*, 16> LocalMIs;
SmallSet<unsigned, 4> ImmDefRegs;
DenseMap<unsigned, MachineInstr*> ImmDefMIs;
SmallSet<unsigned, 16> FoldAsLoadDefCandidates;
for (MachineBasicBlock::iterator
MII = I->begin(), MIE = I->end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
// We may be erasing MI below, increment MII now.
++MII;
LocalMIs.insert(MI);
// Skip debug values. They should not affect this peephole optimization.
if (MI->isDebugValue())
continue;
// If there exists an instruction which belongs to the following
// categories, we will discard the load candidates.
if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() ||
MI->isKill() || MI->isInlineAsm() ||
MI->hasUnmodeledSideEffects()) {
FoldAsLoadDefCandidates.clear();
continue;
}
if (MI->mayStore() || MI->isCall())
FoldAsLoadDefCandidates.clear();
if ((isUncoalescableCopy(*MI) &&
optimizeUncoalescableCopy(MI, LocalMIs)) ||
(MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
(MI->isSelect() && optimizeSelect(MI, LocalMIs))) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
continue;
}
if (MI->isConditionalBranch() && optimizeCondBranch(MI)) {
Changed = true;
continue;
}
if (isCoalescableCopy(*MI) && optimizeCoalescableCopy(MI)) {
// MI is just rewritten.
Changed = true;
continue;
}
if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
SeenMoveImm = true;
} else {
Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
// optimizeExtInstr might have created new instructions after MI
// and before the already incremented MII. Adjust MII so that the
// next iteration sees the new instructions.
MII = MI;
++MII;
if (SeenMoveImm)
Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
// Check whether MI is a load candidate for folding into a later
// instruction. If MI is not a candidate, check whether we can fold an
// earlier load into MI.
if (!isLoadFoldable(MI, FoldAsLoadDefCandidates) &&
!FoldAsLoadDefCandidates.empty()) {
const MCInstrDesc &MIDesc = MI->getDesc();
for (unsigned i = MIDesc.getNumDefs(); i != MIDesc.getNumOperands();
++i) {
const MachineOperand &MOp = MI->getOperand(i);
if (!MOp.isReg())
continue;
unsigned FoldAsLoadDefReg = MOp.getReg();
if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
// We need to fold load after optimizeCmpInstr, since
// optimizeCmpInstr can enable folding by converting SUB to CMP.
// Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and
// we need it for markUsesInDebugValueAsUndef().
unsigned FoldedReg = FoldAsLoadDefReg;
MachineInstr *DefMI = nullptr;
MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
FoldAsLoadDefReg,
DefMI);
if (FoldMI) {
// Update LocalMIs since we replaced MI with FoldMI and deleted
// DefMI.
DEBUG(dbgs() << "Replacing: " << *MI);
DEBUG(dbgs() << " With: " << *FoldMI);
LocalMIs.erase(MI);
LocalMIs.erase(DefMI);
LocalMIs.insert(FoldMI);
MI->eraseFromParent();
DefMI->eraseFromParent();
MRI->markUsesInDebugValueAsUndef(FoldedReg);
FoldAsLoadDefCandidates.erase(FoldedReg);
++NumLoadFold;
// MI is replaced with FoldMI.
Changed = true;
break;
}
}
}
}
}
}
return Changed;
}
bool ValueTracker::getNextSourceFromCopy(unsigned &SrcReg,
unsigned &SrcSubReg) {
assert(Def->isCopy() && "Invalid definition");
// Copy instructions are supposed to be: Def = Src.
// If someone breaks this assumption, bad things will happen everywhere.
assert(Def->getNumOperands() == 2 && "Invalid number of operands");
if (Def->getOperand(DefIdx).getSubReg() != DefSubReg)
// If we look for a different subreg, it means we want a subreg of src.
// Bails as we do not support composing subreg yet.
return false;
// Otherwise, we want the whole source.
const MachineOperand &Src = Def->getOperand(1);
SrcReg = Src.getReg();
SrcSubReg = Src.getSubReg();
return true;
}
bool ValueTracker::getNextSourceFromBitcast(unsigned &SrcReg,
unsigned &SrcSubReg) {
assert(Def->isBitcast() && "Invalid definition");
// Bail if there are effects that a plain copy will not expose.
if (Def->hasUnmodeledSideEffects())
return false;
// Bitcasts with more than one def are not supported.
if (Def->getDesc().getNumDefs() != 1)
return false;
if (Def->getOperand(DefIdx).getSubReg() != DefSubReg)
// If we look for a different subreg, it means we want a subreg of the src.
// Bails as we do not support composing subreg yet.
return false;
unsigned SrcIdx = Def->getNumOperands();
for (unsigned OpIdx = DefIdx + 1, EndOpIdx = SrcIdx; OpIdx != EndOpIdx;
++OpIdx) {
const MachineOperand &MO = Def->getOperand(OpIdx);
if (!MO.isReg() || !MO.getReg())
continue;
assert(!MO.isDef() && "We should have skipped all the definitions by now");
if (SrcIdx != EndOpIdx)
// Multiple sources?
return false;
SrcIdx = OpIdx;
}
const MachineOperand &Src = Def->getOperand(SrcIdx);
SrcReg = Src.getReg();
SrcSubReg = Src.getSubReg();
return true;
}
bool ValueTracker::getNextSourceFromRegSequence(unsigned &SrcReg,
unsigned &SrcSubReg) {
assert((Def->isRegSequence() || Def->isRegSequenceLike()) &&
"Invalid definition");
if (Def->getOperand(DefIdx).getSubReg())
// If we are composing subreg, bails out.
// The case we are checking is Def.<subreg> = REG_SEQUENCE.
// This should almost never happen as the SSA property is tracked at
// the register level (as opposed to the subreg level).
// I.e.,
// Def.sub0 =
// Def.sub1 =
// is a valid SSA representation for Def.sub0 and Def.sub1, but not for
// Def. Thus, it must not be generated.
// However, some code could theoretically generate a single
// Def.sub0 (i.e., not defining the other subregs) and we would
// have this case.
// If we can ascertain (or force) that this never happens, we could
// turn that into an assertion.
return false;
if (!TII)
// We could handle the REG_SEQUENCE here, but we do not want to
// duplicate the code from the generic TII.
return false;
SmallVector<TargetInstrInfo::RegSubRegPairAndIdx, 8> RegSeqInputRegs;
if (!TII->getRegSequenceInputs(*Def, DefIdx, RegSeqInputRegs))
return false;
// We are looking at:
// Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
// Check if one of the operand defines the subreg we are interested in.
for (auto &RegSeqInput : RegSeqInputRegs) {
if (RegSeqInput.SubIdx == DefSubReg) {
if (RegSeqInput.SubReg)
// Bails if we have to compose sub registers.
return false;
SrcReg = RegSeqInput.Reg;
SrcSubReg = RegSeqInput.SubReg;
return true;
}
}
// If the subreg we are tracking is super-defined by another subreg,
// we could follow this value. However, this would require to compose
// the subreg and we do not do that for now.
return false;
}
bool ValueTracker::getNextSourceFromInsertSubreg(unsigned &SrcReg,
unsigned &SrcSubReg) {
assert((Def->isInsertSubreg() || Def->isInsertSubregLike()) &&
"Invalid definition");
if (Def->getOperand(DefIdx).getSubReg())
// If we are composing subreg, bails out.
// Same remark as getNextSourceFromRegSequence.
// I.e., this may be turned into an assert.
return false;
if (!TII)
// We could handle the REG_SEQUENCE here, but we do not want to
// duplicate the code from the generic TII.
return false;
TargetInstrInfo::RegSubRegPair BaseReg;
TargetInstrInfo::RegSubRegPairAndIdx InsertedReg;
if (!TII->getInsertSubregInputs(*Def, DefIdx, BaseReg, InsertedReg))
return false;
// We are looking at:
// Def = INSERT_SUBREG v0, v1, sub1
// There are two cases:
// 1. DefSubReg == sub1, get v1.
// 2. DefSubReg != sub1, the value may be available through v0.
// #1 Check if the inserted register matches the required sub index.
if (InsertedReg.SubIdx == DefSubReg) {
SrcReg = InsertedReg.Reg;
SrcSubReg = InsertedReg.SubReg;
return true;
}
// #2 Otherwise, if the sub register we are looking for is not partially
// defined by the inserted element, we can look through the main
// register (v0).
const MachineOperand &MODef = Def->getOperand(DefIdx);
// If the result register (Def) and the base register (v0) do not
// have the same register class or if we have to compose
// subregisters, bails out.
if (MRI.getRegClass(MODef.getReg()) != MRI.getRegClass(BaseReg.Reg) ||
BaseReg.SubReg)
return false;
// Get the TRI and check if the inserted sub-register overlaps with the
// sub-register we are tracking.
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
if (!TRI ||
(TRI->getSubRegIndexLaneMask(DefSubReg) &
TRI->getSubRegIndexLaneMask(InsertedReg.SubIdx)) != 0)
return false;
// At this point, the value is available in v0 via the same subreg
// we used for Def.
SrcReg = BaseReg.Reg;
SrcSubReg = DefSubReg;
return true;
}
bool ValueTracker::getNextSourceFromExtractSubreg(unsigned &SrcReg,
unsigned &SrcSubReg) {
assert((Def->isExtractSubreg() ||
Def->isExtractSubregLike()) && "Invalid definition");
// We are looking at:
// Def = EXTRACT_SUBREG v0, sub0
// Bails if we have to compose sub registers.
// Indeed, if DefSubReg != 0, we would have to compose it with sub0.
if (DefSubReg)
return false;
if (!TII)
// We could handle the EXTRACT_SUBREG here, but we do not want to
// duplicate the code from the generic TII.
return false;
TargetInstrInfo::RegSubRegPairAndIdx ExtractSubregInputReg;
if (!TII->getExtractSubregInputs(*Def, DefIdx, ExtractSubregInputReg))
return false;
// Bails if we have to compose sub registers.
// Likewise, if v0.subreg != 0, we would have to compose v0.subreg with sub0.
if (ExtractSubregInputReg.SubReg)
return false;
// Otherwise, the value is available in v0.sub0.
SrcReg = ExtractSubregInputReg.Reg;
SrcSubReg = ExtractSubregInputReg.SubIdx;
return true;
}
bool ValueTracker::getNextSourceFromSubregToReg(unsigned &SrcReg,
unsigned &SrcSubReg) {
assert(Def->isSubregToReg() && "Invalid definition");
// We are looking at:
// Def = SUBREG_TO_REG Imm, v0, sub0
// Bails if we have to compose sub registers.
// If DefSubReg != sub0, we would have to check that all the bits
// we track are included in sub0 and if yes, we would have to
// determine the right subreg in v0.
if (DefSubReg != Def->getOperand(3).getImm())
return false;
// Bails if we have to compose sub registers.
// Likewise, if v0.subreg != 0, we would have to compose it with sub0.
if (Def->getOperand(2).getSubReg())
return false;
SrcReg = Def->getOperand(2).getReg();
SrcSubReg = Def->getOperand(3).getImm();
return true;
}
bool ValueTracker::getNextSourceImpl(unsigned &SrcReg, unsigned &SrcSubReg) {
assert(Def && "This method needs a valid definition");
assert(
(DefIdx < Def->getDesc().getNumDefs() || Def->getDesc().isVariadic()) &&
Def->getOperand(DefIdx).isDef() && "Invalid DefIdx");
if (Def->isCopy())
return getNextSourceFromCopy(SrcReg, SrcSubReg);
if (Def->isBitcast())
return getNextSourceFromBitcast(SrcReg, SrcSubReg);
// All the remaining cases involve "complex" instructions.
// Bails if we did not ask for the advanced tracking.
if (!UseAdvancedTracking)
return false;
if (Def->isRegSequence() || Def->isRegSequenceLike())
return getNextSourceFromRegSequence(SrcReg, SrcSubReg);
if (Def->isInsertSubreg() || Def->isInsertSubregLike())
return getNextSourceFromInsertSubreg(SrcReg, SrcSubReg);
if (Def->isExtractSubreg() || Def->isExtractSubregLike())
return getNextSourceFromExtractSubreg(SrcReg, SrcSubReg);
if (Def->isSubregToReg())
return getNextSourceFromSubregToReg(SrcReg, SrcSubReg);
return false;
}
const MachineInstr *ValueTracker::getNextSource(unsigned &SrcReg,
unsigned &SrcSubReg) {
// If we reach a point where we cannot move up in the use-def chain,
// there is nothing we can get.
if (!Def)
return nullptr;
const MachineInstr *PrevDef = nullptr;
// Try to find the next source.
if (getNextSourceImpl(SrcReg, SrcSubReg)) {
// Update definition, definition index, and subregister for the
// next call of getNextSource.
// Update the current register.
Reg = SrcReg;
// Update the return value before moving up in the use-def chain.
PrevDef = Def;
// If we can still move up in the use-def chain, move to the next
// definition.
if (!TargetRegisterInfo::isPhysicalRegister(Reg)) {
Def = MRI.getVRegDef(Reg);
DefIdx = MRI.def_begin(Reg).getOperandNo();
DefSubReg = SrcSubReg;
return PrevDef;
}
}
// If we end up here, this means we will not be able to find another source
// for the next iteration.
// Make sure any new call to getNextSource bails out early by cutting the
// use-def chain.
Def = nullptr;
return PrevDef;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineBranchProbabilityInfo.cpp | //===- MachineBranchProbabilityInfo.cpp - Machine Branch Probability Info -===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This analysis uses probability info stored in Machine Basic Blocks.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
INITIALIZE_PASS_BEGIN(MachineBranchProbabilityInfo, "machine-branch-prob",
"Machine Branch Probability Analysis", false, true)
INITIALIZE_PASS_END(MachineBranchProbabilityInfo, "machine-branch-prob",
"Machine Branch Probability Analysis", false, true)
char MachineBranchProbabilityInfo::ID = 0;
void MachineBranchProbabilityInfo::anchor() { }
uint32_t MachineBranchProbabilityInfo::
getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const {
// First we compute the sum with 64-bits of precision, ensuring that it
// overflow by bounding the number of weights considered. Hopefully no one
// actually needs 2^32 successors.
assert(MBB->succ_size() < UINT32_MAX);
uint64_t Sum = 0;
Scale = 1;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
uint32_t Weight = getEdgeWeight(MBB, I);
Sum += Weight;
}
// If the computed sum fits in 32-bits, we're done.
if (Sum <= UINT32_MAX)
return Sum;
// Otherwise, compute the scale necessary to cause the weights to fit, and
// re-sum with that scale applied.
assert((Sum / UINT32_MAX) < UINT32_MAX);
Scale = (Sum / UINT32_MAX) + 1;
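// Illustrative example (made-up weights): if the 64-bit sum were
// 3 * UINT32_MAX, Scale would be (3 * UINT32_MAX / UINT32_MAX) + 1 = 4 and
// the rescaled sum roughly 0.75 * UINT32_MAX, which fits in 32 bits.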
Sum = 0;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
uint32_t Weight = getEdgeWeight(MBB, I);
Sum += Weight / Scale;
}
assert(Sum <= UINT32_MAX);
return Sum;
}
uint32_t MachineBranchProbabilityInfo::
getEdgeWeight(const MachineBasicBlock *Src,
MachineBasicBlock::const_succ_iterator Dst) const {
uint32_t Weight = Src->getSuccWeight(Dst);
if (!Weight)
return DEFAULT_WEIGHT;
return Weight;
}
uint32_t MachineBranchProbabilityInfo::
getEdgeWeight(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const {
// This is a linear search. Try to use the const_succ_iterator version when
// possible.
return getEdgeWeight(Src, std::find(Src->succ_begin(), Src->succ_end(), Dst));
}
bool
MachineBranchProbabilityInfo::isEdgeHot(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
// FIXME: Compare against a static "hot" BranchProbability.
return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
}
MachineBasicBlock *
MachineBranchProbabilityInfo::getHotSucc(MachineBasicBlock *MBB) const {
uint32_t MaxWeight = 0;
MachineBasicBlock *MaxSucc = nullptr;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
uint32_t Weight = getEdgeWeight(MBB, I);
if (Weight > MaxWeight) {
MaxWeight = Weight;
MaxSucc = *I;
}
}
if (getEdgeProbability(MBB, MaxSucc) >= BranchProbability(4, 5))
return MaxSucc;
return nullptr;
}
BranchProbability MachineBranchProbabilityInfo::getEdgeProbability(
const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const {
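// The probability is the (rescaled) edge weight over the (rescaled) sum of
// all successor weights. E.g., with successor weights {1, 3} (illustrative
// numbers), the heavier edge gets probability 3/4.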
uint32_t Scale = 1;
uint32_t D = getSumForBlock(Src, Scale);
uint32_t N = getEdgeWeight(Src, Dst) / Scale;
return BranchProbability(N, D);
}
raw_ostream &MachineBranchProbabilityInfo::printEdgeProbability(
raw_ostream &OS, const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const {
const BranchProbability Prob = getEdgeProbability(Src, Dst);
OS << "edge MBB#" << Src->getNumber() << " -> MBB#" << Dst->getNumber()
<< " probability is " << Prob
<< (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");
return OS;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/BranchFolding.h | //===-- BranchFolding.h - Fold machine code branch instructions -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_BRANCHFOLDING_H
#define LLVM_LIB_CODEGEN_BRANCHFOLDING_H
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/BlockFrequency.h"
#include <vector>
namespace llvm {
class MachineBlockFrequencyInfo;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineModuleInfo;
class RegScavenger;
class TargetInstrInfo;
class TargetRegisterInfo;
class LLVM_LIBRARY_VISIBILITY BranchFolder {
public:
explicit BranchFolder(bool defaultEnableTailMerge, bool CommonHoist,
const MachineBlockFrequencyInfo &MBFI,
const MachineBranchProbabilityInfo &MBPI);
bool OptimizeFunction(MachineFunction &MF,
const TargetInstrInfo *tii,
const TargetRegisterInfo *tri,
MachineModuleInfo *mmi);
private:
class MergePotentialsElt {
unsigned Hash;
MachineBasicBlock *Block;
public:
MergePotentialsElt(unsigned h, MachineBasicBlock *b)
: Hash(h), Block(b) {}
unsigned getHash() const { return Hash; }
MachineBasicBlock *getBlock() const { return Block; }
void setBlock(MachineBasicBlock *MBB) {
Block = MBB;
}
bool operator<(const MergePotentialsElt &) const;
};
typedef std::vector<MergePotentialsElt>::iterator MPIterator;
std::vector<MergePotentialsElt> MergePotentials;
SmallPtrSet<const MachineBasicBlock*, 2> TriedMerging;
class SameTailElt {
MPIterator MPIter;
MachineBasicBlock::iterator TailStartPos;
public:
SameTailElt(MPIterator mp, MachineBasicBlock::iterator tsp)
: MPIter(mp), TailStartPos(tsp) {}
MPIterator getMPIter() const {
return MPIter;
}
MergePotentialsElt &getMergePotentialsElt() const {
return *getMPIter();
}
MachineBasicBlock::iterator getTailStartPos() const {
return TailStartPos;
}
unsigned getHash() const {
return getMergePotentialsElt().getHash();
}
MachineBasicBlock *getBlock() const {
return getMergePotentialsElt().getBlock();
}
bool tailIsWholeBlock() const {
return TailStartPos == getBlock()->begin();
}
void setBlock(MachineBasicBlock *MBB) {
getMergePotentialsElt().setBlock(MBB);
}
void setTailStartPos(MachineBasicBlock::iterator Pos) {
TailStartPos = Pos;
}
};
std::vector<SameTailElt> SameTails;
bool EnableTailMerge;
bool EnableHoistCommonCode;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineModuleInfo *MMI;
RegScavenger *RS;
/// \brief This class keeps track of branch frequencies of newly created
/// blocks and tail-merged blocks.
class MBFIWrapper {
public:
MBFIWrapper(const MachineBlockFrequencyInfo &I) : MBFI(I) {}
BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
void setBlockFreq(const MachineBasicBlock *MBB, BlockFrequency F);
private:
const MachineBlockFrequencyInfo &MBFI;
DenseMap<const MachineBasicBlock *, BlockFrequency> MergedBBFreq;
};
MBFIWrapper MBBFreqInfo;
const MachineBranchProbabilityInfo &MBPI;
bool TailMergeBlocks(MachineFunction &MF);
  bool TryTailMergeBlocks(MachineBasicBlock *SuccBB,
                          MachineBasicBlock *PredBB);
void setCommonTailEdgeWeights(MachineBasicBlock &TailMBB);
void MaintainLiveIns(MachineBasicBlock *CurMBB,
MachineBasicBlock *NewMBB);
void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
MachineBasicBlock *NewDest);
MachineBasicBlock *SplitMBBAt(MachineBasicBlock &CurMBB,
MachineBasicBlock::iterator BBI1,
const BasicBlock *BB);
unsigned ComputeSameTails(unsigned CurHash, unsigned minCommonTailLength,
MachineBasicBlock *SuccBB,
MachineBasicBlock *PredBB);
  void RemoveBlocksWithHash(unsigned CurHash, MachineBasicBlock *SuccBB,
                            MachineBasicBlock *PredBB);
bool CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
MachineBasicBlock *SuccBB,
unsigned maxCommonTailLength,
unsigned &commonTailIndex);
bool OptimizeBranches(MachineFunction &MF);
bool OptimizeBlock(MachineBasicBlock *MBB);
void RemoveDeadBlock(MachineBasicBlock *MBB);
bool OptimizeImpDefsBlock(MachineBasicBlock *MBB);
bool HoistCommonCode(MachineFunction &MF);
bool HoistCommonCodeInSuccs(MachineBasicBlock *MBB);
};
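// Illustrative usage sketch (assumed caller context, not in the original
// header): a pass that owns the required analyses would run the folder as
//
//   BranchFolder Folder(/*defaultEnableTailMerge=*/true,
//                       /*CommonHoist=*/true, MBFI, MBPI);
//   bool EverMadeChange = Folder.OptimizeFunction(MF, TII, TRI, MMI);
//
// where MBFI and MBPI are the block-frequency and branch-probability
// analyses declared above.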
}
#endif /* LLVM_LIB_CODEGEN_BRANCHFOLDING_H */
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineDominanceFrontier.cpp | //===- MachineDominanceFrontier.cpp ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/Analysis/DominanceFrontierImpl.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/Passes.h"
using namespace llvm;
namespace llvm {
template class DominanceFrontierBase<MachineBasicBlock>;
template class ForwardDominanceFrontierBase<MachineBasicBlock>;
}
char MachineDominanceFrontier::ID = 0;
INITIALIZE_PASS_BEGIN(MachineDominanceFrontier, "machine-domfrontier",
"Machine Dominance Frontier Construction", true, true)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(MachineDominanceFrontier, "machine-domfrontier",
"Machine Dominance Frontier Construction", true, true)
MachineDominanceFrontier::MachineDominanceFrontier()
: MachineFunctionPass(ID),
Base() {
initializeMachineDominanceFrontierPass(*PassRegistry::getPassRegistry());
}
char &llvm::MachineDominanceFrontierID = MachineDominanceFrontier::ID;
bool MachineDominanceFrontier::runOnMachineFunction(MachineFunction &) {
releaseMemory();
Base.analyze(getAnalysis<MachineDominatorTree>().getBase());
return false;
}
void MachineDominanceFrontier::releaseMemory() {
Base.releaseMemory();
}
void MachineDominanceFrontier::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/AntiDepBreaker.h | //=- llvm/CodeGen/AntiDepBreaker.h - Anti-Dependence Breaking -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AntiDepBreaker class, which implements
// anti-dependence breaking heuristics for post-register-allocation scheduling.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ANTIDEPBREAKER_H
#define LLVM_LIB_CODEGEN_ANTIDEPBREAKER_H
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>
namespace llvm {
/// This class works in conjunction with the post-RA scheduler to rename
/// registers to break register anti-dependencies (WAR hazards).
class LLVM_LIBRARY_VISIBILITY AntiDepBreaker {
public:
typedef std::vector<std::pair<MachineInstr *, MachineInstr *> >
DbgValueVector;
virtual ~AntiDepBreaker();
/// Initialize anti-dep breaking for a new basic block.
virtual void StartBlock(MachineBasicBlock *BB) = 0;
/// Identify anti-dependencies within a basic-block region and break them by
/// renaming registers. Return the number of anti-dependencies broken.
virtual unsigned BreakAntiDependencies(const std::vector<SUnit>& SUnits,
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned InsertPosIndex,
DbgValueVector &DbgValues) = 0;
/// Update liveness information to account for the current
/// instruction, which will not be scheduled.
virtual void Observe(MachineInstr *MI, unsigned Count,
                     unsigned InsertPosIndex) = 0;
/// Finish anti-dep breaking for a basic block.
virtual void FinishBlock() = 0;
/// Update DBG_VALUE if dependency breaker is updating
/// other machine instruction to use NewReg.
void UpdateDbgValue(MachineInstr *MI, unsigned OldReg, unsigned NewReg) {
  assert(MI && MI->isDebugValue() && "MI is not DBG_VALUE!");
  if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg() == OldReg)
    MI->getOperand(0).setReg(NewReg);
}
};
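// Minimal sketch of a conforming subclass (illustrative only; the class name
// is hypothetical and the stub breaks no dependencies):
//
//   class NoopAntiDepBreaker : public AntiDepBreaker {
//     void StartBlock(MachineBasicBlock *BB) override {}
//     unsigned BreakAntiDependencies(const std::vector<SUnit> &SUnits,
//                                    MachineBasicBlock::iterator Begin,
//                                    MachineBasicBlock::iterator End,
//                                    unsigned InsertPosIndex,
//                                    DbgValueVector &DbgValues) override {
//       return 0; // Report zero anti-dependencies broken.
//     }
//     void Observe(MachineInstr *MI, unsigned Count,
//                  unsigned InsertPosIndex) override {}
//     void FinishBlock() override {}
//   };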
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineCombiner.cpp | //===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "machine-combiner"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
STATISTIC(NumInstCombined, "Number of machine instructions combined");
namespace {
class MachineCombiner : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MCSchedModel SchedModel;
MachineRegisterInfo *MRI;
MachineTraceMetrics *Traces;
MachineTraceMetrics::Ensemble *MinInstr;
TargetSchedModel TSchedModel;
/// True if optimizing for code size.
bool OptSize;
public:
static char ID;
MachineCombiner() : MachineFunctionPass(ID) {
initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction &MF) override;
StringRef getPassName() const override { return "Machine InstCombiner"; }
private:
bool doSubstitute(unsigned NewSize, unsigned OldSize);
bool combineInstructions(MachineBasicBlock *);
MachineInstr *getOperandDef(const MachineOperand &MO);
unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
MachineTraceMetrics::Trace BlockTrace);
unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
MachineTraceMetrics::Trace BlockTrace);
bool
improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
MachineTraceMetrics::Trace BlockTrace,
SmallVectorImpl<MachineInstr *> &InsInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
bool NewCodeHasLessInsts);
bool preservesResourceLen(MachineBasicBlock *MBB,
MachineTraceMetrics::Trace BlockTrace,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs);
void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
};
}
char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;
INITIALIZE_PASS_BEGIN(MachineCombiner, "machine-combiner",
"Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, "machine-combiner", "Machine InstCombiner",
false, false)
void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addPreserved<MachineDominatorTree>();
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<MachineTraceMetrics>();
AU.addPreserved<MachineTraceMetrics>();
MachineFunctionPass::getAnalysisUsage(AU);
}
MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
MachineInstr *DefInstr = nullptr;
// We need a virtual register definition.
if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
DefInstr = MRI->getUniqueVRegDef(MO.getReg());
// PHIs have no depth or latency of their own.
if (DefInstr && DefInstr->isPHI())
DefInstr = nullptr;
return DefInstr;
}
/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
MachineTraceMetrics::Trace BlockTrace) {
SmallVector<unsigned, 16> InstrDepth;
assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
// For each instruction in the new sequence compute the depth based on the
// operands. Use the trace information when possible. For new operands, which
// are tracked in the InstrIdxForVirtReg map, the depth is looked up in
// InstrDepth.
for (auto *InstrPtr : InsInstrs) { // for each Use
unsigned IDepth = 0;
DEBUG(dbgs() << "NEW INSTR "; InstrPtr->dump(); dbgs() << "\n";);
for (const MachineOperand &MO : InstrPtr->operands()) {
// Check for virtual register operand.
if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
continue;
if (!MO.isUse())
continue;
unsigned DepthOp = 0;
unsigned LatencyOp = 0;
DenseMap<unsigned, unsigned>::iterator II =
InstrIdxForVirtReg.find(MO.getReg());
if (II != InstrIdxForVirtReg.end()) {
// The operand is a new virtual register, not yet part of the trace.
assert(II->second < InstrDepth.size() && "Bad Index");
MachineInstr *DefInstr = InsInstrs[II->second];
assert(DefInstr &&
"There must be a definition for a new virtual register");
DepthOp = InstrDepth[II->second];
LatencyOp = TSchedModel.computeOperandLatency(
DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
} else {
MachineInstr *DefInstr = getOperandDef(MO);
if (DefInstr) {
DepthOp = BlockTrace.getInstrCycles(DefInstr).Depth;
LatencyOp = TSchedModel.computeOperandLatency(
DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
}
}
IDepth = std::max(IDepth, DepthOp + LatencyOp);
}
InstrDepth.push_back(IDepth);
}
unsigned NewRootIdx = InsInstrs.size() - 1;
return InstrDepth[NewRootIdx];
}
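// Worked example (illustrative numbers): suppose NewRoot reads two virtual
// registers, one defined inside InsInstrs at depth 2 with operand latency 1,
// and one defined in the trace at depth 4 by an instruction with operand
// latency 3. Then its depth is max(2 + 1, 4 + 3) = 7, the value returned
// above.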
/// Computes instruction latency as max of latency of defined operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute a more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
MachineTraceMetrics::Trace BlockTrace) {
assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
// Check each definition in NewRoot and compute the latency
unsigned NewRootLatency = 0;
for (const MachineOperand &MO : NewRoot->operands()) {
// Check for virtual register operand.
if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
continue;
if (!MO.isDef())
continue;
    // Get the first instruction that uses MO. reg_begin() starts at the
    // definition, so step past it; the register may have no other operands.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    MachineInstr *UseMO = (RI == MRI->reg_end()) ? nullptr : RI->getParent();
unsigned LatencyOp = 0;
if (UseMO && BlockTrace.isDepInTrace(Root, UseMO)) {
LatencyOp = TSchedModel.computeOperandLatency(
NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
UseMO->findRegisterUseOperandIdx(MO.getReg()));
} else {
LatencyOp = TSchedModel.computeInstrLatency(NewRoot->getOpcode());
}
NewRootLatency = std::max(NewRootLatency, LatencyOp);
}
return NewRootLatency;
}
/// True when the new instruction sequence does not lengthen the critical path
/// and either has fewer instructions or improves the critical path.
/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. This is decided by the formula:
/// (NewRootDepth + NewRootLatency) <= (RootDepth + RootLatency + RootSlack).
/// If the new sequence has an equal length critical path but does not reduce
/// the number of instructions (NewCodeHasLessInsts is false), then it is not
/// considered an improvement. The slack is the number of cycles Root can be
/// delayed before the critical path becomes longer.
bool MachineCombiner::improvesCriticalPathLen(
MachineBasicBlock *MBB, MachineInstr *Root,
MachineTraceMetrics::Trace BlockTrace,
SmallVectorImpl<MachineInstr *> &InsInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
bool NewCodeHasLessInsts) {
assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
// NewRoot is the last instruction in the \p InsInstrs vector.
// Get depth and latency of NewRoot.
unsigned NewRootIdx = InsInstrs.size() - 1;
MachineInstr *NewRoot = InsInstrs[NewRootIdx];
unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);
// Get depth, latency and slack of Root.
unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
unsigned RootSlack = BlockTrace.getInstrSlack(Root);
DEBUG(dbgs() << "DEPENDENCE DATA FOR " << Root << "\n";
dbgs() << " NewRootDepth: " << NewRootDepth
<< " NewRootLatency: " << NewRootLatency << "\n";
dbgs() << " RootDepth: " << RootDepth << " RootLatency: " << RootLatency
<< " RootSlack: " << RootSlack << "\n";
dbgs() << " NewRootDepth + NewRootLatency "
<< NewRootDepth + NewRootLatency << "\n";
dbgs() << " RootDepth + RootLatency + RootSlack "
<< RootDepth + RootLatency + RootSlack << "\n";);
unsigned NewCycleCount = NewRootDepth + NewRootLatency;
unsigned OldCycleCount = RootDepth + RootLatency + RootSlack;
if (NewCodeHasLessInsts)
return NewCycleCount <= OldCycleCount;
else
return NewCycleCount < OldCycleCount;
}
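// Worked example (illustrative numbers): with NewRootDepth = 3 and
// NewRootLatency = 4, NewCycleCount = 7; with RootDepth = 2, RootLatency = 3
// and RootSlack = 2, OldCycleCount = 7. Since 7 <= 7 but not 7 < 7, the new
// sequence is accepted only when it also has fewer instructions
// (NewCodeHasLessInsts == true).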
/// Helper routine to convert machine instructions to their scheduling
/// classes (SC).
void MachineCombiner::instr2instrSC(
SmallVectorImpl<MachineInstr *> &Instrs,
SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
for (auto *InstrPtr : Instrs) {
unsigned Opc = InstrPtr->getOpcode();
unsigned Idx = TII->get(Opc).getSchedClass();
const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
InstrsSC.push_back(SC);
}
}
/// True when the new instructions do not increase resource length
bool MachineCombiner::preservesResourceLen(
MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs) {
// Compute current resource length
SmallVector<const MachineBasicBlock *, 1> MBBarr;
MBBarr.push_back(MBB);
unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);
// Deal with SC rather than Instructions.
SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;
instr2instrSC(InsInstrs, InsInstrsSC);
instr2instrSC(DelInstrs, DelInstrsSC);
ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);
// Compute new resource length.
unsigned ResLenAfterCombine =
BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);
DEBUG(dbgs() << "RESOURCE DATA: \n";
dbgs() << " resource len before: " << ResLenBeforeCombine
<< " after: " << ResLenAfterCombine << "\n";);
return ResLenAfterCombine <= ResLenBeforeCombine;
}
/// \returns true when the new instruction sequence should be generated
/// regardless of whether it lengthens the critical path.
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
if (OptSize && (NewSize < OldSize))
return true;
if (!TSchedModel.hasInstrSchedModel())
return true;
return false;
}
/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size, always combine when the
/// new sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
bool Changed = false;
DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");
auto BlockIter = MBB->begin();
while (BlockIter != MBB->end()) {
auto &MI = *BlockIter++;
DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
// The motivating example is:
//
// MUL Other MUL_op1 MUL_op2 Other
// \ / \ | /
// ADD/SUB => MADD/MSUB
// (=Root) (=NewRoot)
// The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
// usually beneficial for code size it unfortunately can hurt performance
// when the ADD is on the critical path, but the MUL is not. With the
// substitution the MUL becomes part of the critical path (in form of the
// MADD) and can lengthen it on architectures where the MADD latency is
// longer than the ADD latency.
//
// For each instruction we check if it can be the root of a combiner
// pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // the critical path, don't use more resources) are met, the new sequence
    // gets hooked up into the basic block before the old sequence is removed.
//
// The algorithm does not try to evaluate all patterns and pick the best.
// This is only an artificial restriction though. In practice there is
// mostly one pattern, and getMachineCombinerPatterns() can order patterns
// based on an internal cost heuristic.
if (TII->getMachineCombinerPatterns(MI, Patterns)) {
for (auto P : Patterns) {
SmallVector<MachineInstr *, 16> InsInstrs;
SmallVector<MachineInstr *, 16> DelInstrs;
DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
if (!MinInstr)
MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
Traces->verifyAnalysis();
TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
InstrIdxForVirtReg);
unsigned NewInstCount = InsInstrs.size();
unsigned OldInstCount = DelInstrs.size();
// Found pattern, but did not generate alternative sequence.
// This can happen e.g. when an immediate could not be materialized
// in a single instruction.
if (!NewInstCount)
continue;
// Substitute when we optimize for codesize and the new sequence has
// fewer instructions OR
// the new sequence neither lengthens the critical path nor increases
// resource pressure.
if (doSubstitute(NewInstCount, OldInstCount) ||
(improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs,
InstrIdxForVirtReg,
NewInstCount < OldInstCount) &&
preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs))) {
for (auto *InstrPtr : InsInstrs)
MBB->insert((MachineBasicBlock::iterator) &MI, InstrPtr);
for (auto *InstrPtr : DelInstrs)
InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
Changed = true;
++NumInstCombined;
Traces->invalidate(MBB);
Traces->verifyAnalysis();
// Eagerly stop after the first pattern fires.
break;
} else {
// Cleanup instructions of the alternative code sequence. There is no
// use for them.
MachineFunction *MF = MBB->getParent();
for (auto *InstrPtr : InsInstrs)
MF->DeleteMachineInstr(InstrPtr);
}
InstrIdxForVirtReg.clear();
}
}
}
return Changed;
}
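// Illustrative sketch of the motivating transformation (target-neutral
// pseudo opcodes; the real opcodes and the generated sequence come from the
// target's genAlternativeCodeSequence):
//
//   %t   = MUL %a, %b        ; DelInstrs = {MUL, ADD}
//   %res = ADD %t, %c        ; Root
//     =>
//   %res = MADD %a, %b, %c   ; InsInstrs = {MADD}, NewRoot
//
// The substitution fires only when the efficiency checks above accept it, or
// when doSubstitute() approves it (smaller code while optimizing for size,
// or no scheduling model available).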
bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
const TargetSubtargetInfo &STI = MF.getSubtarget();
TII = STI.getInstrInfo();
TRI = STI.getRegisterInfo();
SchedModel = STI.getSchedModel();
TSchedModel.init(SchedModel, &STI, TII);
MRI = &MF.getRegInfo();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
if (!TII->useMachineCombiner()) {
DEBUG(dbgs() << " Skipping pass: Target does not support machine combiner\n");
return false;
}
bool Changed = false;
// Try to combine instructions.
for (auto &MBB : MF)
Changed |= combineInstructions(&MBB);
return Changed;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ExpandISelPseudos.cpp | //===-- llvm/CodeGen/ExpandISelPseudos.cpp ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Expand Pseudo-instructions produced by ISel. These are usually to allow
// the expansion to contain control flow, such as a conditional move
// implemented with a conditional branch and a phi, or an atomic operation
// implemented with a loop.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "expand-isel-pseudos"
namespace {
class ExpandISelPseudos : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
ExpandISelPseudos() : MachineFunctionPass(ID) {}
private:
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
} // end anonymous namespace
char ExpandISelPseudos::ID = 0;
char &llvm::ExpandISelPseudosID = ExpandISelPseudos::ID;
INITIALIZE_PASS(ExpandISelPseudos, "expand-isel-pseudos",
"Expand ISel Pseudo-instructions", false, false)
bool ExpandISelPseudos::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
// Iterate through each instruction in the function, looking for pseudos.
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = I;
for (MachineBasicBlock::iterator MBBI = MBB->begin(), MBBE = MBB->end();
MBBI != MBBE; ) {
MachineInstr *MI = MBBI++;
// If MI is a pseudo, expand it.
if (MI->usesCustomInsertionHook()) {
Changed = true;
MachineBasicBlock *NewMBB =
TLI->EmitInstrWithCustomInserter(MI, MBB);
// The expansion may involve new basic blocks.
if (NewMBB != MBB) {
MBB = NewMBB;
I = NewMBB;
MBBI = NewMBB->begin();
MBBE = NewMBB->end();
}
}
}
}
return Changed;
}
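// Illustrative sketch (hypothetical target pseudo): a SELECT-style pseudo
// flagged with usesCustomInsertionHook() is typically expanded by the
// target's EmitInstrWithCustomInserter into a small diamond,
//
//   EntryMBB: conditional branch on the predicate
//   CopyMBB:  computes the alternative value
//   SinkMBB:  PHI merging both values in place of the pseudo's def
//
// which is why the loop above re-seats MBB and its iterators whenever the
// hook returns a different block.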
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/CoreCLRGC.cpp | //===-- CoreCLRGC.cpp - CoreCLR Runtime GC Strategy -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a GCStrategy for the CoreCLR Runtime.
// The strategy is similar to Statepoint-example GC, but differs from it in
// certain aspects, such as:
// 1) Base-pointers need not be explicitly tracked and reported for
// interior pointers
// 2) Uses a different format for encoding stack-maps
// 3) Location of Safe-point polls: polls are only needed before loop-back edges
// and before tail-calls (not needed at function-entry)
//
// The above differences in behavior are to be implemented in upcoming checkins.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h"
using namespace llvm;
namespace {
class CoreCLRGC : public GCStrategy {
public:
CoreCLRGC() {
UseStatepoints = true;
    // These options are all gc.root specific; we specify them so that the
    // gc.root lowering code doesn't run.
InitRoots = false;
NeededSafePoints = 0;
UsesMetadata = false;
CustomRoots = false;
}
Optional<bool> isGCManagedPointer(const Value *V) const override {
// Method is only valid on pointer typed values.
PointerType *PT = cast<PointerType>(V->getType());
// We pick addrspace(1) as our GC managed heap.
return (1 == PT->getAddressSpace());
}
};
}
static GCRegistry::Add<CoreCLRGC> X("coreclr", "CoreCLR-compatible GC");
namespace llvm {
void linkCoreCLRGC() {}
}
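// Illustrative IR-level consequence of isGCManagedPointer above (assumed
// address-space convention): a pointer value in addrspace(1) reports as
// managed, any other pointer does not.
//
//   %managed   : i8 addrspace(1)* -> isGCManagedPointer == true
//   %unmanaged : i8*              -> isGCManagedPointer == false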
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineTraceMetrics.cpp | //===- lib/CodeGen/MachineTraceMetrics.cpp ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "machine-trace-metrics"
char MachineTraceMetrics::ID = 0;
char &llvm::MachineTraceMetricsID = MachineTraceMetrics::ID;
INITIALIZE_PASS_BEGIN(MachineTraceMetrics,
"machine-trace-metrics", "Machine Trace Metrics", false, true)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(MachineTraceMetrics,
"machine-trace-metrics", "Machine Trace Metrics", false, true)
MachineTraceMetrics::MachineTraceMetrics()
: MachineFunctionPass(ID), MF(nullptr), TII(nullptr), TRI(nullptr),
MRI(nullptr), Loops(nullptr) {
std::fill(std::begin(Ensembles), std::end(Ensembles), nullptr);
}
void MachineTraceMetrics::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool MachineTraceMetrics::runOnMachineFunction(MachineFunction &Func) {
MF = &Func;
const TargetSubtargetInfo &ST = MF->getSubtarget();
TII = ST.getInstrInfo();
TRI = ST.getRegisterInfo();
MRI = &MF->getRegInfo();
Loops = &getAnalysis<MachineLoopInfo>();
SchedModel.init(ST.getSchedModel(), &ST, TII);
BlockInfo.resize(MF->getNumBlockIDs());
ProcResourceCycles.resize(MF->getNumBlockIDs() *
SchedModel.getNumProcResourceKinds());
return false;
}
void MachineTraceMetrics::releaseMemory() {
MF = nullptr;
BlockInfo.clear();
for (unsigned i = 0; i != TS_NumStrategies; ++i) {
delete Ensembles[i];
Ensembles[i] = nullptr;
}
}
//===----------------------------------------------------------------------===//
// Fixed block information
//===----------------------------------------------------------------------===//
//
// The number of instructions in a basic block and the CPU resources used by
// those instructions don't depend on any given trace strategy.
/// Compute the resource usage in basic block MBB.
const MachineTraceMetrics::FixedBlockInfo*
MachineTraceMetrics::getResources(const MachineBasicBlock *MBB) {
assert(MBB && "No basic block");
FixedBlockInfo *FBI = &BlockInfo[MBB->getNumber()];
if (FBI->hasResources())
return FBI;
// Compute resource usage in the block.
FBI->HasCalls = false;
unsigned InstrCount = 0;
// Add up per-processor resource cycles as well.
unsigned PRKinds = SchedModel.getNumProcResourceKinds();
SmallVector<unsigned, 32> PRCycles(PRKinds);
for (const auto &MI : *MBB) {
if (MI.isTransient())
continue;
++InstrCount;
if (MI.isCall())
FBI->HasCalls = true;
// Count processor resources used.
if (!SchedModel.hasInstrSchedModel())
continue;
const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(&MI);
if (!SC->isValid())
continue;
for (TargetSchedModel::ProcResIter
PI = SchedModel.getWriteProcResBegin(SC),
PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
assert(PI->ProcResourceIdx < PRKinds && "Bad processor resource kind");
PRCycles[PI->ProcResourceIdx] += PI->Cycles;
}
}
FBI->InstrCount = InstrCount;
// Scale the resource cycles so they are comparable.
unsigned PROffset = MBB->getNumber() * PRKinds;
for (unsigned K = 0; K != PRKinds; ++K)
ProcResourceCycles[PROffset + K] =
PRCycles[K] * SchedModel.getResourceFactor(K);
return FBI;
}
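// Worked example (illustrative numbers): a block spending 8 cycles on a
// resource with factor 3 and 4 cycles on one with factor 5 stores 24 and 20
// respectively. Scaling by getResourceFactor makes cycle counts comparable
// across resources of different widths.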
ArrayRef<unsigned>
MachineTraceMetrics::getProcResourceCycles(unsigned MBBNum) const {
assert(BlockInfo[MBBNum].hasResources() &&
"getResources() must be called before getProcResourceCycles()");
unsigned PRKinds = SchedModel.getNumProcResourceKinds();
assert((MBBNum+1) * PRKinds <= ProcResourceCycles.size());
return makeArrayRef(ProcResourceCycles.data() + MBBNum * PRKinds, PRKinds);
}
//===----------------------------------------------------------------------===//
// Ensemble utility functions
//===----------------------------------------------------------------------===//
MachineTraceMetrics::Ensemble::Ensemble(MachineTraceMetrics *ct)
: MTM(*ct) {
BlockInfo.resize(MTM.BlockInfo.size());
unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
ProcResourceDepths.resize(MTM.BlockInfo.size() * PRKinds);
ProcResourceHeights.resize(MTM.BlockInfo.size() * PRKinds);
}
// Virtual destructor serves as an anchor.
MachineTraceMetrics::Ensemble::~Ensemble() {}
const MachineLoop*
MachineTraceMetrics::Ensemble::getLoopFor(const MachineBasicBlock *MBB) const {
return MTM.Loops->getLoopFor(MBB);
}
// Update resource-related information in the TraceBlockInfo for MBB.
// Only update resources related to the trace above MBB.
void MachineTraceMetrics::Ensemble::
computeDepthResources(const MachineBasicBlock *MBB) {
TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
unsigned PROffset = MBB->getNumber() * PRKinds;
// Compute resources from trace above. The top block is simple.
if (!TBI->Pred) {
TBI->InstrDepth = 0;
TBI->Head = MBB->getNumber();
std::fill(ProcResourceDepths.begin() + PROffset,
ProcResourceDepths.begin() + PROffset + PRKinds, 0);
return;
}
// Compute from the block above. A post-order traversal ensures the
// predecessor is always computed first.
unsigned PredNum = TBI->Pred->getNumber();
TraceBlockInfo *PredTBI = &BlockInfo[PredNum];
assert(PredTBI->hasValidDepth() && "Trace above has not been computed yet");
const FixedBlockInfo *PredFBI = MTM.getResources(TBI->Pred);
TBI->InstrDepth = PredTBI->InstrDepth + PredFBI->InstrCount;
TBI->Head = PredTBI->Head;
// Compute per-resource depths.
ArrayRef<unsigned> PredPRDepths = getProcResourceDepths(PredNum);
ArrayRef<unsigned> PredPRCycles = MTM.getProcResourceCycles(PredNum);
for (unsigned K = 0; K != PRKinds; ++K)
ProcResourceDepths[PROffset + K] = PredPRDepths[K] + PredPRCycles[K];
}
// Update resource-related information in the TraceBlockInfo for MBB.
// Only update resources related to the trace below MBB.
void MachineTraceMetrics::Ensemble::
computeHeightResources(const MachineBasicBlock *MBB) {
TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
unsigned PROffset = MBB->getNumber() * PRKinds;
// Compute resources for the current block.
TBI->InstrHeight = MTM.getResources(MBB)->InstrCount;
ArrayRef<unsigned> PRCycles = MTM.getProcResourceCycles(MBB->getNumber());
// The trace tail is done.
if (!TBI->Succ) {
TBI->Tail = MBB->getNumber();
std::copy(PRCycles.begin(), PRCycles.end(),
ProcResourceHeights.begin() + PROffset);
return;
}
// Compute from the block below. A post-order traversal ensures the
// predecessor is always computed first.
unsigned SuccNum = TBI->Succ->getNumber();
TraceBlockInfo *SuccTBI = &BlockInfo[SuccNum];
assert(SuccTBI->hasValidHeight() && "Trace below has not been computed yet");
TBI->InstrHeight += SuccTBI->InstrHeight;
TBI->Tail = SuccTBI->Tail;
// Compute per-resource heights.
ArrayRef<unsigned> SuccPRHeights = getProcResourceHeights(SuccNum);
for (unsigned K = 0; K != PRKinds; ++K)
ProcResourceHeights[PROffset + K] = SuccPRHeights[K] + PRCycles[K];
}
// Check if depth resources for MBB are valid and return the TBI.
// Return NULL if the resources have been invalidated.
const MachineTraceMetrics::TraceBlockInfo*
MachineTraceMetrics::Ensemble::
getDepthResources(const MachineBasicBlock *MBB) const {
const TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
return TBI->hasValidDepth() ? TBI : nullptr;
}
// Check if height resources for MBB are valid and return the TBI.
// Return NULL if the resources have been invalidated.
const MachineTraceMetrics::TraceBlockInfo*
MachineTraceMetrics::Ensemble::
getHeightResources(const MachineBasicBlock *MBB) const {
const TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
return TBI->hasValidHeight() ? TBI : nullptr;
}
/// Get an array of processor resource depths for MBB. Indexed by processor
/// resource kind, this array contains the scaled processor resources consumed
/// by all blocks preceding MBB in its trace. It does not include instructions
/// in MBB.
///
/// Compare TraceBlockInfo::InstrDepth.
ArrayRef<unsigned>
MachineTraceMetrics::Ensemble::
getProcResourceDepths(unsigned MBBNum) const {
unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
assert((MBBNum+1) * PRKinds <= ProcResourceDepths.size());
return makeArrayRef(ProcResourceDepths.data() + MBBNum * PRKinds, PRKinds);
}
/// Get an array of processor resource heights for MBB. Indexed by processor
/// resource kind, this array contains the scaled processor resources consumed
/// by this block and all blocks following it in its trace.
///
/// Compare TraceBlockInfo::InstrHeight.
ArrayRef<unsigned>
MachineTraceMetrics::Ensemble::
getProcResourceHeights(unsigned MBBNum) const {
unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
assert((MBBNum+1) * PRKinds <= ProcResourceHeights.size());
return makeArrayRef(ProcResourceHeights.data() + MBBNum * PRKinds, PRKinds);
}
//===----------------------------------------------------------------------===//
// Trace Selection Strategies
//===----------------------------------------------------------------------===//
//
// A trace selection strategy is implemented as a sub-class of Ensemble. The
// trace through a block B is computed by two DFS traversals of the CFG
// starting from B. One upwards, and one downwards. During the upwards DFS,
// pickTracePred() is called on the post-ordered blocks. During the downwards
// DFS, pickTraceSucc() is called in post-order.
//
// We never allow traces that leave loops, but we do allow traces to enter
// nested loops. We also never allow traces to contain back-edges.
//
// This means that a loop header can never appear above the center block of a
// trace, except as the trace head. Below the center block, loop exiting edges
// are banned.
//
// Return true if an edge from the From loop to the To loop is leaving a loop.
// Either of To and From can be null.
static bool isExitingLoop(const MachineLoop *From, const MachineLoop *To) {
return From && !From->contains(To);
}
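// Examples (illustrative): isExitingLoop(L, nullptr) is true, since the edge
// leaves L for non-loop code; isExitingLoop(L, Inner) is false when L
// contains Inner, so traces may enter nested loops; and
// isExitingLoop(nullptr, L) is false, since there is no loop to leave.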
// MinInstrCountEnsemble - Pick the trace that executes the least number of
// instructions.
namespace {
class MinInstrCountEnsemble : public MachineTraceMetrics::Ensemble {
const char *getName() const override { return "MinInstr"; }
const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) override;
const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) override;
public:
MinInstrCountEnsemble(MachineTraceMetrics *mtm)
: MachineTraceMetrics::Ensemble(mtm) {}
};
}
// Select the preferred predecessor for MBB.
const MachineBasicBlock*
MinInstrCountEnsemble::pickTracePred(const MachineBasicBlock *MBB) {
if (MBB->pred_empty())
return nullptr;
const MachineLoop *CurLoop = getLoopFor(MBB);
// Don't leave loops, and never follow back-edges.
if (CurLoop && MBB == CurLoop->getHeader())
return nullptr;
unsigned CurCount = MTM.getResources(MBB)->InstrCount;
const MachineBasicBlock *Best = nullptr;
unsigned BestDepth = 0;
for (const MachineBasicBlock *Pred : MBB->predecessors()) {
const MachineTraceMetrics::TraceBlockInfo *PredTBI =
getDepthResources(Pred);
// Ignore cycles that aren't natural loops.
if (!PredTBI)
continue;
// Pick the predecessor that would give this block the smallest InstrDepth.
unsigned Depth = PredTBI->InstrDepth + CurCount;
if (!Best || Depth < BestDepth)
Best = Pred, BestDepth = Depth;
}
return Best;
}
// Select the preferred successor for MBB.
const MachineBasicBlock*
MinInstrCountEnsemble::pickTraceSucc(const MachineBasicBlock *MBB) {
if (MBB->succ_empty())
return nullptr;
const MachineLoop *CurLoop = getLoopFor(MBB);
const MachineBasicBlock *Best = nullptr;
unsigned BestHeight = 0;
for (const MachineBasicBlock *Succ : MBB->successors()) {
// Don't consider back-edges.
if (CurLoop && Succ == CurLoop->getHeader())
continue;
// Don't consider successors exiting CurLoop.
if (isExitingLoop(CurLoop, getLoopFor(Succ)))
continue;
const MachineTraceMetrics::TraceBlockInfo *SuccTBI =
getHeightResources(Succ);
// Ignore cycles that aren't natural loops.
if (!SuccTBI)
continue;
// Pick the successor that would give this block the smallest InstrHeight.
unsigned Height = SuccTBI->InstrHeight;
if (!Best || Height < BestHeight)
Best = Succ, BestHeight = Height;
}
return Best;
}
// Get an Ensemble sub-class for the requested trace strategy.
MachineTraceMetrics::Ensemble *
MachineTraceMetrics::getEnsemble(MachineTraceMetrics::Strategy strategy) {
assert(strategy < TS_NumStrategies && "Invalid trace strategy enum");
Ensemble *&E = Ensembles[strategy];
if (E)
return E;
// Allocate new Ensemble on demand.
switch (strategy) {
case TS_MinInstrCount: return (E = new MinInstrCountEnsemble(this));
default: llvm_unreachable("Invalid trace strategy enum");
}
}
void MachineTraceMetrics::invalidate(const MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Invalidate traces through BB#" << MBB->getNumber() << '\n');
BlockInfo[MBB->getNumber()].invalidate();
for (unsigned i = 0; i != TS_NumStrategies; ++i)
if (Ensembles[i])
Ensembles[i]->invalidate(MBB);
}
void MachineTraceMetrics::verifyAnalysis() const {
if (!MF)
return;
#ifndef NDEBUG
assert(BlockInfo.size() == MF->getNumBlockIDs() && "Outdated BlockInfo size");
for (unsigned i = 0; i != TS_NumStrategies; ++i)
if (Ensembles[i])
Ensembles[i]->verify();
#endif
}
//===----------------------------------------------------------------------===//
// Trace building
//===----------------------------------------------------------------------===//
//
// Traces are built by two CFG traversals. To avoid recomputing too much, use a
// set abstraction that confines the search to the current loop, and doesn't
// revisit blocks.
namespace {
struct LoopBounds {
MutableArrayRef<MachineTraceMetrics::TraceBlockInfo> Blocks;
SmallPtrSet<const MachineBasicBlock*, 8> Visited;
const MachineLoopInfo *Loops;
bool Downward;
LoopBounds(MutableArrayRef<MachineTraceMetrics::TraceBlockInfo> blocks,
const MachineLoopInfo *loops)
: Blocks(blocks), Loops(loops), Downward(false) {}
};
}
// Specialize po_iterator_storage in order to prune the post-order traversal so
// it is limited to the current loop and doesn't traverse the loop back edges.
namespace llvm {
template<>
class po_iterator_storage<LoopBounds, true> {
LoopBounds &LB;
public:
po_iterator_storage(LoopBounds &lb) : LB(lb) {}
void finishPostorder(const MachineBasicBlock*) {}
bool insertEdge(const MachineBasicBlock *From, const MachineBasicBlock *To) {
// Skip already visited To blocks.
MachineTraceMetrics::TraceBlockInfo &TBI = LB.Blocks[To->getNumber()];
if (LB.Downward ? TBI.hasValidHeight() : TBI.hasValidDepth())
return false;
// From is null once when To is the trace center block.
if (From) {
if (const MachineLoop *FromLoop = LB.Loops->getLoopFor(From)) {
// Don't follow backedges, don't leave FromLoop when going upwards.
if ((LB.Downward ? To : From) == FromLoop->getHeader())
return false;
// Don't leave FromLoop.
if (isExitingLoop(FromLoop, LB.Loops->getLoopFor(To)))
return false;
}
}
// To is a new block. Mark the block as visited in case the CFG has cycles
// that MachineLoopInfo didn't recognize as a natural loop.
return LB.Visited.insert(To).second;
}
};
}
/// Compute the trace through MBB.
void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Computing " << getName() << " trace through BB#"
<< MBB->getNumber() << '\n');
// Set up loop bounds for the backwards post-order traversal.
LoopBounds Bounds(BlockInfo, MTM.Loops);
// Run an upwards post-order search for the trace start.
Bounds.Downward = false;
Bounds.Visited.clear();
for (auto I : inverse_post_order_ext(MBB, Bounds)) {
DEBUG(dbgs() << " pred for BB#" << I->getNumber() << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the predecessors have been visited, pick the preferred one.
TBI.Pred = pickTracePred(I);
DEBUG({
if (TBI.Pred)
dbgs() << "BB#" << TBI.Pred->getNumber() << '\n';
else
dbgs() << "null\n";
});
// The trace leading to I is now known, compute the depth resources.
computeDepthResources(I);
}
// Run a downwards post-order search for the trace end.
Bounds.Downward = true;
Bounds.Visited.clear();
for (auto I : post_order_ext(MBB, Bounds)) {
DEBUG(dbgs() << " succ for BB#" << I->getNumber() << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the successors have been visited, pick the preferred one.
TBI.Succ = pickTraceSucc(I);
DEBUG({
if (TBI.Succ)
dbgs() << "BB#" << TBI.Succ->getNumber() << '\n';
else
dbgs() << "null\n";
});
// The trace leaving I is now known, compute the height resources.
computeHeightResources(I);
}
}
/// Invalidate traces through BadMBB.
void
MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
SmallVector<const MachineBasicBlock*, 16> WorkList;
TraceBlockInfo &BadTBI = BlockInfo[BadMBB->getNumber()];
// Invalidate height resources of blocks above MBB.
if (BadTBI.hasValidHeight()) {
BadTBI.invalidateHeight();
WorkList.push_back(BadMBB);
do {
const MachineBasicBlock *MBB = WorkList.pop_back_val();
DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName()
<< " height.\n");
// Find any MBB predecessors that have MBB as their preferred successor.
// They are the only ones that need to be invalidated.
for (const MachineBasicBlock *Pred : MBB->predecessors()) {
TraceBlockInfo &TBI = BlockInfo[Pred->getNumber()];
if (!TBI.hasValidHeight())
continue;
if (TBI.Succ == MBB) {
TBI.invalidateHeight();
WorkList.push_back(Pred);
continue;
}
// Verify that TBI.Succ is actually a successor of Pred.
assert((!TBI.Succ || Pred->isSuccessor(TBI.Succ)) && "CFG changed");
}
} while (!WorkList.empty());
}
// Invalidate depth resources of blocks below MBB.
if (BadTBI.hasValidDepth()) {
BadTBI.invalidateDepth();
WorkList.push_back(BadMBB);
do {
const MachineBasicBlock *MBB = WorkList.pop_back_val();
DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName()
<< " depth.\n");
// Find any MBB successors that have MBB as their preferred predecessor.
// They are the only ones that need to be invalidated.
for (const MachineBasicBlock *Succ : MBB->successors()) {
TraceBlockInfo &TBI = BlockInfo[Succ->getNumber()];
if (!TBI.hasValidDepth())
continue;
if (TBI.Pred == MBB) {
TBI.invalidateDepth();
WorkList.push_back(Succ);
continue;
}
// Verify that TBI.Pred is actually a predecessor of Succ.
assert((!TBI.Pred || Succ->isPredecessor(TBI.Pred)) && "CFG changed");
}
} while (!WorkList.empty());
}
// Clear any per-instruction data. We only have to do this for BadMBB itself
// because the instructions in that block may change. Other blocks may be
// invalidated, but their instructions will stay the same, so there is no
// need to erase the Cycle entries. They will be overwritten when we
// recompute.
for (const auto &I : *BadMBB)
Cycles.erase(&I);
}
void MachineTraceMetrics::Ensemble::verify() const {
#ifndef NDEBUG
assert(BlockInfo.size() == MTM.MF->getNumBlockIDs() &&
"Outdated BlockInfo size");
for (unsigned Num = 0, e = BlockInfo.size(); Num != e; ++Num) {
const TraceBlockInfo &TBI = BlockInfo[Num];
if (TBI.hasValidDepth() && TBI.Pred) {
const MachineBasicBlock *MBB = MTM.MF->getBlockNumbered(Num);
assert(MBB->isPredecessor(TBI.Pred) && "CFG doesn't match trace");
assert(BlockInfo[TBI.Pred->getNumber()].hasValidDepth() &&
"Trace is broken, depth should have been invalidated.");
const MachineLoop *Loop = getLoopFor(MBB);
assert(!(Loop && MBB == Loop->getHeader()) && "Trace contains backedge");
}
if (TBI.hasValidHeight() && TBI.Succ) {
const MachineBasicBlock *MBB = MTM.MF->getBlockNumbered(Num);
assert(MBB->isSuccessor(TBI.Succ) && "CFG doesn't match trace");
assert(BlockInfo[TBI.Succ->getNumber()].hasValidHeight() &&
"Trace is broken, height should have been invalidated.");
const MachineLoop *Loop = getLoopFor(MBB);
const MachineLoop *SuccLoop = getLoopFor(TBI.Succ);
assert(!(Loop && Loop == SuccLoop && TBI.Succ == Loop->getHeader()) &&
"Trace contains backedge");
}
}
#endif
}
//===----------------------------------------------------------------------===//
// Data Dependencies
//===----------------------------------------------------------------------===//
//
// Compute the depth and height of each instruction based on data dependencies
// and instruction latencies. These cycle numbers assume that the CPU can issue
// an infinite number of instructions per cycle as long as their dependencies
// are ready.
// A data dependency is represented as a defining MI and operand numbers on the
// defining and using MI.
namespace {
struct DataDep {
const MachineInstr *DefMI;
unsigned DefOp;
unsigned UseOp;
DataDep(const MachineInstr *DefMI, unsigned DefOp, unsigned UseOp)
: DefMI(DefMI), DefOp(DefOp), UseOp(UseOp) {}
/// Create a DataDep from an SSA form virtual register.
DataDep(const MachineRegisterInfo *MRI, unsigned VirtReg, unsigned UseOp)
: UseOp(UseOp) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg));
MachineRegisterInfo::def_iterator DefI = MRI->def_begin(VirtReg);
assert(!DefI.atEnd() && "Register has no defs");
DefMI = DefI->getParent();
DefOp = DefI.getOperandNo();
assert((++DefI).atEnd() && "Register has multiple defs");
}
};
}
// Get the input data dependencies that must be ready before UseMI can issue.
// Return true if UseMI has any physreg operands.
static bool getDataDeps(const MachineInstr *UseMI,
SmallVectorImpl<DataDep> &Deps,
const MachineRegisterInfo *MRI) {
// Debug values should not be included in any calculations.
if (UseMI->isDebugValue())
return false;
bool HasPhysRegs = false;
for (MachineInstr::const_mop_iterator I = UseMI->operands_begin(),
E = UseMI->operands_end(); I != E; ++I) {
const MachineOperand &MO = *I;
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
HasPhysRegs = true;
continue;
}
// Collect virtual register reads.
if (MO.readsReg())
Deps.push_back(DataDep(MRI, Reg, UseMI->getOperandNo(I)));
}
return HasPhysRegs;
}
// Get the input data dependencies of a PHI instruction, using Pred as the
// preferred predecessor.
// This will add at most one dependency to Deps.
static void getPHIDeps(const MachineInstr *UseMI,
SmallVectorImpl<DataDep> &Deps,
const MachineBasicBlock *Pred,
const MachineRegisterInfo *MRI) {
// No predecessor at the beginning of a trace. Ignore dependencies.
if (!Pred)
return;
assert(UseMI->isPHI() && UseMI->getNumOperands() % 2 && "Bad PHI");
for (unsigned i = 1; i != UseMI->getNumOperands(); i += 2) {
if (UseMI->getOperand(i + 1).getMBB() == Pred) {
unsigned Reg = UseMI->getOperand(i).getReg();
Deps.push_back(DataDep(MRI, Reg, i));
return;
}
}
}
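// Illustrative example of the operand layout walked above: a PHI such as
//
//   %res = PHI %a, <BB#1>, %b, <BB#2>
//
// keeps each value in operand i and its incoming block in operand i+1, so
// with Pred == BB#2 exactly one DataDep (on %b) is pushed.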
// Keep track of physreg data dependencies by recording each live register unit.
// Associate each regunit with an instruction operand. Depending on the
// direction instructions are scanned, it could be the operand that defined the
// regunit, or the highest operand to read the regunit.
namespace {
struct LiveRegUnit {
unsigned RegUnit;
unsigned Cycle;
const MachineInstr *MI;
unsigned Op;
unsigned getSparseSetIndex() const { return RegUnit; }
LiveRegUnit(unsigned RU) : RegUnit(RU), Cycle(0), MI(nullptr), Op(0) {}
};
}
// Identify physreg dependencies for UseMI, and update the live regunit
// tracking set when scanning instructions downwards.
static void updatePhysDepsDownwards(const MachineInstr *UseMI,
SmallVectorImpl<DataDep> &Deps,
SparseSet<LiveRegUnit> &RegUnits,
const TargetRegisterInfo *TRI) {
SmallVector<unsigned, 8> Kills;
SmallVector<unsigned, 8> LiveDefOps;
for (MachineInstr::const_mop_iterator MI = UseMI->operands_begin(),
ME = UseMI->operands_end(); MI != ME; ++MI) {
const MachineOperand &MO = *MI;
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
// Track live defs and kills for updating RegUnits.
if (MO.isDef()) {
if (MO.isDead())
Kills.push_back(Reg);
else
LiveDefOps.push_back(UseMI->getOperandNo(MI));
} else if (MO.isKill())
Kills.push_back(Reg);
// Identify dependencies.
if (!MO.readsReg())
continue;
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
SparseSet<LiveRegUnit>::iterator I = RegUnits.find(*Units);
if (I == RegUnits.end())
continue;
Deps.push_back(DataDep(I->MI, I->Op, UseMI->getOperandNo(MI)));
break;
}
}
// Update RegUnits to reflect live registers after UseMI.
// First kills.
for (unsigned i = 0, e = Kills.size(); i != e; ++i)
for (MCRegUnitIterator Units(Kills[i], TRI); Units.isValid(); ++Units)
RegUnits.erase(*Units);
// Second, live defs.
for (unsigned i = 0, e = LiveDefOps.size(); i != e; ++i) {
unsigned DefOp = LiveDefOps[i];
for (MCRegUnitIterator Units(UseMI->getOperand(DefOp).getReg(), TRI);
Units.isValid(); ++Units) {
LiveRegUnit &LRU = RegUnits[*Units];
LRU.MI = UseMI;
LRU.Op = DefOp;
}
}
}
/// The length of the critical path through a trace is the maximum of two path
/// lengths:
///
/// 1. The maximum height+depth over all instructions in the trace center block.
///
/// 2. The longest cross-block dependency chain. For small blocks, it is
/// possible that the critical path through the trace doesn't include any
/// instructions in the block.
///
/// This function computes the second number from the live-in list of the
/// center block.
unsigned MachineTraceMetrics::Ensemble::
computeCrossBlockCriticalPath(const TraceBlockInfo &TBI) {
assert(TBI.HasValidInstrDepths && "Missing depth info");
assert(TBI.HasValidInstrHeights && "Missing height info");
unsigned MaxLen = 0;
for (unsigned i = 0, e = TBI.LiveIns.size(); i != e; ++i) {
const LiveInReg &LIR = TBI.LiveIns[i];
if (!TargetRegisterInfo::isVirtualRegister(LIR.Reg))
continue;
const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
// Ignore dependencies outside the current trace.
const TraceBlockInfo &DefTBI = BlockInfo[DefMI->getParent()->getNumber()];
if (!DefTBI.isUsefulDominator(TBI))
continue;
unsigned Len = LIR.Height + Cycles[DefMI].Depth;
MaxLen = std::max(MaxLen, Len);
}
return MaxLen;
}
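// Worked example (illustrative numbers): a live-in virtual register whose
// def sits at Depth = 6 in an earlier trace block, and whose live-in Height
// below the center block is 5, contributes a cross-block path of 6 + 5 = 11
// cycles; MaxLen is the maximum such sum over all virtual live-ins.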
/// Compute instruction depths for all instructions above or in MBB in its
/// trace. This assumes that the trace through MBB has already been computed.
void MachineTraceMetrics::Ensemble::
computeInstrDepths(const MachineBasicBlock *MBB) {
// The top of the trace may already be computed, and HasValidInstrDepths
// implies Head->HasValidInstrDepths, so we only need to start from the first
// block in the trace that needs to be recomputed.
SmallVector<const MachineBasicBlock*, 8> Stack;
do {
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
assert(TBI.hasValidDepth() && "Incomplete trace");
if (TBI.HasValidInstrDepths)
break;
Stack.push_back(MBB);
MBB = TBI.Pred;
} while (MBB);
// FIXME: If MBB is non-null at this point, it is the last pre-computed block
// in the trace. We should track any live-out physregs that were defined in
// the trace. This is quite rare in SSA form, typically created by CSE
// hoisting a compare.
SparseSet<LiveRegUnit> RegUnits;
RegUnits.setUniverse(MTM.TRI->getNumRegUnits());
// Go through trace blocks in top-down order, stopping after the center block.
SmallVector<DataDep, 8> Deps;
while (!Stack.empty()) {
MBB = Stack.pop_back_val();
DEBUG(dbgs() << "\nDepths for BB#" << MBB->getNumber() << ":\n");
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
TBI.HasValidInstrDepths = true;
TBI.CriticalPath = 0;
// Print out resource depths here as well.
DEBUG({
dbgs() << format("%7u Instructions\n", TBI.InstrDepth);
ArrayRef<unsigned> PRDepths = getProcResourceDepths(MBB->getNumber());
for (unsigned K = 0; K != PRDepths.size(); ++K)
if (PRDepths[K]) {
unsigned Factor = MTM.SchedModel.getResourceFactor(K);
dbgs() << format("%6uc @ ", MTM.getCycles(PRDepths[K]))
<< MTM.SchedModel.getProcResource(K)->Name << " ("
<< PRDepths[K]/Factor << " ops x" << Factor << ")\n";
}
});
// Also compute the critical path length through MBB when possible.
if (TBI.HasValidInstrHeights)
TBI.CriticalPath = computeCrossBlockCriticalPath(TBI);
for (const auto &UseMI : *MBB) {
// Collect all data dependencies.
Deps.clear();
if (UseMI.isPHI())
getPHIDeps(&UseMI, Deps, TBI.Pred, MTM.MRI);
else if (getDataDeps(&UseMI, Deps, MTM.MRI))
updatePhysDepsDownwards(&UseMI, Deps, RegUnits, MTM.TRI);
// Filter and process dependencies, computing the earliest issue cycle.
unsigned Cycle = 0;
for (const DataDep &Dep : Deps) {
const TraceBlockInfo &DepTBI =
BlockInfo[Dep.DefMI->getParent()->getNumber()];
// Ignore dependencies from outside the current trace.
if (!DepTBI.isUsefulDominator(TBI))
continue;
assert(DepTBI.HasValidInstrDepths && "Inconsistent dependency");
unsigned DepCycle = Cycles.lookup(Dep.DefMI).Depth;
// Add latency if DefMI is a real instruction. Transients get latency 0.
if (!Dep.DefMI->isTransient())
DepCycle += MTM.SchedModel
.computeOperandLatency(Dep.DefMI, Dep.DefOp, &UseMI, Dep.UseOp);
Cycle = std::max(Cycle, DepCycle);
}
// Remember the instruction depth.
InstrCycles &MICycles = Cycles[&UseMI];
MICycles.Depth = Cycle;
if (!TBI.HasValidInstrHeights) {
DEBUG(dbgs() << Cycle << '\t' << UseMI);
continue;
}
// Update critical path length.
TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Height);
DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << UseMI);
}
}
}
// Identify physreg dependencies for MI when scanning instructions upwards.
// On entry, Height is MI's issue height as computed from virtual register
// dependencies alone; the returned height also accounts for live regunits.
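// For example, if a regunit defined by MI is read by an instruction 4 cycles
// below, MI's height is raised to at least 4 plus the def-to-read operand
// latency.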
static unsigned updatePhysDepsUpwards(const MachineInstr *MI, unsigned Height,
SparseSet<LiveRegUnit> &RegUnits,
const TargetSchedModel &SchedModel,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
SmallVector<unsigned, 8> ReadOps;
for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
const MachineOperand &MO = *MOI;
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
if (MO.readsReg())
ReadOps.push_back(MI->getOperandNo(MOI));
if (!MO.isDef())
continue;
// This is a def of Reg. Remove corresponding entries from RegUnits, and
// update MI Height to consider the physreg dependencies.
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
SparseSet<LiveRegUnit>::iterator I = RegUnits.find(*Units);
if (I == RegUnits.end())
continue;
unsigned DepHeight = I->Cycle;
if (!MI->isTransient()) {
        // We may not know the UseMI of this dependency if it came from the
        // live-in list. SchedModel can handle a NULL UseMI.
DepHeight += SchedModel
.computeOperandLatency(MI, MI->getOperandNo(MOI), I->MI, I->Op);
}
Height = std::max(Height, DepHeight);
// This regunit is dead above MI.
RegUnits.erase(I);
}
}
// Now we know the height of MI. Update any regunits read.
for (unsigned i = 0, e = ReadOps.size(); i != e; ++i) {
unsigned Reg = MI->getOperand(ReadOps[i]).getReg();
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
LiveRegUnit &LRU = RegUnits[*Units];
// Set the height to the highest reader of the unit.
if (LRU.Cycle <= Height && LRU.MI != MI) {
LRU.Cycle = Height;
LRU.MI = MI;
LRU.Op = ReadOps[i];
}
}
}
return Height;
}
typedef DenseMap<const MachineInstr *, unsigned> MIHeightMap;
// Push the height of DefMI upwards if required to match UseMI.
// Return true if this is the first time DefMI was seen.
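// For example, a use at height 2 with a def-to-use operand latency of 3
// pushes the defining instruction to a height of at least 2 + 3 = 5.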
static bool pushDepHeight(const DataDep &Dep,
const MachineInstr *UseMI, unsigned UseHeight,
MIHeightMap &Heights,
const TargetSchedModel &SchedModel,
const TargetInstrInfo *TII) {
// Adjust height by Dep.DefMI latency.
if (!Dep.DefMI->isTransient())
UseHeight += SchedModel.computeOperandLatency(Dep.DefMI, Dep.DefOp,
UseMI, Dep.UseOp);
// Update Heights[DefMI] to be the maximum height seen.
MIHeightMap::iterator I;
bool New;
std::tie(I, New) = Heights.insert(std::make_pair(Dep.DefMI, UseHeight));
if (New)
return true;
// DefMI has been pushed before. Give it the max height.
if (I->second < UseHeight)
I->second = UseHeight;
return false;
}
/// Assuming that the virtual register defined by DefMI:DefOp was used by
/// Trace.back(), add it to the live-in lists of all the blocks in Trace. Stop
/// when reaching the block that contains DefMI.
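/// Entries are added with a height of 0; computeInstrHeights() fills in the
/// real heights once they are known.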
void MachineTraceMetrics::Ensemble::
addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
ArrayRef<const MachineBasicBlock*> Trace) {
assert(!Trace.empty() && "Trace should contain at least one block");
unsigned Reg = DefMI->getOperand(DefOp).getReg();
assert(TargetRegisterInfo::isVirtualRegister(Reg));
const MachineBasicBlock *DefMBB = DefMI->getParent();
// Reg is live-in to all blocks in Trace that follow DefMBB.
for (unsigned i = Trace.size(); i; --i) {
const MachineBasicBlock *MBB = Trace[i-1];
if (MBB == DefMBB)
return;
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
// Just add the register. The height will be updated later.
TBI.LiveIns.push_back(Reg);
}
}
/// Compute instruction heights in the trace through MBB. This updates MBB and
/// the blocks below it in the trace. It is assumed that the trace has already
/// been computed.
void MachineTraceMetrics::Ensemble::
computeInstrHeights(const MachineBasicBlock *MBB) {
// The bottom of the trace may already be computed.
// Find the blocks that need updating.
SmallVector<const MachineBasicBlock*, 8> Stack;
do {
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
assert(TBI.hasValidHeight() && "Incomplete trace");
if (TBI.HasValidInstrHeights)
break;
Stack.push_back(MBB);
TBI.LiveIns.clear();
MBB = TBI.Succ;
} while (MBB);
// As we move upwards in the trace, keep track of instructions that are
// required by deeper trace instructions. Map MI -> height required so far.
MIHeightMap Heights;
// For physregs, the def isn't known when we see the use.
// Instead, keep track of the highest use of each regunit.
SparseSet<LiveRegUnit> RegUnits;
RegUnits.setUniverse(MTM.TRI->getNumRegUnits());
// If the bottom of the trace was already precomputed, initialize heights
// from its live-in list.
// MBB is the highest precomputed block in the trace.
if (MBB) {
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
for (LiveInReg &LI : TBI.LiveIns) {
if (TargetRegisterInfo::isVirtualRegister(LI.Reg)) {
// For virtual registers, the def latency is included.
unsigned &Height = Heights[MTM.MRI->getVRegDef(LI.Reg)];
if (Height < LI.Height)
Height = LI.Height;
} else {
// For register units, the def latency is not included because we don't
// know the def yet.
RegUnits[LI.Reg].Cycle = LI.Height;
}
}
}
// Go through the trace blocks in bottom-up order.
SmallVector<DataDep, 8> Deps;
for (;!Stack.empty(); Stack.pop_back()) {
MBB = Stack.back();
DEBUG(dbgs() << "Heights for BB#" << MBB->getNumber() << ":\n");
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
TBI.HasValidInstrHeights = true;
TBI.CriticalPath = 0;
DEBUG({
dbgs() << format("%7u Instructions\n", TBI.InstrHeight);
ArrayRef<unsigned> PRHeights = getProcResourceHeights(MBB->getNumber());
for (unsigned K = 0; K != PRHeights.size(); ++K)
if (PRHeights[K]) {
unsigned Factor = MTM.SchedModel.getResourceFactor(K);
dbgs() << format("%6uc @ ", MTM.getCycles(PRHeights[K]))
<< MTM.SchedModel.getProcResource(K)->Name << " ("
<< PRHeights[K]/Factor << " ops x" << Factor << ")\n";
}
});
// Get dependencies from PHIs in the trace successor.
const MachineBasicBlock *Succ = TBI.Succ;
// If MBB is the last block in the trace, and it has a back-edge to the
// loop header, get loop-carried dependencies from PHIs in the header. For
// that purpose, pretend that all the loop header PHIs have height 0.
if (!Succ)
if (const MachineLoop *Loop = getLoopFor(MBB))
if (MBB->isSuccessor(Loop->getHeader()))
Succ = Loop->getHeader();
if (Succ) {
for (const auto &PHI : *Succ) {
if (!PHI.isPHI())
break;
Deps.clear();
getPHIDeps(&PHI, Deps, MBB, MTM.MRI);
if (!Deps.empty()) {
// Loop header PHI heights are all 0.
unsigned Height = TBI.Succ ? Cycles.lookup(&PHI).Height : 0;
DEBUG(dbgs() << "pred\t" << Height << '\t' << PHI);
if (pushDepHeight(Deps.front(), &PHI, Height,
Heights, MTM.SchedModel, MTM.TII))
addLiveIns(Deps.front().DefMI, Deps.front().DefOp, Stack);
}
}
}
// Go through the block backwards.
for (MachineBasicBlock::const_iterator BI = MBB->end(), BB = MBB->begin();
BI != BB;) {
const MachineInstr *MI = --BI;
// Find the MI height as determined by virtual register uses in the
// trace below.
unsigned Cycle = 0;
MIHeightMap::iterator HeightI = Heights.find(MI);
if (HeightI != Heights.end()) {
Cycle = HeightI->second;
// We won't be seeing any more MI uses.
Heights.erase(HeightI);
}
// Don't process PHI deps. They depend on the specific predecessor, and
// we'll get them when visiting the predecessor.
Deps.clear();
bool HasPhysRegs = !MI->isPHI() && getDataDeps(MI, Deps, MTM.MRI);
// There may also be regunit dependencies to include in the height.
if (HasPhysRegs)
Cycle = updatePhysDepsUpwards(MI, Cycle, RegUnits,
MTM.SchedModel, MTM.TII, MTM.TRI);
// Update the required height of any virtual registers read by MI.
for (const DataDep &Dep : Deps)
if (pushDepHeight(Dep, MI, Cycle, Heights, MTM.SchedModel, MTM.TII))
addLiveIns(Dep.DefMI, Dep.DefOp, Stack);
InstrCycles &MICycles = Cycles[MI];
MICycles.Height = Cycle;
if (!TBI.HasValidInstrDepths) {
DEBUG(dbgs() << Cycle << '\t' << *MI);
continue;
}
// Update critical path length.
TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Depth);
DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << *MI);
}
// Update virtual live-in heights. They were added by addLiveIns() with a 0
// height because the final height isn't known until now.
DEBUG(dbgs() << "BB#" << MBB->getNumber() << " Live-ins:");
for (LiveInReg &LIR : TBI.LiveIns) {
const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
LIR.Height = Heights.lookup(DefMI);
DEBUG(dbgs() << ' ' << PrintReg(LIR.Reg) << '@' << LIR.Height);
}
// Transfer the live regunits to the live-in list.
for (SparseSet<LiveRegUnit>::const_iterator
RI = RegUnits.begin(), RE = RegUnits.end(); RI != RE; ++RI) {
TBI.LiveIns.push_back(LiveInReg(RI->RegUnit, RI->Cycle));
DEBUG(dbgs() << ' ' << PrintRegUnit(RI->RegUnit, MTM.TRI)
<< '@' << RI->Cycle);
}
DEBUG(dbgs() << '\n');
if (!TBI.HasValidInstrDepths)
continue;
// Add live-ins to the critical path length.
TBI.CriticalPath = std::max(TBI.CriticalPath,
computeCrossBlockCriticalPath(TBI));
DEBUG(dbgs() << "Critical path: " << TBI.CriticalPath << '\n');
}
}
MachineTraceMetrics::Trace
MachineTraceMetrics::Ensemble::getTrace(const MachineBasicBlock *MBB) {
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
if (!TBI.hasValidDepth() || !TBI.hasValidHeight())
computeTrace(MBB);
if (!TBI.HasValidInstrDepths)
computeInstrDepths(MBB);
if (!TBI.HasValidInstrHeights)
computeInstrHeights(MBB);
return Trace(*this, TBI);
}
unsigned
MachineTraceMetrics::Trace::getInstrSlack(const MachineInstr *MI) const {
assert(MI && "Not an instruction.");
assert(getBlockNum() == unsigned(MI->getParent()->getNumber()) &&
"MI must be in the trace center block");
InstrCycles Cyc = getInstrCycles(MI);
return getCriticalPath() - (Cyc.Depth + Cyc.Height);
}
unsigned
MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr *PHI) const {
const MachineBasicBlock *MBB = TE.MTM.MF->getBlockNumbered(getBlockNum());
SmallVector<DataDep, 1> Deps;
getPHIDeps(PHI, Deps, MBB, TE.MTM.MRI);
assert(Deps.size() == 1 && "PHI doesn't have MBB as a predecessor");
DataDep &Dep = Deps.front();
unsigned DepCycle = getInstrCycles(Dep.DefMI).Depth;
// Add latency if DefMI is a real instruction. Transients get latency 0.
if (!Dep.DefMI->isTransient())
DepCycle += TE.MTM.SchedModel
.computeOperandLatency(Dep.DefMI, Dep.DefOp, PHI, Dep.UseOp);
return DepCycle;
}
/// When Bottom is set, include instructions in the current block in the
/// estimate.
unsigned MachineTraceMetrics::Trace::getResourceDepth(bool Bottom) const {
// Find the limiting processor resource.
// Numbers have been pre-scaled to be comparable.
unsigned PRMax = 0;
ArrayRef<unsigned> PRDepths = TE.getProcResourceDepths(getBlockNum());
if (Bottom) {
ArrayRef<unsigned> PRCycles = TE.MTM.getProcResourceCycles(getBlockNum());
for (unsigned K = 0; K != PRDepths.size(); ++K)
PRMax = std::max(PRMax, PRDepths[K] + PRCycles[K]);
} else {
for (unsigned K = 0; K != PRDepths.size(); ++K)
PRMax = std::max(PRMax, PRDepths[K]);
}
// Convert to cycle count.
PRMax = TE.MTM.getCycles(PRMax);
  // All instructions before the current block...
  unsigned Instrs = TBI.InstrDepth;
  // ...plus, when Bottom is set, the instructions in the current block.
if (Bottom)
Instrs += TE.MTM.BlockInfo[getBlockNum()].InstrCount;
if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
Instrs /= IW;
// Assume issue width 1 without a schedule model.
return std::max(Instrs, PRMax);
}
unsigned MachineTraceMetrics::Trace::getResourceLength(
ArrayRef<const MachineBasicBlock *> Extrablocks,
ArrayRef<const MCSchedClassDesc *> ExtraInstrs,
ArrayRef<const MCSchedClassDesc *> RemoveInstrs) const {
// Add up resources above and below the center block.
ArrayRef<unsigned> PRDepths = TE.getProcResourceDepths(getBlockNum());
ArrayRef<unsigned> PRHeights = TE.getProcResourceHeights(getBlockNum());
unsigned PRMax = 0;
// Capture computing cycles from extra instructions
auto extraCycles = [this](ArrayRef<const MCSchedClassDesc *> Instrs,
unsigned ResourceIdx)
->unsigned {
unsigned Cycles = 0;
for (const MCSchedClassDesc *SC : Instrs) {
if (!SC->isValid())
continue;
for (TargetSchedModel::ProcResIter
PI = TE.MTM.SchedModel.getWriteProcResBegin(SC),
PE = TE.MTM.SchedModel.getWriteProcResEnd(SC);
PI != PE; ++PI) {
if (PI->ProcResourceIdx != ResourceIdx)
continue;
Cycles +=
(PI->Cycles * TE.MTM.SchedModel.getResourceFactor(ResourceIdx));
}
}
return Cycles;
};
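  // For each processor resource kind, combine the depth and height cycles of
  // the center block, then add cycles from the extra blocks and instructions
  // and subtract the removed ones (assumed not to exceed the running total).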
for (unsigned K = 0; K != PRDepths.size(); ++K) {
unsigned PRCycles = PRDepths[K] + PRHeights[K];
for (const MachineBasicBlock *MBB : Extrablocks)
PRCycles += TE.MTM.getProcResourceCycles(MBB->getNumber())[K];
PRCycles += extraCycles(ExtraInstrs, K);
PRCycles -= extraCycles(RemoveInstrs, K);
PRMax = std::max(PRMax, PRCycles);
}
// Convert to cycle count.
PRMax = TE.MTM.getCycles(PRMax);
// Instrs: #instructions in current trace outside current block.
unsigned Instrs = TBI.InstrDepth + TBI.InstrHeight;
// Add instruction count from the extra blocks.
for (const MachineBasicBlock *MBB : Extrablocks)
Instrs += TE.MTM.getResources(MBB)->InstrCount;
Instrs += ExtraInstrs.size();
Instrs -= RemoveInstrs.size();
if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
Instrs /= IW;
// Assume issue width 1 without a schedule model.
return std::max(Instrs, PRMax);
}
bool MachineTraceMetrics::Trace::isDepInTrace(const MachineInstr *DefMI,
const MachineInstr *UseMI) const {
if (DefMI->getParent() == UseMI->getParent())
return true;
const TraceBlockInfo &DepTBI = TE.BlockInfo[DefMI->getParent()->getNumber()];
const TraceBlockInfo &TBI = TE.BlockInfo[UseMI->getParent()->getNumber()];
return DepTBI.isUsefulDominator(TBI);
}
void MachineTraceMetrics::Ensemble::print(raw_ostream &OS) const {
OS << getName() << " ensemble:\n";
for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) {
OS << " BB#" << i << '\t';
BlockInfo[i].print(OS);
OS << '\n';
}
}
void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const {
if (hasValidDepth()) {
OS << "depth=" << InstrDepth;
if (Pred)
OS << " pred=BB#" << Pred->getNumber();
else
OS << " pred=null";
OS << " head=BB#" << Head;
if (HasValidInstrDepths)
OS << " +instrs";
} else
OS << "depth invalid";
OS << ", ";
if (hasValidHeight()) {
OS << "height=" << InstrHeight;
if (Succ)
OS << " succ=BB#" << Succ->getNumber();
else
OS << " succ=null";
OS << " tail=BB#" << Tail;
if (HasValidInstrHeights)
OS << " +instrs";
} else
OS << "height invalid";
if (HasValidInstrDepths && HasValidInstrHeights)
OS << ", crit=" << CriticalPath;
}
void MachineTraceMetrics::Trace::print(raw_ostream &OS) const {
unsigned MBBNum = &TBI - &TE.BlockInfo[0];
OS << TE.getName() << " trace BB#" << TBI.Head << " --> BB#" << MBBNum
<< " --> BB#" << TBI.Tail << ':';
if (TBI.hasValidHeight() && TBI.hasValidDepth())
OS << ' ' << getInstrCount() << " instrs.";
if (TBI.HasValidInstrDepths && TBI.HasValidInstrHeights)
OS << ' ' << TBI.CriticalPath << " cycles.";
const MachineTraceMetrics::TraceBlockInfo *Block = &TBI;
OS << "\nBB#" << MBBNum;
while (Block->hasValidDepth() && Block->Pred) {
unsigned Num = Block->Pred->getNumber();
OS << " <- BB#" << Num;
Block = &TE.BlockInfo[Num];
}
Block = &TBI;
OS << "\n ";
while (Block->hasValidHeight() && Block->Succ) {
unsigned Num = Block->Succ->getNumber();
OS << " -> BB#" << Num;
Block = &TE.BlockInfo[Num];
}
OS << '\n';
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/TailDuplication.cpp | //===-- TailDuplication.cpp - Duplicate blocks into predecessors' tails ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass duplicates basic blocks ending in unconditional branches into
// the tails of their predecessors.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "tailduplication"
STATISTIC(NumTails , "Number of tails duplicated");
STATISTIC(NumTailDups , "Number of tail duplicated blocks");
STATISTIC(NumInstrDups , "Additional instructions due to tail duplication");
STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
STATISTIC(NumAddedPHIs , "Number of phis added");
// Heuristic for tail duplication.
static cl::opt<unsigned>
TailDuplicateSize("tail-dup-size",
cl::desc("Maximum instructions to consider tail duplicating"),
cl::init(2), cl::Hidden);
static cl::opt<bool>
TailDupVerify("tail-dup-verify",
cl::desc("Verify sanity of PHI instructions during taildup"),
cl::init(false), cl::Hidden);
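// Caps the total number of tails duplicated; NumTails is checked against this
// limit in TailDuplicateBlocks(). It defaults to unlimited (~0U) and is
// presumably meant for debugging and bisection.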
static cl::opt<unsigned>
TailDupLimit("tail-dup-limit", cl::init(~0U), cl::Hidden);
typedef std::vector<std::pair<MachineBasicBlock*,unsigned> > AvailableValsTy;
namespace {
/// TailDuplicatePass - Perform tail duplication.
class TailDuplicatePass : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const MachineBranchProbabilityInfo *MBPI;
MachineModuleInfo *MMI;
MachineRegisterInfo *MRI;
std::unique_ptr<RegScavenger> RS;
bool PreRegAlloc;
// SSAUpdateVRs - A list of virtual registers for which to update SSA form.
SmallVector<unsigned, 16> SSAUpdateVRs;
// SSAUpdateVals - For each virtual register in SSAUpdateVals keep a list of
// source virtual registers.
DenseMap<unsigned, AvailableValsTy> SSAUpdateVals;
public:
static char ID;
explicit TailDuplicatePass() :
MachineFunctionPass(ID), PreRegAlloc(false) {}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
private:
void AddSSAUpdateEntry(unsigned OrigReg, unsigned NewReg,
MachineBasicBlock *BB);
void ProcessPHI(MachineInstr *MI, MachineBasicBlock *TailBB,
MachineBasicBlock *PredBB,
DenseMap<unsigned, unsigned> &LocalVRMap,
SmallVectorImpl<std::pair<unsigned,unsigned> > &Copies,
const DenseSet<unsigned> &UsedByPhi,
bool Remove);
void DuplicateInstruction(MachineInstr *MI,
MachineBasicBlock *TailBB,
MachineBasicBlock *PredBB,
MachineFunction &MF,
DenseMap<unsigned, unsigned> &LocalVRMap,
const DenseSet<unsigned> &UsedByPhi);
void UpdateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallSetVector<MachineBasicBlock*, 8> &Succs);
bool TailDuplicateBlocks(MachineFunction &MF);
bool shouldTailDuplicate(const MachineFunction &MF,
bool IsSimple, MachineBasicBlock &TailBB);
bool isSimpleBB(MachineBasicBlock *TailBB);
bool canCompletelyDuplicateBB(MachineBasicBlock &BB);
bool duplicateSimpleBB(MachineBasicBlock *TailBB,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
const DenseSet<unsigned> &RegsUsedByPhi,
SmallVectorImpl<MachineInstr *> &Copies);
bool TailDuplicate(MachineBasicBlock *TailBB,
bool IsSimple,
MachineFunction &MF,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallVectorImpl<MachineInstr *> &Copies);
bool TailDuplicateAndUpdate(MachineBasicBlock *MBB,
bool IsSimple,
MachineFunction &MF);
void RemoveDeadBlock(MachineBasicBlock *MBB);
};
char TailDuplicatePass::ID = 0;
}
char &llvm::TailDuplicateID = TailDuplicatePass::ID;
INITIALIZE_PASS(TailDuplicatePass, "tailduplication", "Tail Duplication",
false, false)
bool TailDuplicatePass::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
PreRegAlloc = MRI->isSSA();
RS.reset();
if (MRI->tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF))
RS.reset(new RegScavenger());
bool MadeChange = false;
while (TailDuplicateBlocks(MF))
MadeChange = true;
return MadeChange;
}
void TailDuplicatePass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineBranchProbabilityInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
static void VerifyPHIs(MachineFunction &MF, bool CheckExtra) {
for (MachineFunction::iterator I = ++MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = I;
SmallSetVector<MachineBasicBlock*, 8> Preds(MBB->pred_begin(),
MBB->pred_end());
MachineBasicBlock::iterator MI = MBB->begin();
while (MI != MBB->end()) {
if (!MI->isPHI())
break;
for (SmallSetVector<MachineBasicBlock *, 8>::iterator PI = Preds.begin(),
PE = Preds.end(); PI != PE; ++PI) {
MachineBasicBlock *PredBB = *PI;
bool Found = false;
for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
MachineBasicBlock *PHIBB = MI->getOperand(i+1).getMBB();
if (PHIBB == PredBB) {
Found = true;
break;
}
}
if (!Found) {
dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI;
dbgs() << " missing input from predecessor BB#"
<< PredBB->getNumber() << '\n';
llvm_unreachable(nullptr);
}
}
for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
MachineBasicBlock *PHIBB = MI->getOperand(i+1).getMBB();
if (CheckExtra && !Preds.count(PHIBB)) {
dbgs() << "Warning: malformed PHI in BB#" << MBB->getNumber()
<< ": " << *MI;
dbgs() << " extra input from predecessor BB#"
<< PHIBB->getNumber() << '\n';
llvm_unreachable(nullptr);
}
if (PHIBB->getNumber() < 0) {
dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI;
dbgs() << " non-existing BB#" << PHIBB->getNumber() << '\n';
llvm_unreachable(nullptr);
}
}
++MI;
}
}
}
/// TailDuplicateAndUpdate - Tail duplicate the block and clean up.
bool
TailDuplicatePass::TailDuplicateAndUpdate(MachineBasicBlock *MBB,
bool IsSimple,
MachineFunction &MF) {
// Save the successors list.
SmallSetVector<MachineBasicBlock*, 8> Succs(MBB->succ_begin(),
MBB->succ_end());
SmallVector<MachineBasicBlock*, 8> TDBBs;
SmallVector<MachineInstr*, 16> Copies;
if (!TailDuplicate(MBB, IsSimple, MF, TDBBs, Copies))
return false;
++NumTails;
SmallVector<MachineInstr*, 8> NewPHIs;
MachineSSAUpdater SSAUpdate(MF, &NewPHIs);
// TailBB's immediate successors are now successors of those predecessors
// which duplicated TailBB. Add the predecessors as sources to the PHI
// instructions.
bool isDead = MBB->pred_empty() && !MBB->hasAddressTaken();
if (PreRegAlloc)
UpdateSuccessorsPHIs(MBB, isDead, TDBBs, Succs);
// If it is dead, remove it.
if (isDead) {
NumInstrDups -= MBB->size();
RemoveDeadBlock(MBB);
++NumDeadBlocks;
}
// Update SSA form.
if (!SSAUpdateVRs.empty()) {
for (unsigned i = 0, e = SSAUpdateVRs.size(); i != e; ++i) {
unsigned VReg = SSAUpdateVRs[i];
SSAUpdate.Initialize(VReg);
// If the original definition is still around, add it as an available
// value.
MachineInstr *DefMI = MRI->getVRegDef(VReg);
MachineBasicBlock *DefBB = nullptr;
if (DefMI) {
DefBB = DefMI->getParent();
SSAUpdate.AddAvailableValue(DefBB, VReg);
}
// Add the new vregs as available values.
DenseMap<unsigned, AvailableValsTy>::iterator LI =
SSAUpdateVals.find(VReg);
for (unsigned j = 0, ee = LI->second.size(); j != ee; ++j) {
MachineBasicBlock *SrcBB = LI->second[j].first;
unsigned SrcReg = LI->second[j].second;
SSAUpdate.AddAvailableValue(SrcBB, SrcReg);
}
// Rewrite uses that are outside of the original def's block.
MachineRegisterInfo::use_iterator UI = MRI->use_begin(VReg);
while (UI != MRI->use_end()) {
MachineOperand &UseMO = *UI;
MachineInstr *UseMI = UseMO.getParent();
++UI;
if (UseMI->isDebugValue()) {
// SSAUpdate can replace the use with an undef. That creates
// a debug instruction that is a kill.
          // FIXME: Should it be SSAUpdater's job to delete debug instructions
          // instead of replacing the use with undef?
UseMI->eraseFromParent();
continue;
}
if (UseMI->getParent() == DefBB && !UseMI->isPHI())
continue;
SSAUpdate.RewriteUse(UseMO);
}
}
SSAUpdateVRs.clear();
SSAUpdateVals.clear();
}
// Eliminate some of the copies inserted by tail duplication to maintain
// SSA form.
for (unsigned i = 0, e = Copies.size(); i != e; ++i) {
MachineInstr *Copy = Copies[i];
if (!Copy->isCopy())
continue;
unsigned Dst = Copy->getOperand(0).getReg();
unsigned Src = Copy->getOperand(1).getReg();
if (MRI->hasOneNonDBGUse(Src) &&
MRI->constrainRegClass(Src, MRI->getRegClass(Dst))) {
// Copy is the only use. Do trivial copy propagation here.
MRI->replaceRegWith(Dst, Src);
Copy->eraseFromParent();
}
}
if (NewPHIs.size())
NumAddedPHIs += NewPHIs.size();
return true;
}
/// TailDuplicateBlocks - Look for small blocks that are unconditionally
/// branched to and do not fall through. Tail-duplicate their instructions
/// into their predecessors to eliminate (dynamic) branches.
bool TailDuplicatePass::TailDuplicateBlocks(MachineFunction &MF) {
bool MadeChange = false;
if (PreRegAlloc && TailDupVerify) {
DEBUG(dbgs() << "\n*** Before tail-duplicating\n");
VerifyPHIs(MF, true);
}
for (MachineFunction::iterator I = ++MF.begin(), E = MF.end(); I != E; ) {
MachineBasicBlock *MBB = I++;
if (NumTails == TailDupLimit)
break;
bool IsSimple = isSimpleBB(MBB);
if (!shouldTailDuplicate(MF, IsSimple, *MBB))
continue;
MadeChange |= TailDuplicateAndUpdate(MBB, IsSimple, MF);
}
if (PreRegAlloc && TailDupVerify)
VerifyPHIs(MF, false);
return MadeChange;
}
static bool isDefLiveOut(unsigned Reg, MachineBasicBlock *BB,
const MachineRegisterInfo *MRI) {
for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
if (UseMI.isDebugValue())
continue;
if (UseMI.getParent() != BB)
return true;
}
return false;
}
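// Find the operand index of the PHI source corresponding to SrcBB. Returns 0
// when SrcBB is not listed; operand 0 is the PHI's def, so it can never be a
// valid source operand index.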
static unsigned getPHISrcRegOpIdx(MachineInstr *MI, MachineBasicBlock *SrcBB) {
for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2)
if (MI->getOperand(i+1).getMBB() == SrcBB)
return i;
return 0;
}
// Remember which registers are used by phis in this block. This is
// used to determine which registers are liveout while modifying the
// block (which is why we need to copy the information).
static void getRegsUsedByPHIs(const MachineBasicBlock &BB,
DenseSet<unsigned> *UsedByPhi) {
for (const auto &MI : BB) {
if (!MI.isPHI())
break;
for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
unsigned SrcReg = MI.getOperand(i).getReg();
UsedByPhi->insert(SrcReg);
}
}
}
/// AddSSAUpdateEntry - Add a definition and source virtual registers pair for
/// SSA update.
void TailDuplicatePass::AddSSAUpdateEntry(unsigned OrigReg, unsigned NewReg,
MachineBasicBlock *BB) {
  DenseMap<unsigned, AvailableValsTy>::iterator LI =
      SSAUpdateVals.find(OrigReg);
if (LI != SSAUpdateVals.end())
LI->second.push_back(std::make_pair(BB, NewReg));
else {
AvailableValsTy Vals;
Vals.push_back(std::make_pair(BB, NewReg));
SSAUpdateVals.insert(std::make_pair(OrigReg, Vals));
SSAUpdateVRs.push_back(OrigReg);
}
}
/// ProcessPHI - Process PHI node in TailBB by turning it into a copy in PredBB.
/// Remember the source register that's contributed by PredBB and update SSA
/// update map.
void TailDuplicatePass::ProcessPHI(
MachineInstr *MI, MachineBasicBlock *TailBB, MachineBasicBlock *PredBB,
DenseMap<unsigned, unsigned> &LocalVRMap,
SmallVectorImpl<std::pair<unsigned, unsigned> > &Copies,
const DenseSet<unsigned> &RegsUsedByPhi, bool Remove) {
unsigned DefReg = MI->getOperand(0).getReg();
unsigned SrcOpIdx = getPHISrcRegOpIdx(MI, PredBB);
assert(SrcOpIdx && "Unable to find matching PHI source?");
unsigned SrcReg = MI->getOperand(SrcOpIdx).getReg();
const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
LocalVRMap.insert(std::make_pair(DefReg, SrcReg));
// Insert a copy from source to the end of the block. The def register is the
// available value liveout of the block.
unsigned NewDef = MRI->createVirtualRegister(RC);
Copies.push_back(std::make_pair(NewDef, SrcReg));
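  // Only the (NewDef, SrcReg) pair is recorded here; the caller materializes
  // the actual COPY instructions at the predecessor's first terminator once
  // the whole block has been processed.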
if (isDefLiveOut(DefReg, TailBB, MRI) || RegsUsedByPhi.count(DefReg))
AddSSAUpdateEntry(DefReg, NewDef, PredBB);
if (!Remove)
return;
// Remove PredBB from the PHI node.
MI->RemoveOperand(SrcOpIdx+1);
MI->RemoveOperand(SrcOpIdx);
if (MI->getNumOperands() == 1)
MI->eraseFromParent();
}
/// DuplicateInstruction - Duplicate a TailBB instruction to PredBB and update
/// the source operands due to earlier PHI translation.
void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
MachineBasicBlock *TailBB,
MachineBasicBlock *PredBB,
MachineFunction &MF,
DenseMap<unsigned, unsigned> &LocalVRMap,
const DenseSet<unsigned> &UsedByPhi) {
MachineInstr *NewMI = TII->duplicate(MI, MF);
for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = NewMI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (MO.isDef()) {
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
unsigned NewReg = MRI->createVirtualRegister(RC);
MO.setReg(NewReg);
LocalVRMap.insert(std::make_pair(Reg, NewReg));
if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg))
AddSSAUpdateEntry(Reg, NewReg, PredBB);
} else {
DenseMap<unsigned, unsigned>::iterator VI = LocalVRMap.find(Reg);
if (VI != LocalVRMap.end()) {
MO.setReg(VI->second);
// Clear any kill flags from this operand. The new register could have
// uses after this one, so kills are not valid here.
MO.setIsKill(false);
MRI->constrainRegClass(VI->second, MRI->getRegClass(Reg));
}
}
}
PredBB->insert(PredBB->instr_end(), NewMI);
}
/// UpdateSuccessorsPHIs - After FromBB is tail duplicated into its predecessor
/// blocks, the successors have gained new predecessors. Update the PHI
/// instructions in them accordingly.
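/// For example, a PHI in a successor block that took a value from FromBB must
/// gain one incoming (register, block) pair per duplicated predecessor in
/// TDBBs, and it loses its FromBB entry when FromBB becomes dead.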
void
TailDuplicatePass::UpdateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallSetVector<MachineBasicBlock*,8> &Succs) {
for (SmallSetVector<MachineBasicBlock*, 8>::iterator SI = Succs.begin(),
SE = Succs.end(); SI != SE; ++SI) {
MachineBasicBlock *SuccBB = *SI;
for (MachineBasicBlock::iterator II = SuccBB->begin(), EE = SuccBB->end();
II != EE; ++II) {
if (!II->isPHI())
break;
MachineInstrBuilder MIB(*FromBB->getParent(), II);
unsigned Idx = 0;
for (unsigned i = 1, e = II->getNumOperands(); i != e; i += 2) {
MachineOperand &MO = II->getOperand(i+1);
if (MO.getMBB() == FromBB) {
Idx = i;
break;
}
}
assert(Idx != 0);
MachineOperand &MO0 = II->getOperand(Idx);
unsigned Reg = MO0.getReg();
if (isDead) {
// Folded into the previous BB.
          // There could be duplicate phi source entries. FIXME: Should sdisel
          // or an earlier pass have fixed this?
for (unsigned i = II->getNumOperands()-2; i != Idx; i -= 2) {
MachineOperand &MO = II->getOperand(i+1);
if (MO.getMBB() == FromBB) {
II->RemoveOperand(i+1);
II->RemoveOperand(i);
}
}
} else
Idx = 0;
// If Idx is set, the operands at Idx and Idx+1 must be removed.
// We reuse the location to avoid expensive RemoveOperand calls.
      DenseMap<unsigned, AvailableValsTy>::iterator LI =
          SSAUpdateVals.find(Reg);
if (LI != SSAUpdateVals.end()) {
// This register is defined in the tail block.
for (unsigned j = 0, ee = LI->second.size(); j != ee; ++j) {
MachineBasicBlock *SrcBB = LI->second[j].first;
          // If we didn't duplicate a bb into a particular predecessor, we
          // might still have added an entry to SSAUpdateVals to correctly
          // recompute SSA. In that case, avoid adding a dummy extra argument
          // to this PHI.
if (!SrcBB->isSuccessor(SuccBB))
continue;
unsigned SrcReg = LI->second[j].second;
if (Idx != 0) {
II->getOperand(Idx).setReg(SrcReg);
II->getOperand(Idx+1).setMBB(SrcBB);
Idx = 0;
} else {
MIB.addReg(SrcReg).addMBB(SrcBB);
}
}
} else {
// Live in tail block, must also be live in predecessors.
for (unsigned j = 0, ee = TDBBs.size(); j != ee; ++j) {
MachineBasicBlock *SrcBB = TDBBs[j];
if (Idx != 0) {
II->getOperand(Idx).setReg(Reg);
II->getOperand(Idx+1).setMBB(SrcBB);
Idx = 0;
} else {
MIB.addReg(Reg).addMBB(SrcBB);
}
}
}
if (Idx != 0) {
II->RemoveOperand(Idx+1);
II->RemoveOperand(Idx);
}
}
}
}
/// shouldTailDuplicate - Determine if it is profitable to duplicate this block.
bool
TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
bool IsSimple,
MachineBasicBlock &TailBB) {
// Only duplicate blocks that end with unconditional branches.
if (TailBB.canFallThrough())
return false;
// Don't try to tail-duplicate single-block loops.
if (TailBB.isSuccessor(&TailBB))
return false;
// Set the limit on the cost to duplicate. When optimizing for size,
// duplicate only one, because one branch instruction can be eliminated to
// compensate for the duplication.
unsigned MaxDuplicateCount;
if (TailDuplicateSize.getNumOccurrences() == 0 &&
MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
MaxDuplicateCount = 1;
else
MaxDuplicateCount = TailDuplicateSize;
// If the target has hardware branch prediction that can handle indirect
// branches, duplicating them can often make them predictable when there
// are common paths through the code. The limit needs to be high enough
// to allow undoing the effects of tail merging and other optimizations
// that rearrange the predecessors of the indirect branch.
bool HasIndirectbr = false;
if (!TailBB.empty())
HasIndirectbr = TailBB.back().isIndirectBranch();
if (HasIndirectbr && PreRegAlloc)
MaxDuplicateCount = 20;
// Check the instructions in the block to determine whether tail-duplication
// is invalid or unlikely to be profitable.
unsigned InstrCount = 0;
for (MachineBasicBlock::iterator I = TailBB.begin(); I != TailBB.end(); ++I) {
// Non-duplicable things shouldn't be tail-duplicated.
if (I->isNotDuplicable())
return false;
// Do not duplicate 'return' instructions if this is a pre-regalloc run.
// A return may expand into a lot more instructions (e.g. reload of callee
// saved registers) after PEI.
if (PreRegAlloc && I->isReturn())
return false;
    // Avoid duplicating calls before register allocation. Calls present a
    // barrier to register allocation, so duplicating them may end up
    // increasing spills.
if (PreRegAlloc && I->isCall())
return false;
if (!I->isPHI() && !I->isDebugValue())
InstrCount += 1;
if (InstrCount > MaxDuplicateCount)
return false;
}
if (HasIndirectbr && PreRegAlloc)
return true;
if (IsSimple)
return true;
if (!PreRegAlloc)
return true;
return canCompletelyDuplicateBB(TailBB);
}
/// isSimpleBB - True if this BB has only one unconditional jump.
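/// A block that is empty except for debug values also qualifies, as long as
/// it has a single successor and at least one predecessor.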
bool
TailDuplicatePass::isSimpleBB(MachineBasicBlock *TailBB) {
if (TailBB->succ_size() != 1)
return false;
if (TailBB->pred_empty())
return false;
MachineBasicBlock::iterator I = TailBB->getFirstNonDebugInstr();
if (I == TailBB->end())
return true;
return I->isUnconditionalBranch();
}
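// Return true if A has a successor in SuccsB that begins with a PHI.
// Duplicating a simple block into such a predecessor would require rewriting
// that PHI, so duplicateSimpleBB() skips those predecessors.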
static bool
bothUsedInPHI(const MachineBasicBlock &A,
SmallPtrSet<MachineBasicBlock*, 8> SuccsB) {
for (MachineBasicBlock::const_succ_iterator SI = A.succ_begin(),
SE = A.succ_end(); SI != SE; ++SI) {
MachineBasicBlock *BB = *SI;
if (SuccsB.count(BB) && !BB->empty() && BB->begin()->isPHI())
return true;
}
return false;
}
bool
TailDuplicatePass::canCompletelyDuplicateBB(MachineBasicBlock &BB) {
for (MachineBasicBlock::pred_iterator PI = BB.pred_begin(),
PE = BB.pred_end(); PI != PE; ++PI) {
MachineBasicBlock *PredBB = *PI;
if (PredBB->succ_size() > 1)
return false;
MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
SmallVector<MachineOperand, 4> PredCond;
if (TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true))
return false;
if (!PredCond.empty())
return false;
}
return true;
}
bool
TailDuplicatePass::duplicateSimpleBB(MachineBasicBlock *TailBB,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
const DenseSet<unsigned> &UsedByPhi,
SmallVectorImpl<MachineInstr *> &Copies) {
SmallPtrSet<MachineBasicBlock*, 8> Succs(TailBB->succ_begin(),
TailBB->succ_end());
SmallVector<MachineBasicBlock*, 8> Preds(TailBB->pred_begin(),
TailBB->pred_end());
bool Changed = false;
  for (SmallVector<MachineBasicBlock *, 8>::iterator PI = Preds.begin(),
       PE = Preds.end(); PI != PE; ++PI) {
MachineBasicBlock *PredBB = *PI;
if (PredBB->getLandingPadSuccessor())
continue;
if (bothUsedInPHI(*PredBB, Succs))
continue;
MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
SmallVector<MachineOperand, 4> PredCond;
if (TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true))
continue;
Changed = true;
DEBUG(dbgs() << "\nTail-duplicating into PredBB: " << *PredBB
<< "From simple Succ: " << *TailBB);
MachineBasicBlock *NewTarget = *TailBB->succ_begin();
MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(PredBB));
// Make PredFBB explicit.
if (PredCond.empty())
PredFBB = PredTBB;
// Make fall through explicit.
if (!PredTBB)
PredTBB = NextBB;
if (!PredFBB)
PredFBB = NextBB;
// Redirect
if (PredFBB == TailBB)
PredFBB = NewTarget;
if (PredTBB == TailBB)
PredTBB = NewTarget;
// Make the branch unconditional if possible
if (PredTBB == PredFBB) {
PredCond.clear();
PredFBB = nullptr;
}
// Avoid adding fall through branches.
if (PredFBB == NextBB)
PredFBB = nullptr;
if (PredTBB == NextBB && PredFBB == nullptr)
PredTBB = nullptr;
TII->RemoveBranch(*PredBB);
if (PredTBB)
TII->InsertBranch(*PredBB, PredTBB, PredFBB, PredCond, DebugLoc());
uint32_t Weight = MBPI->getEdgeWeight(PredBB, TailBB);
PredBB->removeSuccessor(TailBB);
unsigned NumSuccessors = PredBB->succ_size();
assert(NumSuccessors <= 1);
if (NumSuccessors == 0 || *PredBB->succ_begin() != NewTarget)
PredBB->addSuccessor(NewTarget, Weight);
TDBBs.push_back(PredBB);
}
return Changed;
}
/// TailDuplicate - If it is profitable, duplicate TailBB's contents in each
/// of its predecessors.
bool
TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
bool IsSimple,
MachineFunction &MF,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallVectorImpl<MachineInstr *> &Copies) {
DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n');
DenseSet<unsigned> UsedByPhi;
getRegsUsedByPHIs(*TailBB, &UsedByPhi);
if (IsSimple)
return duplicateSimpleBB(TailBB, TDBBs, UsedByPhi, Copies);
// Iterate through all the unique predecessors and tail-duplicate this
// block into them, if possible. Copying the list ahead of time also
// avoids trouble with the predecessor list reallocating.
bool Changed = false;
SmallSetVector<MachineBasicBlock*, 8> Preds(TailBB->pred_begin(),
TailBB->pred_end());
for (SmallSetVector<MachineBasicBlock *, 8>::iterator PI = Preds.begin(),
PE = Preds.end(); PI != PE; ++PI) {
MachineBasicBlock *PredBB = *PI;
assert(TailBB != PredBB &&
"Single-block loop should have been rejected earlier!");
// EH edges are ignored by AnalyzeBranch.
if (PredBB->succ_size() > 1)
continue;
MachineBasicBlock *PredTBB, *PredFBB;
SmallVector<MachineOperand, 4> PredCond;
if (TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true))
continue;
if (!PredCond.empty())
continue;
// Don't duplicate into a fall-through predecessor (at least for now).
if (PredBB->isLayoutSuccessor(TailBB) && PredBB->canFallThrough())
continue;
DEBUG(dbgs() << "\nTail-duplicating into PredBB: " << *PredBB
<< "From Succ: " << *TailBB);
TDBBs.push_back(PredBB);
// Remove PredBB's unconditional branch.
TII->RemoveBranch(*PredBB);
if (RS && !TailBB->livein_empty()) {
// Update PredBB livein.
RS->enterBasicBlock(PredBB);
if (!PredBB->empty())
RS->forward(std::prev(PredBB->end()));
for (MachineBasicBlock::livein_iterator I = TailBB->livein_begin(),
E = TailBB->livein_end(); I != E; ++I) {
if (!RS->isRegUsed(*I, false))
        // If a register was live into the tail block but is not live at the
        // end of the predecessor, it must be added to the predecessor's
        // live-in list.
PredBB->addLiveIn(*I);
}
}
// Clone the contents of TailBB into PredBB.
DenseMap<unsigned, unsigned> LocalVRMap;
SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
// Use instr_iterator here to properly handle bundles, e.g.
// ARM Thumb2 IT block.
MachineBasicBlock::instr_iterator I = TailBB->instr_begin();
while (I != TailBB->instr_end()) {
MachineInstr *MI = &*I;
++I;
if (MI->isPHI()) {
// Replace the uses of the def of the PHI with the register coming
// from PredBB.
ProcessPHI(MI, TailBB, PredBB, LocalVRMap, CopyInfos, UsedByPhi, true);
} else {
// Replace def of virtual registers with new registers, and update
// uses with PHI source register or the new registers.
DuplicateInstruction(MI, TailBB, PredBB, MF, LocalVRMap, UsedByPhi);
}
}
MachineBasicBlock::iterator Loc = PredBB->getFirstTerminator();
for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
Copies.push_back(BuildMI(*PredBB, Loc, DebugLoc(),
TII->get(TargetOpcode::COPY),
CopyInfos[i].first).addReg(CopyInfos[i].second));
}
// Simplify
TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true);
NumInstrDups += TailBB->size() - 1; // subtract one for removed branch
// Update the CFG.
PredBB->removeSuccessor(PredBB->succ_begin());
assert(PredBB->succ_empty() &&
"TailDuplicate called on block with multiple successors!");
for (MachineBasicBlock::succ_iterator I = TailBB->succ_begin(),
E = TailBB->succ_end(); I != E; ++I)
PredBB->addSuccessor(*I, MBPI->getEdgeWeight(TailBB, I));
Changed = true;
++NumTailDups;
}
// If TailBB was duplicated into all its predecessors except for the prior
// block, which falls through unconditionally, move the contents of this
// block into the prior block.
MachineBasicBlock *PrevBB = std::prev(MachineFunction::iterator(TailBB));
MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
SmallVector<MachineOperand, 4> PriorCond;
// This has to check PrevBB->succ_size() because EH edges are ignored by
// AnalyzeBranch.
if (PrevBB->succ_size() == 1 &&
!TII->AnalyzeBranch(*PrevBB, PriorTBB, PriorFBB, PriorCond, true) &&
PriorCond.empty() && !PriorTBB && TailBB->pred_size() == 1 &&
!TailBB->hasAddressTaken()) {
DEBUG(dbgs() << "\nMerging into block: " << *PrevBB
<< "From MBB: " << *TailBB);
if (PreRegAlloc) {
DenseMap<unsigned, unsigned> LocalVRMap;
SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
MachineBasicBlock::iterator I = TailBB->begin();
// Process PHI instructions first.
while (I != TailBB->end() && I->isPHI()) {
// Replace the uses of the def of the PHI with the register coming
// from PredBB.
MachineInstr *MI = &*I++;
ProcessPHI(MI, TailBB, PrevBB, LocalVRMap, CopyInfos, UsedByPhi, true);
if (MI->getParent())
MI->eraseFromParent();
}
// Now copy the non-PHI instructions.
while (I != TailBB->end()) {
// Replace def of virtual registers with new registers, and update
// uses with PHI source register or the new registers.
MachineInstr *MI = &*I++;
assert(!MI->isBundle() && "Not expecting bundles before regalloc!");
DuplicateInstruction(MI, TailBB, PrevBB, MF, LocalVRMap, UsedByPhi);
MI->eraseFromParent();
}
MachineBasicBlock::iterator Loc = PrevBB->getFirstTerminator();
for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
Copies.push_back(BuildMI(*PrevBB, Loc, DebugLoc(),
TII->get(TargetOpcode::COPY),
CopyInfos[i].first)
.addReg(CopyInfos[i].second));
}
} else {
// No PHIs to worry about, just splice the instructions over.
PrevBB->splice(PrevBB->end(), TailBB, TailBB->begin(), TailBB->end());
}
PrevBB->removeSuccessor(PrevBB->succ_begin());
assert(PrevBB->succ_empty());
PrevBB->transferSuccessors(TailBB);
TDBBs.push_back(PrevBB);
Changed = true;
}
// If this is after register allocation, there are no phis to fix.
if (!PreRegAlloc)
return Changed;
// If we made no changes so far, we are safe.
if (!Changed)
return Changed;
  // Handle the nasty case in which we duplicated a block that is part of a
  // loop into some but not all of its predecessors. For example:
  //    1 -> 2 <-> 3                 |
  //          \                      |
  //           \---> rest            |
  // if we duplicate 2 into 1 but not into 3, we end up with
  //   12 -> 3 <-> 2 -> rest         |
  //   \             /               |
  //    \----->-----/                |
// If there was a "var = phi(1, 3)" in 2, it has to be ultimately replaced
// with a phi in 3 (which now dominates 2).
// What we do here is introduce a copy in 3 of the register defined by the
// phi, just like when we are duplicating 2 into 3, but we don't copy any
// real instructions or remove the 3 -> 2 edge from the phi in 2.
for (SmallSetVector<MachineBasicBlock *, 8>::iterator PI = Preds.begin(),
PE = Preds.end(); PI != PE; ++PI) {
MachineBasicBlock *PredBB = *PI;
if (std::find(TDBBs.begin(), TDBBs.end(), PredBB) != TDBBs.end())
continue;
// EH edges
if (PredBB->succ_size() != 1)
continue;
DenseMap<unsigned, unsigned> LocalVRMap;
SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
MachineBasicBlock::iterator I = TailBB->begin();
// Process PHI instructions first.
while (I != TailBB->end() && I->isPHI()) {
// Replace the uses of the def of the PHI with the register coming
// from PredBB.
MachineInstr *MI = &*I++;
ProcessPHI(MI, TailBB, PredBB, LocalVRMap, CopyInfos, UsedByPhi, false);
}
MachineBasicBlock::iterator Loc = PredBB->getFirstTerminator();
for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
Copies.push_back(BuildMI(*PredBB, Loc, DebugLoc(),
TII->get(TargetOpcode::COPY),
CopyInfos[i].first).addReg(CopyInfos[i].second));
}
}
return Changed;
}
/// RemoveDeadBlock - Remove the specified dead machine basic block from the
/// function, updating the CFG.
void TailDuplicatePass::RemoveDeadBlock(MachineBasicBlock *MBB) {
assert(MBB->pred_empty() && "MBB must be dead!");
DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
// Remove all successors.
while (!MBB->succ_empty())
MBB->removeSuccessor(MBB->succ_end()-1);
// Remove the block.
MBB->eraseFromParent();
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineRegionInfo.cpp |
#include "llvm/CodeGen/MachineRegionInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/RegionInfoImpl.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#define DEBUG_TYPE "region"
using namespace llvm;
STATISTIC(numMachineRegions, "The # of machine regions");
STATISTIC(numMachineSimpleRegions, "The # of simple machine regions");
namespace llvm {
template class RegionBase<RegionTraits<MachineFunction>>;
template class RegionNodeBase<RegionTraits<MachineFunction>>;
template class RegionInfoBase<RegionTraits<MachineFunction>>;
}
//===----------------------------------------------------------------------===//
// MachineRegion implementation
//
MachineRegion::MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
MachineRegionInfo* RI,
MachineDominatorTree *DT, MachineRegion *Parent) :
RegionBase<RegionTraits<MachineFunction>>(Entry, Exit, RI, DT, Parent) {
}
MachineRegion::~MachineRegion() { }
//===----------------------------------------------------------------------===//
// MachineRegionInfo implementation
//
MachineRegionInfo::MachineRegionInfo() :
RegionInfoBase<RegionTraits<MachineFunction>>() {
}
MachineRegionInfo::~MachineRegionInfo() {
}
void MachineRegionInfo::updateStatistics(MachineRegion *R) {
++numMachineRegions;
// TODO: Slow. Should only be enabled if -stats is used.
if (R->isSimple())
++numMachineSimpleRegions;
}
void MachineRegionInfo::recalculate(MachineFunction &F,
MachineDominatorTree *DT_,
MachinePostDominatorTree *PDT_,
MachineDominanceFrontier *DF_) {
DT = DT_;
PDT = PDT_;
DF = DF_;
MachineBasicBlock *Entry = GraphTraits<MachineFunction*>::getEntryNode(&F);
TopLevelRegion = new MachineRegion(Entry, nullptr, this, DT, nullptr);
updateStatistics(TopLevelRegion);
calculate(F);
}
//===----------------------------------------------------------------------===//
// MachineRegionInfoPass implementation
//
MachineRegionInfoPass::MachineRegionInfoPass() : MachineFunctionPass(ID) {
initializeMachineRegionInfoPassPass(*PassRegistry::getPassRegistry());
}
MachineRegionInfoPass::~MachineRegionInfoPass() {
}
bool MachineRegionInfoPass::runOnMachineFunction(MachineFunction &F) {
releaseMemory();
auto DT = &getAnalysis<MachineDominatorTree>();
auto PDT = &getAnalysis<MachinePostDominatorTree>();
auto DF = &getAnalysis<MachineDominanceFrontier>();
RI.recalculate(F, DT, PDT, DF);
return false;
}
void MachineRegionInfoPass::releaseMemory() {
RI.releaseMemory();
}
void MachineRegionInfoPass::verifyAnalysis() const {
  // Only do verification when the user asks for it; otherwise this expensive
  // check would be invoked by PMDataManager::verifyPreservedAnalysis every
  // time a region pass (which preserves all analyses) finishes.
if (MachineRegionInfo::VerifyRegionInfo)
RI.verifyAnalysis();
}
void MachineRegionInfoPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
  // These must match the machine-level analyses queried in
  // runOnMachineFunction().
  AU.addRequiredTransitive<MachineDominatorTree>();
  AU.addRequired<MachinePostDominatorTree>();
  AU.addRequired<MachineDominanceFrontier>();
}
void MachineRegionInfoPass::print(raw_ostream &OS, const Module *) const {
RI.print(OS);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineRegionInfoPass::dump() const {
RI.dump();
}
#endif
char MachineRegionInfoPass::ID = 0;
INITIALIZE_PASS_BEGIN(MachineRegionInfoPass, "regions",
"Detect single entry single exit regions", true, true)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier)
INITIALIZE_PASS_END(MachineRegionInfoPass, "regions",
"Detect single entry single exit regions", true, true)
// Create methods available outside of this file so that they can be
// referenced from "include/llvm/LinkAllPasses.h". Otherwise the pass would be
// eliminated by link-time optimization.
namespace llvm {
FunctionPass *createMachineRegionInfoPass() {
return new MachineRegionInfoPass();
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegisterCoalescer.h | //===-- RegisterCoalescer.h - Register Coalescing Interface -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the abstract interface for register coalescers,
// allowing them to interact with and query register allocators.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_REGISTERCOALESCER_H
#define LLVM_LIB_CODEGEN_REGISTERCOALESCER_H
namespace llvm {
class MachineInstr;
class TargetRegisterInfo;
class TargetRegisterClass;
class TargetInstrInfo;
/// A helper class for register coalescers. When deciding if
/// two registers can be coalesced, CoalescerPair can determine if a copy
/// instruction would become an identity copy after coalescing.
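///
/// Typical use: call setRegisters() on a candidate copy instruction, inspect
/// the registers and sub-register indices it computed, and use
/// isCoalescable() to recognize copies that become redundant once the pair
/// is joined.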
class CoalescerPair {
const TargetRegisterInfo &TRI;
/// The register that will be left after coalescing. It can be a
/// virtual or physical register.
unsigned DstReg;
/// The virtual register that will be coalesced into DstReg.
unsigned SrcReg;
/// The sub-register index of the old DstReg in the new coalesced register.
unsigned DstIdx;
/// The sub-register index of the old SrcReg in the new coalesced register.
unsigned SrcIdx;
/// True when the original copy was a partial subregister copy.
bool Partial;
/// True when both regs are virtual and NewRC is constrained.
bool CrossClass;
/// True when DstReg and SrcReg are reversed from the original
/// copy instruction.
bool Flipped;
/// The register class of the coalesced register, or NULL if DstReg
/// is a physreg. This register class may be a super-register of both
/// SrcReg and DstReg.
const TargetRegisterClass *NewRC;
public:
CoalescerPair(const TargetRegisterInfo &tri)
: TRI(tri), DstReg(0), SrcReg(0), DstIdx(0), SrcIdx(0),
Partial(false), CrossClass(false), Flipped(false), NewRC(nullptr) {}
/// Create a CoalescerPair representing a virtreg-to-physreg copy.
/// No need to call setRegisters().
CoalescerPair(unsigned VirtReg, unsigned PhysReg,
const TargetRegisterInfo &tri)
: TRI(tri), DstReg(PhysReg), SrcReg(VirtReg), DstIdx(0), SrcIdx(0),
Partial(false), CrossClass(false), Flipped(false), NewRC(nullptr) {}
/// Set registers to match the copy instruction MI. Return
/// false if MI is not a coalescable copy instruction.
bool setRegisters(const MachineInstr*);
/// Swap SrcReg and DstReg. Return false if swapping is impossible
/// because DstReg is a physical register, or SubIdx is set.
bool flip();
/// Return true if MI is a copy instruction that will become
/// an identity copy after coalescing.
bool isCoalescable(const MachineInstr*) const;
/// Return true if DstReg is a physical register.
bool isPhys() const { return !NewRC; }
/// Return true if the original copy instruction did not copy
/// the full register, but was a subreg operation.
bool isPartial() const { return Partial; }
/// Return true if DstReg is virtual and NewRC is a smaller
/// register class than DstReg's.
bool isCrossClass() const { return CrossClass; }
/// Return true when getSrcReg is the register being defined by
/// the original copy instruction.
bool isFlipped() const { return Flipped; }
/// Return the register (virtual or physical) that will remain
/// after coalescing.
unsigned getDstReg() const { return DstReg; }
/// Return the virtual register that will be coalesced away.
unsigned getSrcReg() const { return SrcReg; }
/// Return the subregister index that DstReg will be coalesced into, or 0.
unsigned getDstIdx() const { return DstIdx; }
/// Return the subregister index that SrcReg will be coalesced into, or 0.
unsigned getSrcIdx() const { return SrcIdx; }
/// Return the register class of the coalesced register.
const TargetRegisterClass *getNewRC() const { return NewRC; }
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegAllocPBQP.cpp | //===------ RegAllocPBQP.cpp ---- PBQP Register Allocator -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a Partitioned Boolean Quadratic Programming (PBQP) based
// register allocator for LLVM. This allocator works by constructing a PBQP
// problem representing the register allocation problem under consideration,
// solving this using a PBQP solver, and mapping the solution back to a
// register assignment. If any variables are selected for spilling then spill
// code is inserted and the process repeated.
//
// The PBQP solver (pbqp.c) provided for this allocator uses a heuristic tuned
// for register allocation. For more information on PBQP for register
// allocation, see the following papers:
//
// (1) Hames, L. and Scholz, B. 2006. Nearly optimal register allocation with
// PBQP. In Proceedings of the 7th Joint Modular Languages Conference
// (JMLC'06). LNCS, vol. 4228. Springer, New York, NY, USA. 346-361.
//
// (2) Scholz, B., Eckstein, E. 2002. Register allocation for irregular
// architectures. In Proceedings of the Joint Conference on Languages,
// Compilers and Tools for Embedded Systems (LCTES'02), ACM Press, New York,
// NY, USA, 139-148.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/RegAllocPBQP.h"
#include "RegisterCoalescer.h"
#include "Spiller.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <limits>
#include <memory>
#include <queue>
#include <set>
#include <sstream>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
static RegisterRegAlloc
RegisterPBQPRepAlloc("pbqp", "PBQP register allocator",
createDefaultPBQPRegisterAllocator);
static cl::opt<bool>
PBQPCoalescing("pbqp-coalescing",
cl::desc("Attempt coalescing during PBQP register allocation."),
cl::init(false), cl::Hidden);
#ifndef NDEBUG
static cl::opt<bool>
PBQPDumpGraphs("pbqp-dump-graphs",
cl::desc("Dump graphs for each function/round in the compilation unit."),
cl::init(false), cl::Hidden);
#endif
namespace {
///
/// PBQP based allocators solve the register allocation problem by mapping
/// register allocation problems to Partitioned Boolean Quadratic
/// Programming problems.
class RegAllocPBQP : public MachineFunctionPass {
public:
static char ID;
/// Construct a PBQP register allocator.
RegAllocPBQP(char *cPassID = nullptr)
: MachineFunctionPass(ID), customPassID(cPassID) {
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
}
/// Return the pass name.
const char* getPassName() const override {
return "PBQP Register Allocator";
}
/// PBQP analysis usage.
void getAnalysisUsage(AnalysisUsage &au) const override;
/// Perform register allocation
bool runOnMachineFunction(MachineFunction &MF) override;
private:
typedef std::map<const LiveInterval*, unsigned> LI2NodeMap;
typedef std::vector<const LiveInterval*> Node2LIMap;
typedef std::vector<unsigned> AllowedSet;
typedef std::vector<AllowedSet> AllowedSetMap;
typedef std::pair<unsigned, unsigned> RegPair;
typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
typedef std::set<unsigned> RegSet;
char *customPassID;
RegSet VRegsToAlloc, EmptyIntervalVRegs;
/// \brief Finds the initial set of vreg intervals to allocate.
void findVRegIntervalsToAlloc(const MachineFunction &MF, LiveIntervals &LIS);
/// \brief Constructs an initial graph.
void initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM, Spiller &VRegSpiller);
/// \brief Spill the given VReg.
void spillVReg(unsigned VReg, SmallVectorImpl<unsigned> &NewIntervals,
MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM,
Spiller &VRegSpiller);
/// \brief Given a solved PBQP problem maps this solution back to a register
/// assignment.
bool mapPBQPToRegAlloc(const PBQPRAGraph &G,
const PBQP::Solution &Solution,
VirtRegMap &VRM,
Spiller &VRegSpiller);
/// \brief Postprocessing before final spilling. Sets basic block "live in"
/// variables.
void finalizeAlloc(MachineFunction &MF, LiveIntervals &LIS,
VirtRegMap &VRM) const;
};
char RegAllocPBQP::ID = 0;
/// @brief Set spill costs for each node in the PBQP reg-alloc graph.
class SpillCosts : public PBQPRAConstraint {
public:
void apply(PBQPRAGraph &G) override {
LiveIntervals &LIS = G.getMetadata().LIS;
// A minimum spill cost, so that register constraints can be set
// without normalization in the [0.0, MinSpillCost) interval.
const PBQP::PBQPNum MinSpillCost = 10.0;
for (auto NId : G.nodeIds()) {
PBQP::PBQPNum SpillCost =
LIS.getInterval(G.getNodeMetadata(NId).getVReg()).weight;
if (SpillCost == 0.0)
SpillCost = std::numeric_limits<PBQP::PBQPNum>::min();
else
SpillCost += MinSpillCost;
PBQPRAGraph::RawVector NodeCosts(G.getNodeCosts(NId));
NodeCosts[PBQP::RegAlloc::getSpillOptionIdx()] = SpillCost;
G.setNodeCosts(NId, std::move(NodeCosts));
}
}
};
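// For example (illustrative numbers only): a node whose interval has weight
// 2.5 and three allowed registers ends up with the cost vector
//
//   [ 12.5, c1, c2, c3 ]
//
// where index 0 is the spill option (2.5 + MinSpillCost) and c1..c3 are
// whatever per-register costs other constraints contribute.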
/// @brief Add interference edges between overlapping vregs.
class Interference : public PBQPRAConstraint {
private:
typedef const PBQP::RegAlloc::AllowedRegVector* AllowedRegVecPtr;
typedef std::pair<AllowedRegVecPtr, AllowedRegVecPtr> IKey;
typedef DenseMap<IKey, PBQPRAGraph::MatrixPtr> IMatrixCache;
typedef DenseSet<IKey> DisjointAllowedRegsCache;
typedef std::pair<PBQP::GraphBase::NodeId, PBQP::GraphBase::NodeId> IEdgeKey;
typedef DenseSet<IEdgeKey> IEdgeCache;
bool haveDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
PBQPRAGraph::NodeId MId,
const DisjointAllowedRegsCache &D) const {
const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
if (NRegs == MRegs)
return false;
if (NRegs < MRegs)
return D.count(IKey(NRegs, MRegs)) > 0;
return D.count(IKey(MRegs, NRegs)) > 0;
}
void setDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
PBQPRAGraph::NodeId MId,
DisjointAllowedRegsCache &D) {
const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
assert(NRegs != MRegs && "AllowedRegs can not be disjoint with itself");
if (NRegs < MRegs)
D.insert(IKey(NRegs, MRegs));
else
D.insert(IKey(MRegs, NRegs));
}
// Holds (Interval, CurrentSegmentID, and NodeId). The first two are required
// for the fast interference graph construction algorithm. The last is there
// to save us from looking up node ids via the VRegToNode map in the graph
// metadata.
typedef std::tuple<LiveInterval*, size_t, PBQP::GraphBase::NodeId>
IntervalInfo;
static SlotIndex getStartPoint(const IntervalInfo &I) {
return std::get<0>(I)->segments[std::get<1>(I)].start;
}
static SlotIndex getEndPoint(const IntervalInfo &I) {
return std::get<0>(I)->segments[std::get<1>(I)].end;
}
static PBQP::GraphBase::NodeId getNodeId(const IntervalInfo &I) {
return std::get<2>(I);
}
static bool lowestStartPoint(const IntervalInfo &I1,
const IntervalInfo &I2) {
// Condition reversed because priority queue has the *highest* element at
// the front, rather than the lowest.
return getStartPoint(I1) > getStartPoint(I2);
}
static bool lowestEndPoint(const IntervalInfo &I1,
const IntervalInfo &I2) {
SlotIndex E1 = getEndPoint(I1);
SlotIndex E2 = getEndPoint(I2);
if (E1 < E2)
return true;
if (E1 > E2)
return false;
// If two intervals end at the same point, we need a way to break the tie or
// the set will assume they're actually equal and refuse to insert a
// "duplicate". Just compare the vregs - fast and guaranteed unique.
return std::get<0>(I1)->reg < std::get<0>(I2)->reg;
}
static bool isAtLastSegment(const IntervalInfo &I) {
return std::get<1>(I) == std::get<0>(I)->size() - 1;
}
static IntervalInfo nextSegment(const IntervalInfo &I) {
return std::make_tuple(std::get<0>(I), std::get<1>(I) + 1, std::get<2>(I));
}
public:
void apply(PBQPRAGraph &G) override {
// The following is loosely based on the linear scan algorithm introduced in
// "Linear Scan Register Allocation" by Poletto and Sarkar. This version
// isn't linear, because the size of the active set isn't bounded by the
// number of registers, but rather by the size of the largest clique in the
// graph. Still, we expect this to be better than N^2.
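// A small illustration with hypothetical segments: given A=[0,10), B=[4,12)
// and C=[11,20), the sweep visits A, then B while A is still active (so an
// edge A-B is considered), then C after A has retired but while B is still
// active (so an edge B-C is considered, but no edge A-C).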
LiveIntervals &LIS = G.getMetadata().LIS;
// Interference matrices are incredibly regular - they're only a function of
// the allowed sets, so we cache them to avoid the overhead of constructing
// and uniquing them.
IMatrixCache C;
// Finding an edge is expensive in the worst case (O(max_clique(G))), so
// locally cache the edges we have already seen.
IEdgeCache EC;
// Cache known-disjoint allowed register pairs.
DisjointAllowedRegsCache D;
typedef std::set<IntervalInfo, decltype(&lowestEndPoint)> IntervalSet;
typedef std::priority_queue<IntervalInfo, std::vector<IntervalInfo>,
decltype(&lowestStartPoint)> IntervalQueue;
IntervalSet Active(lowestEndPoint);
IntervalQueue Inactive(lowestStartPoint);
// Start by building the inactive set.
for (auto NId : G.nodeIds()) {
unsigned VReg = G.getNodeMetadata(NId).getVReg();
LiveInterval &LI = LIS.getInterval(VReg);
assert(!LI.empty() && "PBQP graph contains node for empty interval");
Inactive.push(std::make_tuple(&LI, 0, NId));
}
while (!Inactive.empty()) {
// Tentatively grab the "next" interval - this choice may be overridden
// below.
IntervalInfo Cur = Inactive.top();
// Retire any active intervals that end before Cur starts.
IntervalSet::iterator RetireItr = Active.begin();
while (RetireItr != Active.end() &&
(getEndPoint(*RetireItr) <= getStartPoint(Cur))) {
// If this interval has subsequent segments, add the next one to the
// inactive list.
if (!isAtLastSegment(*RetireItr))
Inactive.push(nextSegment(*RetireItr));
++RetireItr;
}
Active.erase(Active.begin(), RetireItr);
// One of the newly retired segments may actually start before the
// Cur segment, so re-grab the front of the inactive list.
Cur = Inactive.top();
Inactive.pop();
// At this point we know that Cur overlaps all active intervals. Add the
// interference edges.
PBQP::GraphBase::NodeId NId = getNodeId(Cur);
for (const auto &A : Active) {
PBQP::GraphBase::NodeId MId = getNodeId(A);
// Do not add an edge when the nodes' allowed registers do not
// intersect: there is obviously no interference.
if (haveDisjointAllowedRegs(G, NId, MId, D))
continue;
// Check that we haven't already added this edge
IEdgeKey EK(std::min(NId, MId), std::max(NId, MId));
if (EC.count(EK))
continue;
// This is a new edge - add it to the graph.
if (!createInterferenceEdge(G, NId, MId, C))
setDisjointAllowedRegs(G, NId, MId, D);
else
EC.insert(EK);
}
// Finally, add Cur to the Active set.
Active.insert(Cur);
}
}
private:
// Create an Interference edge and add it to the graph, unless it is
// a null matrix, meaning the nodes' allowed registers do not have any
// interference. This case occurs frequently between integer and floating
// point registers for example.
// Returns true iff the two nodes interfere.
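// For instance (a sketch with made-up registers): if NRegs = {R0, R1} and
// MRegs = {R1, R2}, only the R1/R1 pair overlaps, so the matrix is
//
//          spill  R1    R2
//   spill [  0     0     0 ]
//   R0    [  0     0     0 ]
//   R1    [  0    inf    0 ]
//
// Row and column 0 correspond to the spill option and never carry
// interference costs.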
bool createInterferenceEdge(PBQPRAGraph &G,
PBQPRAGraph::NodeId NId, PBQPRAGraph::NodeId MId,
IMatrixCache &C) {
const TargetRegisterInfo &TRI =
*G.getMetadata().MF.getSubtarget().getRegisterInfo();
const auto &NRegs = G.getNodeMetadata(NId).getAllowedRegs();
const auto &MRegs = G.getNodeMetadata(MId).getAllowedRegs();
// Try looking the edge costs up in the IMatrixCache first.
IKey K(&NRegs, &MRegs);
IMatrixCache::iterator I = C.find(K);
if (I != C.end()) {
G.addEdgeBypassingCostAllocator(NId, MId, I->second);
return true;
}
PBQPRAGraph::RawMatrix M(NRegs.size() + 1, MRegs.size() + 1, 0);
bool NodesInterfere = false;
for (unsigned I = 0; I != NRegs.size(); ++I) {
unsigned PRegN = NRegs[I];
for (unsigned J = 0; J != MRegs.size(); ++J) {
unsigned PRegM = MRegs[J];
if (TRI.regsOverlap(PRegN, PRegM)) {
M[I + 1][J + 1] = std::numeric_limits<PBQP::PBQPNum>::infinity();
NodesInterfere = true;
}
}
}
if (!NodesInterfere)
return false;
PBQPRAGraph::EdgeId EId = G.addEdge(NId, MId, std::move(M));
C[K] = G.getEdgeCostsPtr(EId);
return true;
}
};
class Coalescing : public PBQPRAConstraint {
public:
void apply(PBQPRAGraph &G) override {
MachineFunction &MF = G.getMetadata().MF;
MachineBlockFrequencyInfo &MBFI = G.getMetadata().MBFI;
CoalescerPair CP(*MF.getSubtarget().getRegisterInfo());
// Scan the machine function and add a coalescing cost whenever CoalescerPair
// identifies a coalescable copy.
for (const auto &MBB : MF) {
for (const auto &MI : MBB) {
// Skip non-coalescable or already coalesced copies.
if (!CP.setRegisters(&MI) || CP.getSrcReg() == CP.getDstReg())
continue;
unsigned DstReg = CP.getDstReg();
unsigned SrcReg = CP.getSrcReg();
const float Scale = 1.0f / MBFI.getEntryFreq();
PBQP::PBQPNum CBenefit = MBFI.getBlockFreq(&MBB).getFrequency() * Scale;
if (CP.isPhys()) {
if (!MF.getRegInfo().isAllocatable(DstReg))
continue;
PBQPRAGraph::NodeId NId = G.getMetadata().getNodeIdForVReg(SrcReg);
const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed =
G.getNodeMetadata(NId).getAllowedRegs();
unsigned PRegOpt = 0;
while (PRegOpt < Allowed.size() && Allowed[PRegOpt] != DstReg)
++PRegOpt;
if (PRegOpt < Allowed.size()) {
PBQPRAGraph::RawVector NewCosts(G.getNodeCosts(NId));
NewCosts[PRegOpt + 1] -= CBenefit;
G.setNodeCosts(NId, std::move(NewCosts));
}
} else {
PBQPRAGraph::NodeId N1Id = G.getMetadata().getNodeIdForVReg(DstReg);
PBQPRAGraph::NodeId N2Id = G.getMetadata().getNodeIdForVReg(SrcReg);
const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed1 =
&G.getNodeMetadata(N1Id).getAllowedRegs();
const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed2 =
&G.getNodeMetadata(N2Id).getAllowedRegs();
PBQPRAGraph::EdgeId EId = G.findEdge(N1Id, N2Id);
if (EId == G.invalidEdgeId()) {
PBQPRAGraph::RawMatrix Costs(Allowed1->size() + 1,
Allowed2->size() + 1, 0);
addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
G.addEdge(N1Id, N2Id, std::move(Costs));
} else {
if (G.getEdgeNode1Id(EId) == N2Id) {
std::swap(N1Id, N2Id);
std::swap(Allowed1, Allowed2);
}
PBQPRAGraph::RawMatrix Costs(G.getEdgeCosts(EId));
addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
G.updateEdgeCosts(EId, std::move(Costs));
}
}
}
}
}
private:
void addVirtRegCoalesce(
PBQPRAGraph::RawMatrix &CostMat,
const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed1,
const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed2,
PBQP::PBQPNum Benefit) {
assert(CostMat.getRows() == Allowed1.size() + 1 && "Size mismatch.");
assert(CostMat.getCols() == Allowed2.size() + 1 && "Size mismatch.");
for (unsigned I = 0; I != Allowed1.size(); ++I) {
unsigned PReg1 = Allowed1[I];
for (unsigned J = 0; J != Allowed2.size(); ++J) {
unsigned PReg2 = Allowed2[J];
if (PReg1 == PReg2)
CostMat[I + 1][J + 1] -= Benefit;
}
}
}
};
} // End anonymous namespace.
// Out-of-line destructor/anchor for PBQPRAConstraint.
PBQPRAConstraint::~PBQPRAConstraint() {}
void PBQPRAConstraint::anchor() {}
void PBQPRAConstraintList::anchor() {}
void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
au.setPreservesCFG();
au.addRequired<AliasAnalysis>();
au.addPreserved<AliasAnalysis>();
au.addRequired<SlotIndexes>();
au.addPreserved<SlotIndexes>();
au.addRequired<LiveIntervals>();
au.addPreserved<LiveIntervals>();
//au.addRequiredID(SplitCriticalEdgesID);
if (customPassID)
au.addRequiredID(*customPassID);
au.addRequired<LiveStacks>();
au.addPreserved<LiveStacks>();
au.addRequired<MachineBlockFrequencyInfo>();
au.addPreserved<MachineBlockFrequencyInfo>();
au.addRequired<MachineLoopInfo>();
au.addPreserved<MachineLoopInfo>();
au.addRequired<MachineDominatorTree>();
au.addPreserved<MachineDominatorTree>();
au.addRequired<VirtRegMap>();
au.addPreserved<VirtRegMap>();
MachineFunctionPass::getAnalysisUsage(au);
}
void RegAllocPBQP::findVRegIntervalsToAlloc(const MachineFunction &MF,
LiveIntervals &LIS) {
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Iterate over all live ranges.
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
if (MRI.reg_nodbg_empty(Reg))
continue;
LiveInterval &LI = LIS.getInterval(Reg);
// If this live interval is non-empty we will use PBQP to allocate it.
// Empty intervals are allocated in a simple post-processing stage in
// finalizeAlloc.
if (!LI.empty()) {
VRegsToAlloc.insert(LI.reg);
} else {
EmptyIntervalVRegs.insert(LI.reg);
}
}
}
static bool isACalleeSavedRegister(unsigned reg, const TargetRegisterInfo &TRI,
const MachineFunction &MF) {
const MCPhysReg *CSR = TRI.getCalleeSavedRegs(&MF);
for (unsigned i = 0; CSR[i] != 0; ++i)
if (TRI.regsOverlap(reg, CSR[i]))
return true;
return false;
}
void RegAllocPBQP::initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM,
Spiller &VRegSpiller) {
MachineFunction &MF = G.getMetadata().MF;
LiveIntervals &LIS = G.getMetadata().LIS;
const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
const TargetRegisterInfo &TRI =
*G.getMetadata().MF.getSubtarget().getRegisterInfo();
std::vector<unsigned> Worklist(VRegsToAlloc.begin(), VRegsToAlloc.end());
while (!Worklist.empty()) {
unsigned VReg = Worklist.back();
Worklist.pop_back();
const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
LiveInterval &VRegLI = LIS.getInterval(VReg);
// Record any overlaps with regmask operands.
BitVector RegMaskOverlaps;
LIS.checkRegMaskInterference(VRegLI, RegMaskOverlaps);
// Compute an initial allowed set for the current vreg.
std::vector<unsigned> VRegAllowed;
ArrayRef<MCPhysReg> RawPRegOrder = TRC->getRawAllocationOrder(MF);
for (unsigned I = 0; I != RawPRegOrder.size(); ++I) {
unsigned PReg = RawPRegOrder[I];
if (MRI.isReserved(PReg))
continue;
// vregLI crosses a regmask operand that clobbers preg.
if (!RegMaskOverlaps.empty() && !RegMaskOverlaps.test(PReg))
continue;
// vregLI overlaps fixed regunit interference.
bool Interference = false;
for (MCRegUnitIterator Units(PReg, &TRI); Units.isValid(); ++Units) {
if (VRegLI.overlaps(LIS.getRegUnit(*Units))) {
Interference = true;
break;
}
}
if (Interference)
continue;
// preg is usable for this virtual register.
VRegAllowed.push_back(PReg);
}
// Check for vregs that have no allowed registers. These should be
// pre-spilled and the new vregs added to the worklist.
if (VRegAllowed.empty()) {
SmallVector<unsigned, 8> NewVRegs;
spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
Worklist.insert(Worklist.end(), NewVRegs.begin(), NewVRegs.end());
continue;
}
PBQPRAGraph::RawVector NodeCosts(VRegAllowed.size() + 1, 0);
// Tweak the cost of callee-saved registers, as using them forces spilling
// and restoring them in the prologue / epilogue.
for (unsigned i = 0; i != VRegAllowed.size(); ++i)
if (isACalleeSavedRegister(VRegAllowed[i], TRI, MF))
NodeCosts[1 + i] += 1.0;
PBQPRAGraph::NodeId NId = G.addNode(std::move(NodeCosts));
G.getNodeMetadata(NId).setVReg(VReg);
G.getNodeMetadata(NId).setAllowedRegs(
G.getMetadata().getAllowedRegs(std::move(VRegAllowed)));
G.getMetadata().setNodeIdForVReg(VReg, NId);
}
}
void RegAllocPBQP::spillVReg(unsigned VReg,
SmallVectorImpl<unsigned> &NewIntervals,
MachineFunction &MF, LiveIntervals &LIS,
VirtRegMap &VRM, Spiller &VRegSpiller) {
VRegsToAlloc.erase(VReg);
LiveRangeEdit LRE(&LIS.getInterval(VReg), NewIntervals, MF, LIS, &VRM);
VRegSpiller.spill(LRE);
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
(void)TRI;
DEBUG(dbgs() << "VREG " << PrintReg(VReg, &TRI) << " -> SPILLED (Cost: "
<< LRE.getParent().weight << ", New vregs: ");
// Copy any newly inserted live intervals into the list of regs to
// allocate.
for (LiveRangeEdit::iterator I = LRE.begin(), E = LRE.end();
I != E; ++I) {
const LiveInterval &LI = LIS.getInterval(*I);
assert(!LI.empty() && "Empty spill range.");
DEBUG(dbgs() << PrintReg(LI.reg, &TRI) << " ");
VRegsToAlloc.insert(LI.reg);
}
DEBUG(dbgs() << ")\n");
}
bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
const PBQP::Solution &Solution,
VirtRegMap &VRM,
Spiller &VRegSpiller) {
MachineFunction &MF = G.getMetadata().MF;
LiveIntervals &LIS = G.getMetadata().LIS;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
(void)TRI;
// Set to true if we have any spills
bool AnotherRoundNeeded = false;
// Clear the existing allocation.
VRM.clearAllVirt();
// Iterate over the nodes mapping the PBQP solution to a register
// assignment.
for (auto NId : G.nodeIds()) {
unsigned VReg = G.getNodeMetadata(NId).getVReg();
unsigned AllocOption = Solution.getSelection(NId);
if (AllocOption != PBQP::RegAlloc::getSpillOptionIdx()) {
unsigned PReg = G.getNodeMetadata(NId).getAllowedRegs()[AllocOption - 1];
DEBUG(dbgs() << "VREG " << PrintReg(VReg, &TRI) << " -> "
<< TRI.getName(PReg) << "\n");
assert(PReg != 0 && "Invalid preg selected.");
VRM.assignVirt2Phys(VReg, PReg);
} else {
// Spill VReg. If this introduces new intervals we'll need another round
// of allocation.
SmallVector<unsigned, 8> NewVRegs;
spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
AnotherRoundNeeded |= !NewVRegs.empty();
}
}
return !AnotherRoundNeeded;
}
void RegAllocPBQP::finalizeAlloc(MachineFunction &MF,
LiveIntervals &LIS,
VirtRegMap &VRM) const {
MachineRegisterInfo &MRI = MF.getRegInfo();
// First allocate registers for the empty intervals.
for (RegSet::const_iterator
I = EmptyIntervalVRegs.begin(), E = EmptyIntervalVRegs.end();
I != E; ++I) {
LiveInterval &LI = LIS.getInterval(*I);
unsigned PReg = MRI.getSimpleHint(LI.reg);
if (PReg == 0) {
const TargetRegisterClass &RC = *MRI.getRegClass(LI.reg);
PReg = RC.getRawAllocationOrder(MF).front();
}
VRM.assignVirt2Phys(LI.reg, PReg);
}
}
static inline float normalizePBQPSpillWeight(float UseDefFreq, unsigned Size,
unsigned NumInstr) {
// All intervals have a spill weight that is mostly proportional to the number
// of uses, with uses in loops having a bigger weight.
return NumInstr * normalizeSpillWeight(UseDefFreq, Size, 1);
}
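// A minimal illustration with made-up numbers: an interval covering 4
// instructions with UseDefFreq 200.0 and Size 400 slots gets the weight
// 4 * normalizeSpillWeight(200.0, 400, 1) - i.e. the generic normalized
// weight (roughly a use density) scaled up by the instruction count.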
bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
LiveIntervals &LIS = getAnalysis<LiveIntervals>();
MachineBlockFrequencyInfo &MBFI =
getAnalysis<MachineBlockFrequencyInfo>();
calculateSpillWeightsAndHints(LIS, MF, getAnalysis<MachineLoopInfo>(), MBFI,
normalizePBQPSpillWeight);
VirtRegMap &VRM = getAnalysis<VirtRegMap>();
std::unique_ptr<Spiller> VRegSpiller(createInlineSpiller(*this, MF, VRM));
MF.getRegInfo().freezeReservedRegs(MF);
DEBUG(dbgs() << "PBQP Register Allocating for " << MF.getName() << "\n");
// Allocator main loop:
//
// * Map current regalloc problem to a PBQP problem
// * Solve the PBQP problem
// * Map the solution back to a register allocation
// * Spill if necessary
//
// This process repeats until no more spills are generated.
// Find the vreg intervals in need of allocation.
findVRegIntervalsToAlloc(MF, LIS);
#ifndef NDEBUG
const Function &F = *MF.getFunction();
std::string FullyQualifiedName =
F.getParent()->getModuleIdentifier() + "." + F.getName().str();
#endif
// If there are non-empty intervals allocate them using pbqp.
if (!VRegsToAlloc.empty()) {
const TargetSubtargetInfo &Subtarget = MF.getSubtarget();
std::unique_ptr<PBQPRAConstraintList> ConstraintsRoot =
llvm::make_unique<PBQPRAConstraintList>();
ConstraintsRoot->addConstraint(llvm::make_unique<SpillCosts>());
ConstraintsRoot->addConstraint(llvm::make_unique<Interference>());
if (PBQPCoalescing)
ConstraintsRoot->addConstraint(llvm::make_unique<Coalescing>());
ConstraintsRoot->addConstraint(Subtarget.getCustomPBQPConstraints());
bool PBQPAllocComplete = false;
unsigned Round = 0;
while (!PBQPAllocComplete) {
DEBUG(dbgs() << " PBQP Regalloc round " << Round << ":\n");
PBQPRAGraph G(PBQPRAGraph::GraphMetadata(MF, LIS, MBFI));
initializeGraph(G, VRM, *VRegSpiller);
ConstraintsRoot->apply(G);
#ifndef NDEBUG
if (PBQPDumpGraphs) {
std::ostringstream RS;
RS << Round;
std::string GraphFileName = FullyQualifiedName + "." + RS.str() +
".pbqpgraph";
std::error_code EC;
raw_fd_ostream OS(GraphFileName, EC, sys::fs::F_Text);
DEBUG(dbgs() << "Dumping graph for round " << Round << " to \""
<< GraphFileName << "\"\n");
G.dump(OS);
}
#endif
PBQP::Solution Solution = PBQP::RegAlloc::solve(G);
PBQPAllocComplete = mapPBQPToRegAlloc(G, Solution, VRM, *VRegSpiller);
++Round;
}
}
// Finalize allocation and allocate the empty ranges.
finalizeAlloc(MF, LIS, VRM);
VRegsToAlloc.clear();
EmptyIntervalVRegs.clear();
DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << VRM << "\n");
return true;
}
namespace {
// A helper class for printing node and register info in a consistent way
class PrintNodeInfo {
public:
typedef PBQP::RegAlloc::PBQPRAGraph Graph;
typedef PBQP::RegAlloc::PBQPRAGraph::NodeId NodeId;
PrintNodeInfo(NodeId NId, const Graph &G) : G(G), NId(NId) {}
void print(raw_ostream &OS) const {
const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
unsigned VReg = G.getNodeMetadata(NId).getVReg();
const char *RegClassName = TRI->getRegClassName(MRI.getRegClass(VReg));
OS << NId << " (" << RegClassName << ':' << PrintReg(VReg, TRI) << ')';
}
private:
const Graph &G;
NodeId NId;
};
inline raw_ostream &operator<<(raw_ostream &OS, const PrintNodeInfo &PR) {
PR.print(OS);
return OS;
}
} // anonymous namespace
void PBQP::RegAlloc::PBQPRAGraph::dump(raw_ostream &OS) const {
for (auto NId : nodeIds()) {
const Vector &Costs = getNodeCosts(NId);
assert(Costs.getLength() != 0 && "Empty vector in graph.");
OS << PrintNodeInfo(NId, *this) << ": " << Costs << '\n';
}
OS << '\n';
for (auto EId : edgeIds()) {
NodeId N1Id = getEdgeNode1Id(EId);
NodeId N2Id = getEdgeNode2Id(EId);
assert(N1Id != N2Id && "PBQP graphs should not have self-edges.");
const Matrix &M = getEdgeCosts(EId);
assert(M.getRows() != 0 && "No rows in matrix.");
assert(M.getCols() != 0 && "No cols in matrix.");
OS << PrintNodeInfo(N1Id, *this) << ' ' << M.getRows() << " rows / ";
OS << PrintNodeInfo(N2Id, *this) << ' ' << M.getCols() << " cols:\n";
OS << M << '\n';
}
}
void PBQP::RegAlloc::PBQPRAGraph::dump() const { dump(dbgs()); }
void PBQP::RegAlloc::PBQPRAGraph::printDot(raw_ostream &OS) const {
OS << "graph {\n";
for (auto NId : nodeIds()) {
OS << " node" << NId << " [ label=\""
<< PrintNodeInfo(NId, *this) << "\\n"
<< getNodeCosts(NId) << "\" ]\n";
}
OS << " edge [ len=" << nodeIds().size() << " ]\n";
for (auto EId : edgeIds()) {
OS << " node" << getEdgeNode1Id(EId)
<< " -- node" << getEdgeNode2Id(EId)
<< " [ label=\"";
const Matrix &EdgeCosts = getEdgeCosts(EId);
for (unsigned i = 0; i < EdgeCosts.getRows(); ++i) {
OS << EdgeCosts.getRowAsVector(i) << "\\n";
}
OS << "\" ]\n";
}
OS << "}\n";
}
FunctionPass *llvm::createPBQPRegisterAllocator(char *customPassID) {
return new RegAllocPBQP(customPassID);
}
FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
return createPBQPRegisterAllocator();
}
#undef DEBUG_TYPE
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ScheduleDAG.cpp | //===---- ScheduleDAG.cpp - Implement the ScheduleDAG class ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;
#define DEBUG_TYPE "pre-RA-sched"
#ifndef NDEBUG
static cl::opt<bool> StressSchedOpt(
"stress-sched", cl::Hidden, cl::init(false),
cl::desc("Stress test instruction scheduling"));
#endif
void SchedulingPriorityQueue::anchor() { }
ScheduleDAG::ScheduleDAG(MachineFunction &mf)
: TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
MRI(mf.getRegInfo()), EntrySU(), ExitSU() {
#ifndef NDEBUG
StressSched = StressSchedOpt;
#endif
}
ScheduleDAG::~ScheduleDAG() {}
/// Clear the DAG state (e.g. between scheduling regions).
void ScheduleDAG::clearDAG() {
SUnits.clear();
EntrySU = SUnit();
ExitSU = SUnit();
}
/// getInstrDesc helper to handle SDNodes.
const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
if (!Node || !Node->isMachineOpcode()) return nullptr;
return &TII->get(Node->getMachineOpcode());
}
/// addPred - This adds the specified edge as a pred of the current node if
/// not already. It also adds the current node as a successor of the
/// specified node.
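///
/// Typical usage (a sketch; PredSU and Reg are placeholders):
///
///   SU->addPred(SDep(PredSU, SDep::Data, Reg));  // data dependence
///   SU->addPred(SDep(PredSU, SDep::Artificial)); // heuristic ordering edge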
bool SUnit::addPred(const SDep &D, bool Required) {
// If this node already has this dependence, don't add a redundant one.
for (SmallVectorImpl<SDep>::iterator I = Preds.begin(), E = Preds.end();
I != E; ++I) {
// Zero-latency weak edges may be added purely for heuristic ordering. Don't
// add them if another kind of edge already exists.
if (!Required && I->getSUnit() == D.getSUnit())
return false;
if (I->overlaps(D)) {
// Extend the latency if needed. Equivalent to removePred(I) + addPred(D).
if (I->getLatency() < D.getLatency()) {
SUnit *PredSU = I->getSUnit();
// Find the corresponding successor in N.
SDep ForwardD = *I;
ForwardD.setSUnit(this);
for (SmallVectorImpl<SDep>::iterator II = PredSU->Succs.begin(),
EE = PredSU->Succs.end(); II != EE; ++II) {
if (*II == ForwardD) {
II->setLatency(D.getLatency());
break;
}
}
I->setLatency(D.getLatency());
}
return false;
}
}
// Now add a corresponding succ to N.
SDep P = D;
P.setSUnit(this);
SUnit *N = D.getSUnit();
// Update the bookkeeping.
if (D.getKind() == SDep::Data) {
assert(NumPreds < UINT_MAX && "NumPreds will overflow!");
assert(N->NumSuccs < UINT_MAX && "NumSuccs will overflow!");
++NumPreds;
++N->NumSuccs;
}
if (!N->isScheduled) {
if (D.isWeak()) {
++WeakPredsLeft;
}
else {
assert(NumPredsLeft < UINT_MAX && "NumPredsLeft will overflow!");
++NumPredsLeft;
}
}
if (!isScheduled) {
if (D.isWeak()) {
++N->WeakSuccsLeft;
}
else {
assert(N->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
++N->NumSuccsLeft;
}
}
Preds.push_back(D);
N->Succs.push_back(P);
if (P.getLatency() != 0) {
this->setDepthDirty();
N->setHeightDirty();
}
return true;
}
/// removePred - This removes the specified edge as a pred of the current
/// node if it exists. It also removes the current node as a successor of
/// the specified node.
void SUnit::removePred(const SDep &D) {
// Find the matching predecessor.
for (SmallVectorImpl<SDep>::iterator I = Preds.begin(), E = Preds.end();
I != E; ++I)
if (*I == D) {
// Find the corresponding successor in N.
SDep P = D;
P.setSUnit(this);
SUnit *N = D.getSUnit();
SmallVectorImpl<SDep>::iterator Succ = std::find(N->Succs.begin(),
N->Succs.end(), P);
assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
N->Succs.erase(Succ);
Preds.erase(I);
// Update the bookkeeping.
if (P.getKind() == SDep::Data) {
assert(NumPreds > 0 && "NumPreds will underflow!");
assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
--NumPreds;
--N->NumSuccs;
}
if (!N->isScheduled) {
if (D.isWeak())
--WeakPredsLeft;
else {
assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
--NumPredsLeft;
}
}
if (!isScheduled) {
if (D.isWeak())
--N->WeakSuccsLeft;
else {
assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
--N->NumSuccsLeft;
}
}
if (P.getLatency() != 0) {
this->setDepthDirty();
N->setHeightDirty();
}
return;
}
}
void SUnit::setDepthDirty() {
if (!isDepthCurrent) return;
SmallVector<SUnit*, 8> WorkList;
WorkList.push_back(this);
do {
SUnit *SU = WorkList.pop_back_val();
SU->isDepthCurrent = false;
for (SUnit::const_succ_iterator I = SU->Succs.begin(),
E = SU->Succs.end(); I != E; ++I) {
SUnit *SuccSU = I->getSUnit();
if (SuccSU->isDepthCurrent)
WorkList.push_back(SuccSU);
}
} while (!WorkList.empty());
}
void SUnit::setHeightDirty() {
if (!isHeightCurrent) return;
SmallVector<SUnit*, 8> WorkList;
WorkList.push_back(this);
do {
SUnit *SU = WorkList.pop_back_val();
SU->isHeightCurrent = false;
for (SUnit::const_pred_iterator I = SU->Preds.begin(),
E = SU->Preds.end(); I != E; ++I) {
SUnit *PredSU = I->getSUnit();
if (PredSU->isHeightCurrent)
WorkList.push_back(PredSU);
}
} while (!WorkList.empty());
}
/// setDepthToAtLeast - Update this node's successors to reflect the
/// fact that this node's depth just increased.
///
void SUnit::setDepthToAtLeast(unsigned NewDepth) {
if (NewDepth <= getDepth())
return;
setDepthDirty();
Depth = NewDepth;
isDepthCurrent = true;
}
/// setHeightToAtLeast - Update this node's predecessors to reflect the
/// fact that this node's height just increased.
///
void SUnit::setHeightToAtLeast(unsigned NewHeight) {
if (NewHeight <= getHeight())
return;
setHeightDirty();
Height = NewHeight;
isHeightCurrent = true;
}
/// ComputeDepth - Calculate the maximal path from the node back to the entry.
///
void SUnit::ComputeDepth() {
SmallVector<SUnit*, 8> WorkList;
WorkList.push_back(this);
do {
SUnit *Cur = WorkList.back();
bool Done = true;
unsigned MaxPredDepth = 0;
for (SUnit::const_pred_iterator I = Cur->Preds.begin(),
E = Cur->Preds.end(); I != E; ++I) {
SUnit *PredSU = I->getSUnit();
if (PredSU->isDepthCurrent)
MaxPredDepth = std::max(MaxPredDepth,
PredSU->Depth + I->getLatency());
else {
Done = false;
WorkList.push_back(PredSU);
}
}
if (Done) {
WorkList.pop_back();
if (MaxPredDepth != Cur->Depth) {
Cur->setDepthDirty();
Cur->Depth = MaxPredDepth;
}
Cur->isDepthCurrent = true;
}
} while (!WorkList.empty());
}
/// ComputeHeight - Calculate the maximal path from the node down to the exit.
///
void SUnit::ComputeHeight() {
SmallVector<SUnit*, 8> WorkList;
WorkList.push_back(this);
do {
SUnit *Cur = WorkList.back();
bool Done = true;
unsigned MaxSuccHeight = 0;
for (SUnit::const_succ_iterator I = Cur->Succs.begin(),
E = Cur->Succs.end(); I != E; ++I) {
SUnit *SuccSU = I->getSUnit();
if (SuccSU->isHeightCurrent)
MaxSuccHeight = std::max(MaxSuccHeight,
SuccSU->Height + I->getLatency());
else {
Done = false;
WorkList.push_back(SuccSU);
}
}
if (Done) {
WorkList.pop_back();
if (MaxSuccHeight != Cur->Height) {
Cur->setHeightDirty();
Cur->Height = MaxSuccHeight;
}
Cur->isHeightCurrent = true;
}
} while (!WorkList.empty());
}
void SUnit::biasCriticalPath() {
if (NumPreds < 2)
return;
SUnit::pred_iterator BestI = Preds.begin();
unsigned MaxDepth = BestI->getSUnit()->getDepth();
for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
++I) {
if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) {
MaxDepth = I->getSUnit()->getDepth();
BestI = I;
}
}
if (BestI != Preds.begin())
std::swap(*Preds.begin(), *BestI);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
/// a group of nodes flagged together.
void SUnit::dump(const ScheduleDAG *G) const {
dbgs() << "SU(" << NodeNum << "): ";
G->dumpNode(this);
}
void SUnit::dumpAll(const ScheduleDAG *G) const {
dump(G);
dbgs() << " # preds left : " << NumPredsLeft << "\n";
dbgs() << " # succs left : " << NumSuccsLeft << "\n";
if (WeakPredsLeft)
dbgs() << " # weak preds left : " << WeakPredsLeft << "\n";
if (WeakSuccsLeft)
dbgs() << " # weak succs left : " << WeakSuccsLeft << "\n";
dbgs() << " # rdefs left : " << NumRegDefsLeft << "\n";
dbgs() << " Latency : " << Latency << "\n";
dbgs() << " Depth : " << getDepth() << "\n";
dbgs() << " Height : " << getHeight() << "\n";
if (Preds.size() != 0) {
dbgs() << " Predecessors:\n";
for (SUnit::const_pred_iterator I = Preds.begin(), E = Preds.end();
I != E; ++I) {
dbgs() << " ";
switch (I->getKind()) {
case SDep::Data: dbgs() << "val "; break;
case SDep::Anti: dbgs() << "anti"; break;
case SDep::Output: dbgs() << "out "; break;
case SDep::Order: dbgs() << "ch "; break;
}
dbgs() << "SU(" << I->getSUnit()->NodeNum << ")";
if (I->isArtificial())
dbgs() << " *";
dbgs() << ": Latency=" << I->getLatency();
if (I->isAssignedRegDep())
dbgs() << " Reg=" << PrintReg(I->getReg(), G->TRI);
dbgs() << "\n";
}
}
if (Succs.size() != 0) {
dbgs() << " Successors:\n";
for (SUnit::const_succ_iterator I = Succs.begin(), E = Succs.end();
I != E; ++I) {
dbgs() << " ";
switch (I->getKind()) {
case SDep::Data: dbgs() << "val "; break;
case SDep::Anti: dbgs() << "anti"; break;
case SDep::Output: dbgs() << "out "; break;
case SDep::Order: dbgs() << "ch "; break;
}
dbgs() << "SU(" << I->getSUnit()->NodeNum << ")";
if (I->isArtificial())
dbgs() << " *";
dbgs() << ": Latency=" << I->getLatency();
if (I->isAssignedRegDep())
dbgs() << " Reg=" << PrintReg(I->getReg(), G->TRI);
dbgs() << "\n";
}
}
dbgs() << "\n";
}
#endif
#ifndef NDEBUG
/// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
/// their state is consistent. Return the number of scheduled nodes.
///
unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
bool AnyNotSched = false;
unsigned DeadNodes = 0;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
if (!SUnits[i].isScheduled) {
if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
++DeadNodes;
continue;
}
if (!AnyNotSched)
dbgs() << "*** Scheduling failed! ***\n";
SUnits[i].dump(this);
dbgs() << "has not been scheduled!\n";
AnyNotSched = true;
}
if (SUnits[i].isScheduled &&
(isBottomUp ? SUnits[i].getHeight() : SUnits[i].getDepth()) >
unsigned(INT_MAX)) {
if (!AnyNotSched)
dbgs() << "*** Scheduling failed! ***\n";
SUnits[i].dump(this);
dbgs() << "has an unexpected "
<< (isBottomUp ? "Height" : "Depth") << " value!\n";
AnyNotSched = true;
}
if (isBottomUp) {
if (SUnits[i].NumSuccsLeft != 0) {
if (!AnyNotSched)
dbgs() << "*** Scheduling failed! ***\n";
SUnits[i].dump(this);
dbgs() << "has successors left!\n";
AnyNotSched = true;
}
} else {
if (SUnits[i].NumPredsLeft != 0) {
if (!AnyNotSched)
dbgs() << "*** Scheduling failed! ***\n";
SUnits[i].dump(this);
dbgs() << "has predecessors left!\n";
AnyNotSched = true;
}
}
}
assert(!AnyNotSched);
return SUnits.size() - DeadNodes;
}
#endif
/// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.
///
/// The idea of the algorithm is taken from
/// "Online algorithms for managing the topological order of
/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
/// This is the MNR algorithm, which was first introduced by
/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
/// "Maintaining a topological order under edge insertions".
///
/// Short description of the algorithm:
///
/// Topological ordering, ord, of a DAG maps each node to a topological
/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
///
/// This means that if there is a path from the node X to the node Z,
/// then ord(X) < ord(Z).
///
/// This property can be used to check for reachability of nodes:
/// if Z is reachable from X, then an insertion of the edge Z->X would
/// create a cycle.
///
/// The algorithm first computes a topological ordering for the DAG by
/// initializing the Index2Node and Node2Index arrays and then tries to keep
/// the ordering up-to-date after edge insertions by reordering the DAG.
///
/// On insertion of the edge X->Y, the algorithm first marks by calling DFS
/// the nodes reachable from Y, and then shifts them using Shift to lie
/// immediately after X in Index2Node.
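///
/// A tiny worked example with hypothetical nodes: given the DAG A->B, B->D,
/// A->C and the order ord(A)=0, ord(B)=1, ord(C)=2, ord(D)=3, inserting the
/// edge C->B violates the order since ord(C) > ord(B). DFS starting at B
/// marks the nodes reachable from B whose index is below ord(C) (just B
/// here), and Shift renumbers the affected range to ord(A)=0, ord(C)=1,
/// ord(B)=2, ord(D)=3, which is again a valid topological order.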
void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
unsigned DAGSize = SUnits.size();
std::vector<SUnit*> WorkList;
WorkList.reserve(DAGSize);
Index2Node.resize(DAGSize);
Node2Index.resize(DAGSize);
// Initialize the data structures.
if (ExitSU)
WorkList.push_back(ExitSU);
for (unsigned i = 0, e = DAGSize; i != e; ++i) {
SUnit *SU = &SUnits[i];
int NodeNum = SU->NodeNum;
unsigned Degree = SU->Succs.size();
// Temporarily use the Node2Index array as scratch space for degree counts.
Node2Index[NodeNum] = Degree;
// Is it a node without dependencies?
if (Degree == 0) {
assert(SU->Succs.empty() && "SUnit should have no successors");
// Collect leaf nodes.
WorkList.push_back(SU);
}
}
int Id = DAGSize;
while (!WorkList.empty()) {
SUnit *SU = WorkList.back();
WorkList.pop_back();
if (SU->NodeNum < DAGSize)
Allocate(SU->NodeNum, --Id);
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
SUnit *PredSU = I->getSUnit();
// If all dependencies of the node are processed already, then the node
// can be computed now.
if (PredSU->NodeNum < DAGSize && !--Node2Index[PredSU->NodeNum])
WorkList.push_back(PredSU);
}
}
Visited.resize(DAGSize);
#ifndef NDEBUG
// Check correctness of the ordering
for (unsigned i = 0, e = DAGSize; i != e; ++i) {
SUnit *SU = &SUnits[i];
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
assert(Node2Index[SU->NodeNum] > Node2Index[I->getSUnit()->NodeNum] &&
"Wrong topological sorting");
}
}
#endif
}
/// AddPred - Updates the topological ordering to accommodate an edge
/// to be added from SUnit X to SUnit Y.
void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
int UpperBound, LowerBound;
LowerBound = Node2Index[Y->NodeNum];
UpperBound = Node2Index[X->NodeNum];
bool HasLoop = false;
// Is Ord(X) < Ord(Y) ?
if (LowerBound < UpperBound) {
// Update the topological order.
Visited.reset();
DFS(Y, UpperBound, HasLoop);
assert(!HasLoop && "Inserted edge creates a loop!");
// Recompute topological indexes.
Shift(Visited, LowerBound, UpperBound);
}
}
/// RemovePred - Updates the topological ordering to accommodate an edge
/// to be removed from the specified node N, a predecessor of the current
/// node M.
void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
// Removing an edge cannot invalidate an existing topological order, so
// there is nothing to update here. Re-running the full sort would be
// correct but needlessly expensive.
// InitDAGTopologicalSorting();
}
/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
/// all nodes affected by the edge insertion. These nodes will later get new
/// topological indexes by means of the Shift method.
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
bool &HasLoop) {
std::vector<const SUnit*> WorkList;
WorkList.reserve(SUnits.size());
WorkList.push_back(SU);
do {
SU = WorkList.back();
WorkList.pop_back();
Visited.set(SU->NodeNum);
for (int I = SU->Succs.size()-1; I >= 0; --I) {
unsigned s = SU->Succs[I].getSUnit()->NodeNum;
// Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
if (s >= Node2Index.size())
continue;
if (Node2Index[s] == UpperBound) {
HasLoop = true;
return;
}
// Visit successors if not already and in affected region.
if (!Visited.test(s) && Node2Index[s] < UpperBound) {
WorkList.push_back(SU->Succs[I].getSUnit());
}
}
} while (!WorkList.empty());
}
/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
int UpperBound) {
std::vector<int> L;
int shift = 0;
int i;
for (i = LowerBound; i <= UpperBound; ++i) {
// w is node at topological index i.
int w = Index2Node[i];
if (Visited.test(w)) {
// Unmark.
Visited.reset(w);
L.push_back(w);
shift = shift + 1;
} else {
Allocate(w, i - shift);
}
}
for (unsigned j = 0; j < L.size(); ++j) {
Allocate(L[j], i - shift);
i = i + 1;
}
}
/// WillCreateCycle - Returns true if adding an edge to TargetSU from SU will
/// create a cycle. If so, it is not safe to call AddPred(TargetSU, SU).
bool ScheduleDAGTopologicalSort::WillCreateCycle(SUnit *TargetSU, SUnit *SU) {
// Is SU reachable from TargetSU via successor edges?
if (IsReachable(SU, TargetSU))
return true;
for (SUnit::pred_iterator
I = TargetSU->Preds.begin(), E = TargetSU->Preds.end(); I != E; ++I)
if (I->isAssignedRegDep() &&
IsReachable(SU, I->getSUnit()))
return true;
return false;
}
/// IsReachable - Checks if SU is reachable from TargetSU.
bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
const SUnit *TargetSU) {
// If insertion of the edge SU->TargetSU would create a cycle
// then there is a path from TargetSU to SU.
int UpperBound, LowerBound;
LowerBound = Node2Index[TargetSU->NodeNum];
UpperBound = Node2Index[SU->NodeNum];
bool HasLoop = false;
// Is Ord(TargetSU) < Ord(SU) ?
if (LowerBound < UpperBound) {
Visited.reset();
// There may be a path from TargetSU to SU. Check for it.
DFS(TargetSU, UpperBound, HasLoop);
}
return HasLoop;
}
/// Allocate - assign the topological index to the node n.
void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
Node2Index[n] = index;
Index2Node[index] = n;
}
ScheduleDAGTopologicalSort::
ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
: SUnits(sunits), ExitSU(exitsu) {}
ScheduleHazardRecognizer::~ScheduleHazardRecognizer() {}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/CodeGenPrepare.cpp | //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
using namespace llvm;
using namespace llvm::PatternMatch;
#define DEBUG_TYPE "codegenprepare"
STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
"sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
"of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
"computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
static cl::opt<bool> DisableBranchOpts(
"disable-cgp-branch-opts", cl::Hidden, cl::init(false),
cl::desc("Disable branch optimizations in CodeGenPrepare"));
static cl::opt<bool>
DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
cl::desc("Disable GC optimizations in CodeGenPrepare"));
static cl::opt<bool> DisableSelectToBranch(
"disable-cgp-select2branch", cl::Hidden, cl::init(false),
cl::desc("Disable select to branch conversion."));
static cl::opt<bool> AddrSinkUsingGEPs(
"addr-sink-using-gep", cl::Hidden, cl::init(false),
cl::desc("Address sinking in CGP using GEPs."));
static cl::opt<bool> EnableAndCmpSinking(
"enable-andcmp-sinking", cl::Hidden, cl::init(true),
cl::desc("Enable sinkinig and/cmp into branches."));
static cl::opt<bool> DisableStoreExtract(
"disable-cgp-store-extract", cl::Hidden, cl::init(false),
cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
static cl::opt<bool> StressStoreExtract(
"stress-cgp-store-extract", cl::Hidden, cl::init(false),
cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
static cl::opt<bool> DisableExtLdPromotion(
"disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
"CodeGenPrepare"));
static cl::opt<bool> StressExtLdPromotion(
"stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
"optimization in CodeGenPrepare"));
namespace {
typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
struct TypeIsSExt {
Type *Ty;
bool IsSExt;
TypeIsSExt(Type *Ty, bool IsSExt) : Ty(Ty), IsSExt(IsSExt) {}
};
typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy;
class TypePromotionTransaction;
class CodeGenPrepare : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// transformation profitability.
const TargetMachine *TM;
const TargetLowering *TLI;
const TargetTransformInfo *TTI;
const TargetLibraryInfo *TLInfo;
/// CurInstIterator - As we scan instructions optimizing them, this is the
/// next instruction to optimize. Xforms that can invalidate this should
/// update it.
BasicBlock::iterator CurInstIterator;
/// Keeps track of non-local addresses that have been sunk into a block.
/// This allows us to avoid inserting duplicate code for blocks with
/// multiple load/stores of the same address.
ValueMap<Value*, Value*> SunkAddrs;
/// Keeps track of all instructions inserted for the current function.
SetOfInstrs InsertedInsts;
/// Keeps track of the type of the related instruction before their
/// promotion for the current function.
InstrToOrigTy PromotedInsts;
/// ModifiedDT - If CFG is modified in anyway.
bool ModifiedDT;
/// OptSize - True if optimizing for size.
bool OptSize;
/// DataLayout for the Function being processed.
const DataLayout *DL;
public:
static char ID; // Pass identification, replacement for typeid
explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
: FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) {
initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
const char *getPassName() const override { return "CodeGen Prepare"; }
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
}
private:
bool EliminateFallThrough(Function &F);
bool EliminateMostlyEmptyBlocks(Function &F);
bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
void EliminateMostlyEmptyBlock(BasicBlock *BB);
bool OptimizeBlock(BasicBlock &BB, bool& ModifiedDT);
bool OptimizeInst(Instruction *I, bool& ModifiedDT);
bool OptimizeMemoryInst(Instruction *I, Value *Addr,
Type *AccessTy, unsigned AS);
bool OptimizeInlineAsmInst(CallInst *CS);
bool OptimizeCallInst(CallInst *CI, bool& ModifiedDT);
bool MoveExtToFormExtLoad(Instruction *&I);
bool OptimizeExtUses(Instruction *I);
bool OptimizeSelectInst(SelectInst *SI);
bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI);
bool OptimizeExtractElementInst(Instruction *Inst);
bool DupRetToEnableTailCallOpts(BasicBlock *BB);
bool PlaceDbgValues(Function &F);
bool sinkAndCmp(Function &F);
bool ExtLdPromotion(TypePromotionTransaction &TPT, LoadInst *&LI,
Instruction *&Inst,
const SmallVectorImpl<Instruction *> &Exts,
unsigned CreatedInstCost);
bool splitBranchCondition(Function &F);
bool simplifyOffsetableRelocate(Instruction &I);
};
}
char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
"Optimize for code generation", false, false)
FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
return new CodeGenPrepare(TM);
}
bool CodeGenPrepare::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
DL = &F.getParent()->getDataLayout();
bool EverMadeChange = false;
// Clear per function information.
InsertedInsts.clear();
PromotedInsts.clear();
ModifiedDT = false;
if (TM)
TLI = TM->getSubtargetImpl(F)->getTargetLowering();
TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
OptSize = F.hasFnAttribute(Attribute::OptimizeForSize);
/// This optimization identifies DIV instructions that can be
/// profitably bypassed and carried out with a shorter, faster divide.
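///
/// For example, on a target where 64-bit division is slow, a udiv of i64
/// operands that happen to fit in 32 bits can be routed to the faster
/// 32-bit divider: bypassSlowDivision inserts a runtime width test plus
/// both code paths (the exact lowering is target-dependent; a sketch only).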
if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
const DenseMap<unsigned int, unsigned int> &BypassWidths =
TLI->getBypassSlowDivWidths();
for (Function::iterator I = F.begin(); I != F.end(); I++)
EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
}
// Eliminate blocks that contain only PHI nodes and an
// unconditional branch.
EverMadeChange |= EliminateMostlyEmptyBlocks(F);
// If llvm.dbg.value is far away from the value, then isel may not be able
// to handle it properly. isel will drop llvm.dbg.value if it cannot find a
// node corresponding to the value.
EverMadeChange |= PlaceDbgValues(F);
// If there is a mask, compare against zero, and branch that can be combined
// into a single target instruction, push the mask and compare into branch
// users. Do this before OptimizeBlock -> OptimizeInst ->
// OptimizeCmpExpression, which perturbs the pattern being searched for.
if (!DisableBranchOpts) {
EverMadeChange |= sinkAndCmp(F);
EverMadeChange |= splitBranchCondition(F);
}
bool MadeChange = true;
while (MadeChange) {
MadeChange = false;
for (Function::iterator I = F.begin(); I != F.end(); ) {
BasicBlock *BB = I++;
bool ModifiedDTOnIteration = false;
MadeChange |= OptimizeBlock(*BB, ModifiedDTOnIteration);
// Restart BB iteration if the dominator tree of the Function was changed
if (ModifiedDTOnIteration)
break;
}
EverMadeChange |= MadeChange;
}
SunkAddrs.clear();
if (!DisableBranchOpts) {
MadeChange = false;
SmallPtrSet<BasicBlock*, 8> WorkList;
for (BasicBlock &BB : F) {
SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
MadeChange |= ConstantFoldTerminator(&BB, true);
if (!MadeChange) continue;
for (SmallVectorImpl<BasicBlock*>::iterator
II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
if (pred_begin(*II) == pred_end(*II))
WorkList.insert(*II);
}
// Delete the dead blocks and any of their dead successors.
MadeChange |= !WorkList.empty();
while (!WorkList.empty()) {
BasicBlock *BB = *WorkList.begin();
WorkList.erase(BB);
SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
DeleteDeadBlock(BB);
for (SmallVectorImpl<BasicBlock*>::iterator
II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
if (pred_begin(*II) == pred_end(*II))
WorkList.insert(*II);
}
// Merge pairs of basic blocks with unconditional branches, connected by
// a single edge.
if (EverMadeChange || MadeChange)
MadeChange |= EliminateFallThrough(F);
EverMadeChange |= MadeChange;
}
if (!DisableGCOpts) {
SmallVector<Instruction *, 2> Statepoints;
for (BasicBlock &BB : F)
for (Instruction &I : BB)
if (isStatepoint(I))
Statepoints.push_back(&I);
for (auto &I : Statepoints)
EverMadeChange |= simplifyOffsetableRelocate(*I);
}
return EverMadeChange;
}
/// EliminateFallThrough - Merge basic blocks which are connected
/// by a single edge, where one of the basic blocks has a single successor
/// pointing to the other basic block, which has a single predecessor.
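///
/// Schematically (a hypothetical CFG fragment): if BB1 ends in
/// 'br label %BB2' and BB2 has no other predecessors, BB2's instructions
/// are folded into BB1 and BB2 is erased.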
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
bool Changed = false;
// Scan all of the blocks in the function, except for the entry block.
for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
BasicBlock *BB = I++;
// If the destination block has a single pred, then this is a trivial
// edge, just collapse it.
BasicBlock *SinglePred = BB->getSinglePredecessor();
// Don't merge if BB's address is taken.
if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
if (Term && !Term->isConditional()) {
Changed = true;
DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
// Remember if SinglePred was the entry block of the function.
// If so, we will need to move BB back to the entry position.
bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
MergeBasicBlockIntoOnlyPred(BB, nullptr);
if (isEntry && BB != &BB->getParent()->getEntryBlock())
BB->moveBefore(&BB->getParent()->getEntryBlock());
// We have erased a block. Update the iterator.
I = BB;
}
}
return Changed;
}
/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
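///
/// A typical candidate looks like (illustrative IR):
///
///   bb:
///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///     br label %dest
///
/// where %p is only used by PHI nodes in %dest.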
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
bool MadeChange = false;
// Note that this intentionally skips the entry block.
for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
BasicBlock *BB = I++;
// If this block doesn't end with an uncond branch, ignore it.
BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isUnconditional())
continue;
// If the instruction before the branch (skipping debug info) isn't a phi
// node, then other stuff is happening here.
BasicBlock::iterator BBI = BI;
if (BBI != BB->begin()) {
--BBI;
while (isa<DbgInfoIntrinsic>(BBI)) {
if (BBI == BB->begin())
break;
--BBI;
}
if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
continue;
}
// Do not break infinite loops.
BasicBlock *DestBB = BI->getSuccessor(0);
if (DestBB == BB)
continue;
if (!CanMergeBlocks(BB, DestBB))
continue;
EliminateMostlyEmptyBlock(BB);
MadeChange = true;
}
return MadeChange;
}
/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
const BasicBlock *DestBB) const {
// We only want to eliminate blocks whose phi nodes are used by phi nodes in
// the successor. If there is a more complex condition (e.g. preheaders),
// don't mess around with them.
BasicBlock::const_iterator BBI = BB->begin();
while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
for (const User *U : PN->users()) {
const Instruction *UI = cast<Instruction>(U);
if (UI->getParent() != DestBB || !isa<PHINode>(UI))
return false;
// If User is inside DestBB block and it is a PHINode then check
// incoming value. If incoming value is not from BB then this is
// a complex condition (e.g. preheaders) we want to avoid here.
if (UI->getParent() == DestBB) {
if (const PHINode *UPN = dyn_cast<PHINode>(UI))
for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
if (Insn && Insn->getParent() == BB &&
Insn->getParent() != UPN->getIncomingBlock(I))
return false;
}
}
}
}
// If BB and DestBB contain any common predecessors, then the phi nodes in BB
// and DestBB may have conflicting incoming values for the block. If so, we
// can't merge the block.
const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
if (!DestBBPN) return true; // no conflict.
// Collect the preds of BB.
SmallPtrSet<const BasicBlock*, 16> BBPreds;
if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
// It is faster to get preds from a PHI than with pred_iterator.
for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
BBPreds.insert(BBPN->getIncomingBlock(i));
} else {
BBPreds.insert(pred_begin(BB), pred_end(BB));
}
// Walk the preds of DestBB.
for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
if (BBPreds.count(Pred)) { // Common predecessor?
BBI = DestBB->begin();
while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
const Value *V1 = PN->getIncomingValueForBlock(Pred);
const Value *V2 = PN->getIncomingValueForBlock(BB);
// If V2 is a phi node in BB, look up what the mapped value will be.
if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
if (V2PN->getParent() == BB)
V2 = V2PN->getIncomingValueForBlock(Pred);
// If there is a conflict, bail out.
if (V1 != V2) return false;
}
}
}
return true;
}
/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi's and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
BranchInst *BI = cast<BranchInst>(BB->getTerminator());
BasicBlock *DestBB = BI->getSuccessor(0);
DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
// If the destination block has a single pred, then this is a trivial edge,
// just collapse it.
if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
if (SinglePred != DestBB) {
// Remember if SinglePred was the entry block of the function. If so, we
// will need to move BB back to the entry position.
bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
MergeBasicBlockIntoOnlyPred(DestBB, nullptr);
if (isEntry && BB != &BB->getParent()->getEntryBlock())
BB->moveBefore(&BB->getParent()->getEntryBlock());
DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
return;
}
}
// Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
// to handle the new incoming edges it is about to have.
PHINode *PN;
for (BasicBlock::iterator BBI = DestBB->begin();
(PN = dyn_cast<PHINode>(BBI)); ++BBI) {
// Remove the incoming value for BB, and remember it.
Value *InVal = PN->removeIncomingValue(BB, false);
// Two options: either the InVal is a phi node defined in BB or it is some
// value that dominates BB.
PHINode *InValPhi = dyn_cast<PHINode>(InVal);
if (InValPhi && InValPhi->getParent() == BB) {
// Add all of the input values of the input PHI as inputs of this phi.
for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
PN->addIncoming(InValPhi->getIncomingValue(i),
InValPhi->getIncomingBlock(i));
} else {
// Otherwise, add one instance of the dominating value for each edge that
// we will be adding.
if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
} else {
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
PN->addIncoming(InVal, *PI);
}
}
}
// The PHIs are now updated, change everything that refers to BB to use
// DestBB and remove BB.
BB->replaceAllUsesWith(DestBB);
BB->eraseFromParent();
++NumBlocksElim;
DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate calls
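// For example, given relocates keyed by (BasePtrIndex, DerivedPtrIndex) pairs
// (4, 4) and (4, 5), the (4, 4) entry is a base relocation and the (4, 5)
// entry is recorded under it in RelocateInstMap as a derived relocation.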
static void computeBaseDerivedRelocateMap(
const SmallVectorImpl<User *> &AllRelocateCalls,
DenseMap<IntrinsicInst *, SmallVector<IntrinsicInst *, 2>> &
RelocateInstMap) {
// Collect information in two maps: one primarily for locating the base object
// while filling the second map; the second map is the final structure holding
// a mapping between Base and corresponding Derived relocate calls
DenseMap<std::pair<unsigned, unsigned>, IntrinsicInst *> RelocateIdxMap;
for (auto &U : AllRelocateCalls) {
GCRelocateOperands ThisRelocate(U);
IntrinsicInst *I = cast<IntrinsicInst>(U);
auto K = std::make_pair(ThisRelocate.getBasePtrIndex(),
ThisRelocate.getDerivedPtrIndex());
RelocateIdxMap.insert(std::make_pair(K, I));
}
for (auto &Item : RelocateIdxMap) {
std::pair<unsigned, unsigned> Key = Item.first;
if (Key.first == Key.second)
// Base relocation: nothing to insert
continue;
IntrinsicInst *I = Item.second;
auto BaseKey = std::make_pair(Key.first, Key.first);
// We're iterating over RelocateIdxMap so we cannot modify it.
auto MaybeBase = RelocateIdxMap.find(BaseKey);
if (MaybeBase == RelocateIdxMap.end())
// TODO: We might want to insert a new base object relocate and gep off
// that, if there are enough derived object relocates.
continue;
RelocateInstMap[MaybeBase->second].push_back(I);
}
}
// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
SmallVectorImpl<Value *> &OffsetV) {
for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
// Only accept small constant integer operands
auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (!Op || Op->getZExtValue() > 20)
return false;
}
for (unsigned i = 1; i < GEP->getNumOperands(); i++)
OffsetV.push_back(GEP->getOperand(i));
return true;
}
// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(IntrinsicInst *RelocatedBase,
const SmallVectorImpl<IntrinsicInst *> &Targets) {
bool MadeChange = false;
for (auto &ToReplace : Targets) {
GCRelocateOperands MasterRelocate(RelocatedBase);
GCRelocateOperands ThisRelocate(ToReplace);
assert(ThisRelocate.getBasePtrIndex() == MasterRelocate.getBasePtrIndex() &&
"Not relocating a derived object of the original base object");
if (ThisRelocate.getBasePtrIndex() == ThisRelocate.getDerivedPtrIndex()) {
// A duplicate relocate call. TODO: coalesce duplicates.
continue;
}
Value *Base = ThisRelocate.getBasePtr();
auto Derived = dyn_cast<GetElementPtrInst>(ThisRelocate.getDerivedPtr());
if (!Derived || Derived->getPointerOperand() != Base)
continue;
SmallVector<Value *, 2> OffsetV;
if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
continue;
// Create a Builder and replace the target callsite with a gep
assert(RelocatedBase->getNextNode() && "Should always have one since it's not a terminator");
// Insert after RelocatedBase
IRBuilder<> Builder(RelocatedBase->getNextNode());
Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
// If gc_relocate does not match the actual type, cast it to the right type.
// In theory, there must be a bitcast after gc_relocate if the type does not
// match, and we should reuse it to get the derived pointer. But it could be
// cases like this:
// bb1:
// ...
// %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
// br label %merge
//
// bb2:
// ...
// %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
// br label %merge
//
// merge:
// %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
//   %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
//
// In this case, we cannot find the bitcast any more. So we insert a new bitcast
// whether one already exists or not. This way we can handle all cases, and
// the extra bitcast should be optimized away in later passes.
Instruction *ActualRelocatedBase = RelocatedBase;
if (RelocatedBase->getType() != Base->getType()) {
ActualRelocatedBase =
cast<Instruction>(Builder.CreateBitCast(RelocatedBase, Base->getType()));
}
Value *Replacement = Builder.CreateGEP(
Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
Instruction *ReplacementInst = cast<Instruction>(Replacement);
Replacement->takeName(ToReplace);
// If the newly generated derived pointer's type does not match the original derived
// pointer's type, cast the new derived pointer to match it. Same reasoning as above.
Instruction *ActualReplacement = ReplacementInst;
if (ReplacementInst->getType() != ToReplace->getType()) {
ActualReplacement =
cast<Instruction>(Builder.CreateBitCast(ReplacementInst, ToReplace->getType()));
}
ToReplace->replaceAllUsesWith(ActualReplacement);
ToReplace->eraseFromParent();
MadeChange = true;
}
return MadeChange;
}
// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
bool MadeChange = false;
SmallVector<User *, 2> AllRelocateCalls;
for (auto *U : I.users())
if (isGCRelocate(dyn_cast<Instruction>(U)))
// Collect all the relocate calls associated with a statepoint
AllRelocateCalls.push_back(U);
  // We need at least one base pointer relocation + one derived pointer
// relocation to mangle
if (AllRelocateCalls.size() < 2)
return false;
// RelocateInstMap is a mapping from the base relocate instruction to the
// corresponding derived relocate instructions
DenseMap<IntrinsicInst *, SmallVector<IntrinsicInst *, 2>> RelocateInstMap;
computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
if (RelocateInstMap.empty())
return false;
for (auto &Item : RelocateInstMap)
// Item.first is the RelocatedBase to offset against
// Item.second is the vector of Targets to replace
    MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
return MadeChange;
}
/// SinkCast - Sink the specified cast instruction into its user blocks
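/// For example (a sketch; names are hypothetical):
///   DefBB:
///     %c = bitcast i32* %p to i8*
///     ...
///   UserBB:
///     call void @use(i8* %c)
/// is rewritten so that UserBB gets its own copy of the bitcast; if the
/// original cast then has no remaining uses, it is erased.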
static bool SinkCast(CastInst *CI) {
BasicBlock *DefBB = CI->getParent();
/// InsertedCasts - Only insert a cast in each block once.
DenseMap<BasicBlock*, CastInst*> InsertedCasts;
bool MadeChange = false;
for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
UI != E; ) {
Use &TheUse = UI.getUse();
Instruction *User = cast<Instruction>(*UI);
// Figure out which BB this cast is used in. For PHI's this is the
// appropriate predecessor block.
BasicBlock *UserBB = User->getParent();
if (PHINode *PN = dyn_cast<PHINode>(User)) {
UserBB = PN->getIncomingBlock(TheUse);
}
// Preincrement use iterator so we don't invalidate it.
++UI;
// If this user is in the same block as the cast, don't change the cast.
if (UserBB == DefBB) continue;
// If we have already inserted a cast into this block, use it.
CastInst *&InsertedCast = InsertedCasts[UserBB];
if (!InsertedCast) {
BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
InsertedCast =
CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
InsertPt);
}
// Replace a use of the cast with a use of the new cast.
TheUse = InsertedCast;
MadeChange = true;
++NumCastUses;
}
// If we removed all uses, nuke the cast.
if (CI->use_empty()) {
CI->eraseFromParent();
MadeChange = true;
}
return MadeChange;
}
/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
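/// For instance, on a target that promotes i32 to i64, a "trunc i64 %x to
/// i32" becomes a noop after promotion (both types legalize to i64), so
/// sinking it costs nothing at codegen time.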
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
const DataLayout &DL) {
// If this is a noop copy,
EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(DL, CI->getType());
// This is an fp<->int conversion?
if (SrcVT.isInteger() != DstVT.isInteger())
return false;
// If this is an extension, it will be a zero or sign extension, which
// isn't a noop.
if (SrcVT.bitsLT(DstVT)) return false;
// If these values will be promoted, find out what they will be promoted
// to. This helps us consider truncates on PPC as noop copies when they
// are.
if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
TargetLowering::TypePromoteInteger)
SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
if (TLI.getTypeAction(CI->getContext(), DstVT) ==
TargetLowering::TypePromoteInteger)
DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
// If, after promotion, these are the same types, this is a noop copy.
if (SrcVT != DstVT)
return false;
return SinkCast(CI);
}
/// CombineUAddWithOverflow - try to combine CI into a call to the
/// llvm.uadd.with.overflow intrinsic if possible.
///
/// Return true if any changes were made.
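/// One matched shape looks like this (a sketch; m_UAddWithOverflow accepts
/// several equivalent forms):
///   %add = add i32 %a, %b
///   %ov  = icmp ult i32 %add, %a          ; unsigned-overflow test
/// =>
///   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add = extractvalue { i32, i1 } %res, 0
///   %ov  = extractvalue { i32, i1 } %res, 1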
static bool CombineUAddWithOverflow(CmpInst *CI) {
Value *A, *B;
Instruction *AddI;
if (!match(CI,
m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
return false;
Type *Ty = AddI->getType();
if (!isa<IntegerType>(Ty))
return false;
  // We don't want to move around uses of condition values this late, so we
// check if it is legal to create the call to the intrinsic in the basic
// block containing the icmp:
if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
return false;
#ifndef NDEBUG
// Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
// for now:
if (AddI->hasOneUse())
assert(*AddI->user_begin() == CI && "expected!");
#endif
Module *M = CI->getParent()->getParent()->getParent();
Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);
auto *InsertPt = AddI->hasOneUse() ? CI : AddI;
auto *UAddWithOverflow =
CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
auto *Overflow =
ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);
CI->replaceAllUsesWith(Overflow);
AddI->replaceAllUsesWith(UAdd);
CI->eraseFromParent();
AddI->eraseFromParent();
return true;
}
/// SinkCmpExpression - Sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool SinkCmpExpression(CmpInst *CI) {
BasicBlock *DefBB = CI->getParent();
/// InsertedCmp - Only insert a cmp in each block once.
DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
bool MadeChange = false;
for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
UI != E; ) {
Use &TheUse = UI.getUse();
Instruction *User = cast<Instruction>(*UI);
// Preincrement use iterator so we don't invalidate it.
++UI;
// Don't bother for PHI nodes.
if (isa<PHINode>(User))
continue;
// Figure out which BB this cmp is used in.
BasicBlock *UserBB = User->getParent();
// If this user is in the same block as the cmp, don't change the cmp.
if (UserBB == DefBB) continue;
// If we have already inserted a cmp into this block, use it.
CmpInst *&InsertedCmp = InsertedCmps[UserBB];
if (!InsertedCmp) {
BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
InsertedCmp =
CmpInst::Create(CI->getOpcode(),
CI->getPredicate(), CI->getOperand(0),
CI->getOperand(1), "", InsertPt);
}
// Replace a use of the cmp with a use of the new cmp.
TheUse = InsertedCmp;
MadeChange = true;
++NumCmpUses;
}
// If we removed all uses, nuke the cmp.
if (CI->use_empty()) {
CI->eraseFromParent();
MadeChange = true;
}
return MadeChange;
}
static bool OptimizeCmpExpression(CmpInst *CI) {
if (SinkCmpExpression(CI))
return true;
if (CombineUAddWithOverflow(CI))
return true;
return false;
}
/// isExtractBitsCandidateUse - Check if the candidates could
/// be combined with shift instruction, which includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
/// imm & (imm+1) == 0
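/// For example, the constant 0x00FF passes the mask test
/// (0x00FF & 0x0100 == 0), while 0x00F0 fails it (0x00F0 & 0x00F1 != 0).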
static bool isExtractBitsCandidateUse(Instruction *User) {
if (!isa<TruncInst>(User)) {
if (User->getOpcode() != Instruction::And ||
!isa<ConstantInt>(User->getOperand(1)))
return false;
const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
if ((Cimm & (Cimm + 1)).getBoolValue())
return false;
}
return true;
}
/// SinkShiftAndTruncate - sink both the shift and truncate instructions
/// into the BB of the truncate's user.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
const TargetLowering &TLI, const DataLayout &DL) {
BasicBlock *UserBB = User->getParent();
DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
TruncInst *TruncI = dyn_cast<TruncInst>(User);
bool MadeChange = false;
for (Value::user_iterator TruncUI = TruncI->user_begin(),
TruncE = TruncI->user_end();
TruncUI != TruncE;) {
Use &TruncTheUse = TruncUI.getUse();
Instruction *TruncUser = cast<Instruction>(*TruncUI);
// Preincrement use iterator so we don't invalidate it.
++TruncUI;
int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
if (!ISDOpcode)
continue;
// If the use is actually a legal node, there will not be an
// implicit truncate.
// FIXME: always querying the result type is just an
// approximation; some nodes' legality is determined by the
// operand or other means. There's no good way to find out though.
if (TLI.isOperationLegalOrCustom(
ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
continue;
// Don't bother for PHI nodes.
if (isa<PHINode>(TruncUser))
continue;
BasicBlock *TruncUserBB = TruncUser->getParent();
if (UserBB == TruncUserBB)
continue;
BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
if (!InsertedShift && !InsertedTrunc) {
BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
// Sink the shift
if (ShiftI->getOpcode() == Instruction::AShr)
InsertedShift =
BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
else
InsertedShift =
BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);
// Sink the trunc
BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
TruncInsertPt++;
InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
TruncI->getType(), "", TruncInsertPt);
MadeChange = true;
TruncTheUse = InsertedTrunc;
}
}
return MadeChange;
}
/// OptimizeExtractBits - sink the shift *right* instruction into user blocks if
/// the uses could potentially be combined with this shift instruction and
/// generate BitExtract instruction. It will only be applied if the architecture
/// supports BitExtract instruction. Here is an example:
/// BB1:
/// %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
/// %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
/// %x.extract.shift.1 = lshr i64 %arg1, 32
/// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
const TargetLowering &TLI,
const DataLayout &DL) {
BasicBlock *DefBB = ShiftI->getParent();
/// Only insert instructions in each block once.
DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
bool MadeChange = false;
for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
UI != E;) {
Use &TheUse = UI.getUse();
Instruction *User = cast<Instruction>(*UI);
// Preincrement use iterator so we don't invalidate it.
++UI;
// Don't bother for PHI nodes.
if (isa<PHINode>(User))
continue;
if (!isExtractBitsCandidateUse(User))
continue;
BasicBlock *UserBB = User->getParent();
if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if its
      // type is not legal. In this case, we would like to sink both the shift
      // and the truncate into the BB of TruncUse.
// for example:
// BB1:
// i64 shift.result = lshr i64 opnd, imm
// trunc.result = trunc shift.result to i16
//
// BB2:
// ----> We will have an implicit truncate here if the architecture does
// not have i16 compare.
// cmp i16 trunc.result, opnd2
//
      if (isa<TruncInst>(User) && shiftIsLegal &&
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          !TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))
        MadeChange |=
SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
continue;
}
// If we have already inserted a shift into this block, use it.
BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
if (!InsertedShift) {
BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
if (ShiftI->getOpcode() == Instruction::AShr)
InsertedShift =
BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
else
InsertedShift =
BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);
MadeChange = true;
}
// Replace a use of the shift with a use of the new shift.
TheUse = InsertedShift;
}
// If we removed all uses, nuke the shift.
if (ShiftI->use_empty())
ShiftI->eraseFromParent();
return MadeChange;
}
// ScalarizeMaskedLoad() translates masked load intrinsic, like
// <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align,
// <16 x i1> %mask, <16 x i32> %passthru)
// to a chain of basic blocks that loads the elements one by one if
// the appropriate mask bit is set
//
// %1 = bitcast i8* %addr to i32*
// %2 = extractelement <16 x i1> %mask, i32 0
// %3 = icmp eq i1 %2, true
// br i1 %3, label %cond.load, label %else
//
//cond.load: ; preds = %0
// %4 = getelementptr i32* %1, i32 0
// %5 = load i32* %4
// %6 = insertelement <16 x i32> undef, i32 %5, i32 0
// br label %else
//
//else: ; preds = %0, %cond.load
// %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ]
// %7 = extractelement <16 x i1> %mask, i32 1
// %8 = icmp eq i1 %7, true
// br i1 %8, label %cond.load1, label %else2
//
//cond.load1: ; preds = %else
// %9 = getelementptr i32* %1, i32 1
// %10 = load i32* %9
// %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1
// br label %else2
//
//else2: ; preds = %else, %cond.load1
// %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
// %12 = extractelement <16 x i1> %mask, i32 2
// %13 = icmp eq i1 %12, true
// br i1 %13, label %cond.load4, label %else5
//
static void ScalarizeMaskedLoad(CallInst *CI) {
Value *Ptr = CI->getArgOperand(0);
Value *Src0 = CI->getArgOperand(3);
Value *Mask = CI->getArgOperand(2);
  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
  assert(VecType && "Unexpected return type of masked load intrinsic");
  Type *EltTy = VecType->getElementType();
IRBuilder<> Builder(CI->getContext());
Instruction *InsertPt = CI;
BasicBlock *IfBlock = CI->getParent();
BasicBlock *CondBlock = nullptr;
BasicBlock *PrevIfBlock = CI->getParent();
Builder.SetInsertPoint(InsertPt);
Builder.SetCurrentDebugLocation(CI->getDebugLoc());
  // Bitcast %addr from i8* to EltTy*
Type *NewPtrType =
EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
Value *UndefVal = UndefValue::get(VecType);
// The result vector
Value *VResult = UndefVal;
PHINode *Phi = nullptr;
Value *PrevPhi = UndefVal;
unsigned VectorWidth = VecType->getNumElements();
for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
// Fill the "else" block, created in the previous iteration
//
// %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
// %mask_1 = extractelement <16 x i1> %mask, i32 Idx
// %to_load = icmp eq i1 %mask_1, true
// br i1 %to_load, label %cond.load, label %else
//
if (Idx > 0) {
Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
Phi->addIncoming(VResult, CondBlock);
Phi->addIncoming(PrevPhi, PrevIfBlock);
PrevPhi = Phi;
VResult = Phi;
}
Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
ConstantInt::get(Predicate->getType(), 1));
// Create "cond" block
//
// %EltAddr = getelementptr i32* %1, i32 0
// %Elt = load i32* %EltAddr
// VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
//
CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
Builder.SetInsertPoint(InsertPt);
Value *Gep =
Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
LoadInst* Load = Builder.CreateLoad(Gep, false);
VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
// Create "else" block, fill it in the next iteration
BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
Builder.SetInsertPoint(InsertPt);
Instruction *OldBr = IfBlock->getTerminator();
BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
OldBr->eraseFromParent();
PrevIfBlock = IfBlock;
IfBlock = NewIfBlock;
}
Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
Phi->addIncoming(VResult, CondBlock);
Phi->addIncoming(PrevPhi, PrevIfBlock);
Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
CI->replaceAllUsesWith(NewI);
CI->eraseFromParent();
}
// ScalarizeMaskedStore() translates masked store intrinsic, like
// void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
// <16 x i1> %mask)
// to a chain of basic blocks that stores the elements one by one if
// the appropriate mask bit is set
//
// %1 = bitcast i8* %addr to i32*
// %2 = extractelement <16 x i1> %mask, i32 0
// %3 = icmp eq i1 %2, true
// br i1 %3, label %cond.store, label %else
//
// cond.store: ; preds = %0
// %4 = extractelement <16 x i32> %val, i32 0
// %5 = getelementptr i32* %1, i32 0
// store i32 %4, i32* %5
// br label %else
//
// else: ; preds = %0, %cond.store
// %6 = extractelement <16 x i1> %mask, i32 1
// %7 = icmp eq i1 %6, true
// br i1 %7, label %cond.store1, label %else2
//
// cond.store1: ; preds = %else
// %8 = extractelement <16 x i32> %val, i32 1
// %9 = getelementptr i32* %1, i32 1
// store i32 %8, i32* %9
// br label %else2
// . . .
static void ScalarizeMaskedStore(CallInst *CI) {
Value *Ptr = CI->getArgOperand(1);
Value *Src = CI->getArgOperand(0);
Value *Mask = CI->getArgOperand(3);
  VectorType *VecType = dyn_cast<VectorType>(Src->getType());
  assert(VecType && "Unexpected data type in masked store intrinsic");
  Type *EltTy = VecType->getElementType();
IRBuilder<> Builder(CI->getContext());
Instruction *InsertPt = CI;
BasicBlock *IfBlock = CI->getParent();
Builder.SetInsertPoint(InsertPt);
Builder.SetCurrentDebugLocation(CI->getDebugLoc());
  // Bitcast %addr from i8* to EltTy*
Type *NewPtrType =
EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
unsigned VectorWidth = VecType->getNumElements();
for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
// Fill the "else" block, created in the previous iteration
//
// %mask_1 = extractelement <16 x i1> %mask, i32 Idx
// %to_store = icmp eq i1 %mask_1, true
    //  br i1 %to_store, label %cond.store, label %else
//
Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
ConstantInt::get(Predicate->getType(), 1));
// Create "cond" block
//
// %OneElt = extractelement <16 x i32> %Src, i32 Idx
// %EltAddr = getelementptr i32* %1, i32 0
// %store i32 %OneElt, i32* %EltAddr
//
BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
Builder.SetInsertPoint(InsertPt);
Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
Value *Gep =
Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
Builder.CreateStore(OneElt, Gep);
// Create "else" block, fill it in the next iteration
BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
Builder.SetInsertPoint(InsertPt);
Instruction *OldBr = IfBlock->getTerminator();
BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
OldBr->eraseFromParent();
IfBlock = NewIfBlock;
}
CI->eraseFromParent();
}
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
BasicBlock *BB = CI->getParent();
// Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
// lower it to normal LLVM code, do so now.
if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
if (TLI->ExpandInlineAsm(CI)) {
// Avoid invalidating the iterator.
CurInstIterator = BB->begin();
// Avoid processing instructions out of order, which could cause
// reuse before a value is defined.
SunkAddrs.clear();
return true;
}
// Sink address computing for memory operands into the block.
if (OptimizeInlineAsmInst(CI))
return true;
}
// Align the pointer arguments to this call if the target thinks it's a good
// idea
unsigned MinSize, PrefAlign;
if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
for (auto &Arg : CI->arg_operands()) {
// We want to align both objects whose address is used directly and
// objects whose address is used in casts and GEPs, though it only makes
// sense for GEPs if the offset is a multiple of the desired alignment and
// if size - offset meets the size threshold.
if (!Arg->getType()->isPointerTy())
continue;
APInt Offset(DL->getPointerSizeInBits(
cast<PointerType>(Arg->getType())->getAddressSpace()),
0);
Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
uint64_t Offset2 = Offset.getLimitedValue();
if ((Offset2 & (PrefAlign-1)) != 0)
continue;
AllocaInst *AI;
if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
AI->setAlignment(PrefAlign);
// Global variables can only be aligned if they are defined in this
// object (i.e. they are uniquely initialized in this object), and
// over-aligning global variables that have an explicit section is
// forbidden.
GlobalVariable *GV;
if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->hasUniqueInitializer() &&
!GV->hasSection() && GV->getAlignment() < PrefAlign &&
DL->getTypeAllocSize(GV->getType()->getElementType()) >=
MinSize + Offset2)
GV->setAlignment(PrefAlign);
}
// If this is a memcpy (or similar) then we may be able to improve the
// alignment
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
unsigned Align = getKnownAlignment(MI->getDest(), *DL);
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
if (Align > MI->getAlignment())
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
}
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
if (II) {
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::objectsize: {
// Lower all uses of llvm.objectsize.*
bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
Type *ReturnTy = CI->getType();
Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
// Substituting this can cause recursive simplifications, which can
// invalidate our iterator. Use WeakTrackingVH to hold onto it in case
// this happens.
WeakTrackingVH IterHandle(CurInstIterator);
replaceAndRecursivelySimplify(CI, RetVal,
TLInfo, nullptr);
// If the iterator instruction was recursively deleted, start over at the
// start of the block.
if (IterHandle != CurInstIterator) {
CurInstIterator = BB->begin();
SunkAddrs.clear();
}
return true;
}
case Intrinsic::masked_load: {
// Scalarize unsupported vector masked load
if (!TTI->isLegalMaskedLoad(CI->getType(), 1)) {
ScalarizeMaskedLoad(CI);
ModifiedDT = true;
return true;
}
return false;
}
case Intrinsic::masked_store: {
if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(), 1)) {
ScalarizeMaskedStore(CI);
ModifiedDT = true;
return true;
}
return false;
}
#if 0 // HLSL Change - remove platform intrinsics
case Intrinsic::aarch64_stlxr:
case Intrinsic::aarch64_stxr: {
ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
if (!ExtVal || !ExtVal->hasOneUse() ||
ExtVal->getParent() == CI->getParent())
return false;
// Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
ExtVal->moveBefore(CI);
// Mark this instruction as "inserted by CGP", so that other
// optimizations don't touch it.
InsertedInsts.insert(ExtVal);
return true;
}
#endif // HLSL Change - remove platform intrinsics
}
if (TLI) {
// Unknown address space.
// TODO: Target hook to pick which address space the intrinsic cares
// about?
unsigned AddrSpace = ~0u;
SmallVector<Value*, 2> PtrOps;
Type *AccessTy;
if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy, AddrSpace))
while (!PtrOps.empty())
if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy, AddrSpace))
return true;
}
}
// From here on out we're working with named functions.
if (!CI->getCalledFunction()) return false;
// Lower all default uses of _chk calls. This is very similar
// to what InstCombineCalls does, but here we are only lowering calls
// to fortified library functions (e.g. __memcpy_chk) that have the default
// "don't know" as the objectsize. Anything else should be left alone.
FortifiedLibCallSimplifier Simplifier(TLInfo, true);
if (Value *V = Simplifier.optimizeCall(CI)) {
CI->replaceAllUsesWith(V);
CI->eraseFromParent();
return true;
}
return false;
}
/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// @code
/// bb0:
/// %tmp0 = tail call i32 @f0()
/// br label %return
/// bb1:
/// %tmp1 = tail call i32 @f1()
/// br label %return
/// bb2:
/// %tmp2 = tail call i32 @f2()
/// br label %return
/// return:
/// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
/// ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
/// %tmp0 = tail call i32 @f0()
/// ret i32 %tmp0
/// bb1:
/// %tmp1 = tail call i32 @f1()
/// ret i32 %tmp1
/// bb2:
/// %tmp2 = tail call i32 @f2()
/// ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
if (!TLI)
return false;
ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
if (!RI)
return false;
PHINode *PN = nullptr;
BitCastInst *BCI = nullptr;
Value *V = RI->getReturnValue();
if (V) {
BCI = dyn_cast<BitCastInst>(V);
if (BCI)
V = BCI->getOperand(0);
PN = dyn_cast<PHINode>(V);
if (!PN)
return false;
}
if (PN && PN->getParent() != BB)
return false;
// It's not safe to eliminate the sign / zero extension of the return value.
// See llvm::isInTailCallPosition().
const Function *F = BB->getParent();
AttributeSet CallerAttrs = F->getAttributes();
if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
return false;
// Make sure there are no instructions between the PHI and return, or that the
// return is the first instruction in the block.
if (PN) {
BasicBlock::iterator BI = BB->begin();
do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
if (&*BI == BCI)
// Also skip over the bitcast.
++BI;
if (&*BI != RI)
return false;
} else {
BasicBlock::iterator BI = BB->begin();
while (isa<DbgInfoIntrinsic>(BI)) ++BI;
if (&*BI != RI)
return false;
}
/// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
/// call.
SmallVector<CallInst*, 4> TailCalls;
if (PN) {
for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
// Make sure the phi value is indeed produced by the tail call.
if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
TLI->mayBeEmittedAsTailCall(CI))
TailCalls.push_back(CI);
}
} else {
SmallPtrSet<BasicBlock*, 4> VisitedBBs;
for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
if (!VisitedBBs.insert(*PI).second)
continue;
BasicBlock::InstListType &InstList = (*PI)->getInstList();
BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
if (RI == RE)
continue;
CallInst *CI = dyn_cast<CallInst>(&*RI);
if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
TailCalls.push_back(CI);
}
}
bool Changed = false;
for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
CallInst *CI = TailCalls[i];
CallSite CS(CI);
// Conservatively require the attributes of the call to match those of the
// return. Ignore noalias because it doesn't affect the call sequence.
AttributeSet CalleeAttrs = CS.getAttributes();
if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
removeAttribute(Attribute::NoAlias) !=
AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
removeAttribute(Attribute::NoAlias))
continue;
// Make sure the call instruction is followed by an unconditional branch to
// the return block.
BasicBlock *CallBB = CI->getParent();
BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
continue;
// Duplicate the return into CallBB.
(void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
ModifiedDT = Changed = true;
++NumRetsDup;
}
// If we eliminated all predecessors of the block, delete the block now.
if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
BB->eraseFromParent();
return Changed;
}
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//
namespace {
/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
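/// With all fields populated, print() renders a mode such as
/// [GV:@g + 16 + Base:%base + 4*%idx] (hypothetical names).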
struct ExtAddrMode : public TargetLowering::AddrMode {
Value *BaseReg;
Value *ScaledReg;
ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
void print(raw_ostream &OS) const;
void dump() const;
bool operator==(const ExtAddrMode& O) const {
return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
(BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
(HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
}
};
#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
AM.print(OS);
return OS;
}
#endif
void ExtAddrMode::print(raw_ostream &OS) const {
bool NeedPlus = false;
OS << "[";
if (BaseGV) {
OS << (NeedPlus ? " + " : "")
<< "GV:";
BaseGV->printAsOperand(OS, /*PrintType=*/false);
NeedPlus = true;
}
if (BaseOffs) {
OS << (NeedPlus ? " + " : "")
<< BaseOffs;
NeedPlus = true;
}
if (BaseReg) {
OS << (NeedPlus ? " + " : "")
<< "Base:";
BaseReg->printAsOperand(OS, /*PrintType=*/false);
NeedPlus = true;
}
if (Scale) {
OS << (NeedPlus ? " + " : "")
<< Scale << "*";
ScaledReg->printAsOperand(OS, /*PrintType=*/false);
}
OS << ']';
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
print(dbgs());
dbgs() << '\n';
}
#endif
/// \brief This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
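/// Typical usage follows a speculative-transform pattern (a sketch built
/// only from the public API below; Inst, NewTy, and Profitable are
/// hypothetical):
///   TypePromotionTransaction TPT;
///   auto Pt = TPT.getRestorationPoint();
///   TPT.mutateType(Inst, NewTy);   // ...and possibly more actions.
///   if (Profitable)
///     TPT.commit();                // Keep everything.
///   else
///     TPT.rollback(Pt);            // Undo the actions made after Pt.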
class TypePromotionTransaction {
/// \brief This represents the common interface of the individual transaction.
/// Each class implements the logic for doing one specific modification on
/// the IR via the TypePromotionTransaction.
class TypePromotionAction {
protected:
/// The Instruction modified.
Instruction *Inst;
public:
/// \brief Constructor of the action.
/// The constructor performs the related action on the IR.
TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
virtual ~TypePromotionAction() {}
/// \brief Undo the modification done by this action.
/// When this method is called, the IR must be in the same state as it was
/// before this action was applied.
/// \pre Undoing the action works if and only if the IR is in the exact same
/// state as it was directly after this action was applied.
virtual void undo() = 0;
/// \brief Advocate every change made by this action.
  /// When the action's effects on the IR are to be kept, it is important to
  /// call this function; otherwise hidden information may be kept forever.
virtual void commit() {
// Nothing to be done, this action is not doing anything.
}
};
/// \brief Utility to remember the position of an instruction.
class InsertionHandler {
/// Position of an instruction.
/// Either an instruction:
/// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
union {
Instruction *PrevInst;
BasicBlock *BB;
} Point;
/// Remember whether or not the instruction had a previous instruction.
bool HasPrevInstruction;
public:
/// \brief Record the position of \p Inst.
InsertionHandler(Instruction *Inst) {
BasicBlock::iterator It = Inst;
HasPrevInstruction = (It != (Inst->getParent()->begin()));
if (HasPrevInstruction)
Point.PrevInst = --It;
else
Point.BB = Inst->getParent();
}
/// \brief Insert \p Inst at the recorded position.
void insert(Instruction *Inst) {
if (HasPrevInstruction) {
if (Inst->getParent())
Inst->removeFromParent();
Inst->insertAfter(Point.PrevInst);
} else {
Instruction *Position = Point.BB->getFirstInsertionPt();
if (Inst->getParent())
Inst->moveBefore(Position);
else
Inst->insertBefore(Position);
}
}
};
/// \brief Move an instruction before another.
class InstructionMoveBefore : public TypePromotionAction {
/// Original position of the instruction.
InsertionHandler Position;
public:
/// \brief Move \p Inst before \p Before.
InstructionMoveBefore(Instruction *Inst, Instruction *Before)
: TypePromotionAction(Inst), Position(Inst) {
DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
Inst->moveBefore(Before);
}
/// \brief Move the instruction back to its original position.
void undo() override {
DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
Position.insert(Inst);
}
};
/// \brief Set the operand of an instruction with a new value.
class OperandSetter : public TypePromotionAction {
/// Original operand of the instruction.
Value *Origin;
/// Index of the modified instruction.
unsigned Idx;
public:
/// \brief Set \p Idx operand of \p Inst with \p NewVal.
OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
: TypePromotionAction(Inst), Idx(Idx) {
DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
<< "for:" << *Inst << "\n"
<< "with:" << *NewVal << "\n");
Origin = Inst->getOperand(Idx);
Inst->setOperand(Idx, NewVal);
}
/// \brief Restore the original value of the instruction.
void undo() override {
DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
<< "for: " << *Inst << "\n"
<< "with: " << *Origin << "\n");
Inst->setOperand(Idx, Origin);
}
};
/// \brief Hide the operands of an instruction.
  /// Behave as if this instruction were not using any of its operands.
class OperandsHider : public TypePromotionAction {
/// The list of original operands.
SmallVector<Value *, 4> OriginalValues;
public:
/// \brief Remove \p Inst from the uses of the operands of \p Inst.
OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
unsigned NumOpnds = Inst->getNumOperands();
OriginalValues.reserve(NumOpnds);
for (unsigned It = 0; It < NumOpnds; ++It) {
// Save the current operand.
Value *Val = Inst->getOperand(It);
OriginalValues.push_back(Val);
// Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
// that we are not willing to pay.
Inst->setOperand(It, UndefValue::get(Val->getType()));
}
}
/// \brief Restore the original list of uses.
void undo() override {
DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
Inst->setOperand(It, OriginalValues[It]);
}
};
/// \brief Build a truncate instruction.
class TruncBuilder : public TypePromotionAction {
Value *Val;
public:
/// \brief Build a truncate instruction of \p Opnd producing a \p Ty
/// result.
/// trunc Opnd to Ty.
TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
IRBuilder<> Builder(Opnd);
Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
}
/// \brief Get the built value.
Value *getBuiltValue() { return Val; }
/// \brief Remove the built instruction.
void undo() override {
DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
};
/// \brief Build a sign extension instruction.
class SExtBuilder : public TypePromotionAction {
Value *Val;
public:
/// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
/// result.
/// sext Opnd to Ty.
SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
: TypePromotionAction(InsertPt) {
IRBuilder<> Builder(InsertPt);
Val = Builder.CreateSExt(Opnd, Ty, "promoted");
DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
}
/// \brief Get the built value.
Value *getBuiltValue() { return Val; }
/// \brief Remove the built instruction.
void undo() override {
DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
};
/// \brief Build a zero extension instruction.
class ZExtBuilder : public TypePromotionAction {
Value *Val;
public:
/// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
/// result.
/// zext Opnd to Ty.
ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
: TypePromotionAction(InsertPt) {
IRBuilder<> Builder(InsertPt);
Val = Builder.CreateZExt(Opnd, Ty, "promoted");
DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
}
/// \brief Get the built value.
Value *getBuiltValue() { return Val; }
/// \brief Remove the built instruction.
void undo() override {
DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
};
/// \brief Mutate an instruction to another type.
class TypeMutator : public TypePromotionAction {
/// Record the original type.
Type *OrigTy;
public:
/// \brief Mutate the type of \p Inst into \p NewTy.
TypeMutator(Instruction *Inst, Type *NewTy)
: TypePromotionAction(Inst), OrigTy(Inst->getType()) {
DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
<< "\n");
Inst->mutateType(NewTy);
}
/// \brief Mutate the instruction back to its original type.
void undo() override {
DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
<< "\n");
Inst->mutateType(OrigTy);
}
};
/// \brief Replace the uses of an instruction by another instruction.
class UsesReplacer : public TypePromotionAction {
/// Helper structure to keep track of the replaced uses.
struct InstructionAndIdx {
      /// The instruction using the replaced instruction.
      Instruction *Inst;
      /// The operand index at which the replaced instruction is used.
unsigned Idx;
InstructionAndIdx(Instruction *Inst, unsigned Idx)
: Inst(Inst), Idx(Idx) {}
};
/// Keep track of the original uses (pair Instruction, Index).
SmallVector<InstructionAndIdx, 4> OriginalUses;
typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;
public:
    /// \brief Replace all the uses of \p Inst with \p New.
UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
<< "\n");
// Record the original uses.
for (Use &U : Inst->uses()) {
Instruction *UserI = cast<Instruction>(U.getUser());
OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
}
// Now, we can replace the uses.
Inst->replaceAllUsesWith(New);
}
/// \brief Reassign the original uses of Inst to Inst.
void undo() override {
DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
for (use_iterator UseIt = OriginalUses.begin(),
EndIt = OriginalUses.end();
UseIt != EndIt; ++UseIt) {
UseIt->Inst->setOperand(UseIt->Idx, Inst);
}
}
};
/// \brief Remove an instruction from the IR.
class InstructionRemover : public TypePromotionAction {
/// Original position of the instruction.
InsertionHandler Inserter;
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to behave as if the instruction were removed.
OperandsHider Hider;
/// Keep track of the uses replaced, if any.
UsesReplacer *Replacer;
public:
    /// \brief Remove all references to \p Inst and optionally replace all its
    /// uses with \p New.
/// \pre If !Inst->use_empty(), then New != nullptr
InstructionRemover(Instruction *Inst, Value *New = nullptr)
: TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
Replacer(nullptr) {
if (New)
Replacer = new UsesReplacer(Inst, New);
DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
Inst->removeFromParent();
}
~InstructionRemover() override { delete Replacer; }
/// \brief Really remove the instruction.
void commit() override { delete Inst; }
    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when building this action.
void undo() override {
DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
Inserter.insert(Inst);
if (Replacer)
Replacer->undo();
Hider.undo();
}
};
public:
/// Restoration point.
/// The restoration point is a pointer to an action instead of an iterator
/// because the iterator may be invalidated but not the pointer.
typedef const TypePromotionAction *ConstRestorationPt;
  /// Advocate every change made in this transaction.
void commit();
/// Undo all the changes made after the given point.
void rollback(ConstRestorationPt Point);
/// Get the current restoration point.
ConstRestorationPt getRestorationPoint() const;
/// \name API for IR modification with state keeping to support rollback.
/// @{
/// Same as Instruction::setOperand.
void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
/// Same as Instruction::eraseFromParent.
void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
/// Same as Value::replaceAllUsesWith.
void replaceAllUsesWith(Instruction *Inst, Value *New);
/// Same as Value::mutateType.
void mutateType(Instruction *Inst, Type *NewTy);
/// Same as IRBuilder::createTrunc.
Value *createTrunc(Instruction *Opnd, Type *Ty);
/// Same as IRBuilder::createSExt.
Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
/// Same as IRBuilder::createZExt.
Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
/// Same as Instruction::moveBefore.
void moveBefore(Instruction *Inst, Instruction *Before);
/// @}
private:
/// The ordered list of actions made so far.
SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt;
};
void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
Value *NewVal) {
Actions.push_back(
make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
}
void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
Value *NewVal) {
Actions.push_back(
make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal));
}
void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
Value *New) {
Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}
void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}
Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
Type *Ty) {
std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
Value *Val = Ptr->getBuiltValue();
Actions.push_back(std::move(Ptr));
return Val;
}
Value *TypePromotionTransaction::createSExt(Instruction *Inst,
Value *Opnd, Type *Ty) {
std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
Value *Val = Ptr->getBuiltValue();
Actions.push_back(std::move(Ptr));
return Val;
}
Value *TypePromotionTransaction::createZExt(Instruction *Inst,
Value *Opnd, Type *Ty) {
std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
Value *Val = Ptr->getBuiltValue();
Actions.push_back(std::move(Ptr));
return Val;
}
void TypePromotionTransaction::moveBefore(Instruction *Inst,
Instruction *Before) {
Actions.push_back(
make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before));
}
TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
return !Actions.empty() ? Actions.back().get() : nullptr;
}
void TypePromotionTransaction::commit() {
for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
++It)
(*It)->commit();
Actions.clear();
}
void TypePromotionTransaction::rollback(
TypePromotionTransaction::ConstRestorationPt Point) {
while (!Actions.empty() && Point != Actions.back().get()) {
std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
Curr->undo();
}
}
/// \brief A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
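/// For example, on a target whose legal forms include [base + scale*index],
/// an address computed as (a sketch; names are hypothetical):
///   %ext  = sext i32 %i to i64
///   %addr = getelementptr i32, i32* %p, i64 %ext
/// may match as Base:%p + 4*%ext, with legality decided by
/// TLI.isLegalAddressingMode.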
class AddressingModeMatcher {
SmallVectorImpl<Instruction*> &AddrModeInsts;
const TargetMachine &TM;
const TargetLowering &TLI;
const DataLayout &DL;
/// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
/// the memory instruction that we're computing this address for.
Type *AccessTy;
unsigned AddrSpace;
Instruction *MemoryInst;
/// AddrMode - This is the addressing mode that we're building up. This is
/// part of the return value of this addressing mode matching stuff.
ExtAddrMode &AddrMode;
/// The instructions inserted by other CodeGenPrepare optimizations.
const SetOfInstrs &InsertedInsts;
/// A map from the instructions to their type before promotion.
InstrToOrigTy &PromotedInsts;
/// The ongoing transaction where every action should be registered.
TypePromotionTransaction &TPT;
/// IgnoreProfitability - This is set to true when we should not do
/// profitability checks. When true, IsProfitableToFoldIntoAddressingMode
/// always returns true.
bool IgnoreProfitability;
AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
const TargetMachine &TM, Type *AT, unsigned AS,
Instruction *MI, ExtAddrMode &AM,
const SetOfInstrs &InsertedInsts,
InstrToOrigTy &PromotedInsts,
TypePromotionTransaction &TPT)
: AddrModeInsts(AMI), TM(TM),
TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent())
->getTargetLowering()),
DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
PromotedInsts(PromotedInsts), TPT(TPT) {
IgnoreProfitability = false;
}
public:
/// Match - Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
/// instructions in AddrModeInsts.
/// \p InsertedInsts The instructions inserted by other CodeGenPrepare
/// optimizations.
/// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
Instruction *MemoryInst,
SmallVectorImpl<Instruction*> &AddrModeInsts,
const TargetMachine &TM,
const SetOfInstrs &InsertedInsts,
InstrToOrigTy &PromotedInsts,
TypePromotionTransaction &TPT) {
ExtAddrMode Result;
bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS,
MemoryInst, Result, InsertedInsts,
PromotedInsts, TPT).MatchAddr(V, 0);
(void)Success; assert(Success && "Couldn't select *anything*?");
return Result;
}
private:
bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
bool MatchAddr(Value *V, unsigned Depth);
bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
bool *MovedAway = nullptr);
bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
ExtAddrMode &AMBefore,
ExtAddrMode &AMAfter);
bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
bool IsPromotionProfitable(unsigned NewCost, unsigned OldCost,
Value *PromotedOperand) const;
};
/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
unsigned Depth) {
// If Scale is 1, then this is the same as adding ScaleReg to the addressing
// mode. Just process that directly.
if (Scale == 1)
return MatchAddr(ScaleReg, Depth);
// If the scale is 0, it takes nothing to add this.
if (Scale == 0)
return true;
// If we already have a scale of this value, we can add to it, otherwise, we
// need an available scale field.
if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
return false;
ExtAddrMode TestAddrMode = AddrMode;
// Add scale to turn X*4+X*3 -> X*7. This could also do things like
// [A+B + A*7] -> [B+A*8].
TestAddrMode.Scale += Scale;
TestAddrMode.ScaledReg = ScaleReg;
// If the new address isn't legal, bail out.
if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
return false;
// It was legal, so commit it.
AddrMode = TestAddrMode;
// Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
// to see if ScaleReg is actually X+C. If so, we can turn this into adding
// X*Scale + C*Scale to addr mode.
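  // For example, with ScaleReg = (add i64 %x, 4) and Scale = 2, we retry the
  // match with ScaledReg = %x and BaseOffs increased by 8 (a sketch; whether
  // the resulting mode is legal remains target-dependent).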
ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
if (isa<Instruction>(ScaleReg) && // not a constant expr.
match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
TestAddrMode.ScaledReg = AddLHS;
TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
// If this addressing mode is legal, commit it and remember that we folded
// this instruction.
if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
AddrMode = TestAddrMode;
return true;
}
}
// Otherwise, not (x+c)*scale, just return what we have.
return true;
}
/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it. This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
switch (I->getOpcode()) {
case Instruction::BitCast:
case Instruction::AddrSpaceCast:
// Don't touch identity bitcasts.
if (I->getType() == I->getOperand(0)->getType())
return false;
return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
case Instruction::PtrToInt:
// PtrToInt is always a noop, as we know that the int type is pointer sized.
return true;
case Instruction::IntToPtr:
// We know the input is intptr_t, so this is foldable.
return true;
case Instruction::Add:
return true;
case Instruction::Mul:
case Instruction::Shl:
// Can only handle X*C and X << C.
return isa<ConstantInt>(I->getOperand(1));
case Instruction::GetElementPtr:
return true;
default:
return false;
}
}
/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
const DataLayout &DL, Value *Val) {
Instruction *PromotedInst = dyn_cast<Instruction>(Val);
if (!PromotedInst)
return false;
int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
// If the ISDOpcode is undefined, it was undefined before the promotion.
if (!ISDOpcode)
return true;
// Otherwise, check if the promoted instruction is legal or not.
return TLI.isOperationLegalOrCustom(
ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}
/// \brief Helper class to perform type promotion.
class TypePromotionHelper {
/// \brief Utility function to check whether or not a sign or zero extension
/// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
/// either using the operands of \p Inst or promoting \p Inst.
/// The type of the extension is defined by \p IsSExt.
/// In other words, check if:
/// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
/// #1 Promotion applies:
/// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
/// #2 Operand reuses:
/// ext opnd1 to ConsideredExtType.
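  /// For instance, a sketch of case #1 with a sign extension:
  /// \code
  /// %add = add nsw i16 %a, %b
  /// %res = sext i16 %add to i32
  /// \endcode
  /// can become:
  /// \code
  /// %exta = sext i16 %a to i32
  /// %extb = sext i16 %b to i32
  /// %res = add nsw i32 %exta, %extb
  /// \endcode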
/// \p PromotedInsts maps the instructions to their type before promotion.
static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
const InstrToOrigTy &PromotedInsts, bool IsSExt);
/// \brief Utility function to determine if \p OpIdx should be promoted when
/// promoting \p Inst.
static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
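    // Operand 0 of a select is its i1 condition; it must keep its type, so
    // only the true/false values (operands 1 and 2) may be extended.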
if (isa<SelectInst>(Inst) && OpIdx == 0)
return false;
return true;
}
  /// \brief Utility function to promote the operand of \p Ext when this
  /// operand is a promotable trunc, sext, or zext.
/// \p PromotedInsts maps the instructions to their type before promotion.
/// \p CreatedInstsCost[out] contains the cost of all instructions
/// created to promote the operand of Ext.
/// Newly added extensions are inserted in \p Exts.
/// Newly added truncates are inserted in \p Truncs.
/// Should never be called directly.
/// \return The promoted value which is used instead of Ext.
static Value *promoteOperandForTruncAndAnyExt(
Instruction *Ext, TypePromotionTransaction &TPT,
InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
SmallVectorImpl<Instruction *> *Exts,
SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
/// \brief Utility function to promote the operand of \p Ext when this
/// operand is promotable and is not a supported trunc or sext.
/// \p PromotedInsts maps the instructions to their type before promotion.
/// \p CreatedInstsCost[out] contains the cost of all the instructions
/// created to promote the operand of Ext.
/// Newly added extensions are inserted in \p Exts.
/// Newly added truncates are inserted in \p Truncs.
/// Should never be called directly.
/// \return The promoted value which is used instead of Ext.
static Value *promoteOperandForOther(Instruction *Ext,
TypePromotionTransaction &TPT,
InstrToOrigTy &PromotedInsts,
unsigned &CreatedInstsCost,
SmallVectorImpl<Instruction *> *Exts,
SmallVectorImpl<Instruction *> *Truncs,
const TargetLowering &TLI, bool IsSExt);
/// \see promoteOperandForOther.
static Value *signExtendOperandForOther(
Instruction *Ext, TypePromotionTransaction &TPT,
InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
SmallVectorImpl<Instruction *> *Exts,
SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
Exts, Truncs, TLI, true);
}
/// \see promoteOperandForOther.
static Value *zeroExtendOperandForOther(
Instruction *Ext, TypePromotionTransaction &TPT,
InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
SmallVectorImpl<Instruction *> *Exts,
SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
Exts, Truncs, TLI, false);
}
public:
/// Type for the utility function that promotes the operand of Ext.
typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
InstrToOrigTy &PromotedInsts,
unsigned &CreatedInstsCost,
SmallVectorImpl<Instruction *> *Exts,
SmallVectorImpl<Instruction *> *Truncs,
const TargetLowering &TLI);
  /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate
/// action to promote the operand of \p Ext instead of using Ext.
/// \return NULL if no promotable action is possible with the current
/// sign extension.
/// \p InsertedInsts keeps track of all the instructions inserted by the
/// other CodeGenPrepare optimizations. This information is important
/// because we do not want to promote these instructions as CodeGenPrepare
/// will reinsert them later. Thus creating an infinite loop: create/remove.
/// \p PromotedInsts maps the instructions to their type before promotion.
static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
const TargetLowering &TLI,
const InstrToOrigTy &PromotedInsts);
};
bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
Type *ConsideredExtType,
const InstrToOrigTy &PromotedInsts,
bool IsSExt) {
// The promotion helper does not know how to deal with vector types yet.
// To be able to fix that, we would need to fix the places where we
// statically extend, e.g., constants and such.
if (Inst->getType()->isVectorTy())
return false;
// We can always get through zext.
if (isa<ZExtInst>(Inst))
return true;
// sext(sext) is ok too.
if (IsSExt && isa<SExtInst>(Inst))
return true;
  // We can get through a binary operator if the extension can be distributed
  // over its operands, i.e., the binary operator has the matching nuw or nsw
  // flag.
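  // E.g., "zext (add nuw i16 %a, %b) to i32" computes the same value as
  // "add i32 (zext i16 %a to i32), (zext i16 %b to i32)"; without the nuw
  // flag a wrap-around in the narrow add would invalidate the rewrite.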
const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
(IsSExt && BinOp->hasNoSignedWrap())))
return true;
// Check if we can do the following simplification.
// ext(trunc(opnd)) --> ext(opnd)
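  // E.g., if %o is known to be "sext i8 %v to i16", then
  //   sext (trunc i16 %o to i8) to i32
  // computes the same value as "sext i16 %o to i32": the trunc only drops
  // bits that are sign-extension copies (a sketch of the checks below).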
if (!isa<TruncInst>(Inst))
return false;
Value *OpndVal = Inst->getOperand(0);
// Check if we can use this operand in the extension.
// If the type is larger than the result type of the extension,
// we cannot.
if (!OpndVal->getType()->isIntegerTy() ||
OpndVal->getType()->getIntegerBitWidth() >
ConsideredExtType->getIntegerBitWidth())
return false;
// If the operand of the truncate is not an instruction, we will not have
// any information on the dropped bits.
  // (Actually we could for constants, but it is not worth the extra logic.)
Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
if (!Opnd)
return false;
  // Check if the source of the truncate is narrow enough, i.e., check that
  // the trunc just drops extended bits of the same kind as the extension.
// #1 get the type of the operand and check the kind of the extended bits.
const Type *OpndType;
InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
if (It != PromotedInsts.end() && It->second.IsSExt == IsSExt)
OpndType = It->second.Ty;
else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
OpndType = Opnd->getOperand(0)->getType();
else
return false;
  // #2 check that the truncate just drops extended bits.
if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth())
return true;
return false;
}
TypePromotionHelper::Action TypePromotionHelper::getAction(
Instruction *Ext, const SetOfInstrs &InsertedInsts,
const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
"Unexpected instruction type");
Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
Type *ExtTy = Ext->getType();
bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check that we can get through.
if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
return nullptr;
  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
return nullptr;
  // SExt, ZExt, or Trunc instructions.
  // Return the related handler.
if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
isa<ZExtInst>(ExtOpnd))
return promoteOperandForTruncAndAnyExt;
// Regular instruction.
// Abort early if we will have to insert non-free instructions.
if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
return nullptr;
return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}
Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
llvm::Instruction *SExt, TypePromotionTransaction &TPT,
InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
SmallVectorImpl<Instruction *> *Exts,
SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
// By construction, the operand of SExt is an instruction. Otherwise we cannot
// get through it and this method should not be called.
Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
Value *ExtVal = SExt;
bool HasMergedNonFreeExt = false;
if (isa<ZExtInst>(SExtOpnd)) {
// Replace s|zext(zext(opnd))
// => zext(opnd).
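    // E.g., "sext (zext i8 %a to i16) to i32" is just "zext i8 %a to i32",
    // because the inner zext guarantees the high bits are zero.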
HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
Value *ZExt =
TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
TPT.replaceAllUsesWith(SExt, ZExt);
TPT.eraseInstruction(SExt);
ExtVal = ZExt;
} else {
// Replace z|sext(trunc(opnd)) or sext(sext(opnd))
// => z|sext(opnd).
TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
}
CreatedInstsCost = 0;
// Remove dead code.
if (SExtOpnd->use_empty())
TPT.eraseInstruction(SExtOpnd);
// Check if the extension is still needed.
Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
if (ExtInst) {
if (Exts)
Exts->push_back(ExtInst);
CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
}
return ExtVal;
}
// At this point we have: ext ty opnd to ty.
// Reassign the uses of ExtInst to the opnd and remove ExtInst.
Value *NextVal = ExtInst->getOperand(0);
TPT.eraseInstruction(ExtInst, NextVal);
return NextVal;
}
Value *TypePromotionHelper::promoteOperandForOther(
Instruction *Ext, TypePromotionTransaction &TPT,
InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
SmallVectorImpl<Instruction *> *Exts,
SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
bool IsSExt) {
// By construction, the operand of Ext is an instruction. Otherwise we cannot
// get through it and this method should not be called.
Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
CreatedInstsCost = 0;
if (!ExtOpnd->hasOneUse()) {
// ExtOpnd will be promoted.
    // All its uses except Ext will need to use a truncated value of the
    // promoted version.
// Create the truncate now.
Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
ITrunc->removeFromParent();
// Insert it just after the definition.
ITrunc->insertAfter(ExtOpnd);
if (Truncs)
Truncs->push_back(ITrunc);
}
TPT.replaceAllUsesWith(ExtOpnd, Trunc);
    // Restore the operand of Ext (which has been replaced by the previous call
    // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
TPT.setOperand(Ext, 0, ExtOpnd);
}
// Get through the Instruction:
// 1. Update its type.
// 2. Replace the uses of Ext by Inst.
// 3. Extend each operand that needs to be extended.
// Remember the original type of the instruction before promotion.
// This is useful to know that the high bits are sign extended bits.
PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
// Step #1.
TPT.mutateType(ExtOpnd, Ext->getType());
// Step #2.
TPT.replaceAllUsesWith(Ext, ExtOpnd);
// Step #3.
Instruction *ExtForOpnd = Ext;
DEBUG(dbgs() << "Propagate Ext to operands\n");
for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
++OpIdx) {
DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
!shouldExtOperand(ExtOpnd, OpIdx)) {
DEBUG(dbgs() << "No need to propagate\n");
continue;
}
// Check if we can statically extend the operand.
Value *Opnd = ExtOpnd->getOperand(OpIdx);
if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
DEBUG(dbgs() << "Statically extend\n");
unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
: Cst->getValue().zext(BitWidth);
TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
continue;
}
    // UndefValues are typed, so we have to statically extend them.
if (isa<UndefValue>(Opnd)) {
DEBUG(dbgs() << "Statically extend\n");
TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
continue;
}
    // Otherwise we have to explicitly extend the operand.
// Check if Ext was reused to extend an operand.
if (!ExtForOpnd) {
// If yes, create a new one.
DEBUG(dbgs() << "More operands to ext\n");
Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
: TPT.createZExt(Ext, Opnd, Ext->getType());
if (!isa<Instruction>(ValForExtOpnd)) {
TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
continue;
}
ExtForOpnd = cast<Instruction>(ValForExtOpnd);
}
if (Exts)
Exts->push_back(ExtForOpnd);
TPT.setOperand(ExtForOpnd, 0, Opnd);
// Move the sign extension before the insertion point.
TPT.moveBefore(ExtForOpnd, ExtOpnd);
TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more extensions are required, new instructions will have to be created.
ExtForOpnd = nullptr;
}
if (ExtForOpnd == Ext) {
DEBUG(dbgs() << "Extension is useless now\n");
TPT.eraseInstruction(Ext);
}
return ExtOpnd;
}
/// IsPromotionProfitable - Check whether or not promoting an instruction
/// to a wider type was profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode thanks to the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::IsPromotionProfitable(
unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
// The cost of the new extensions is greater than the cost of the
// old extension plus what we folded.
// This is not profitable.
if (NewCost > OldCost)
return false;
if (NewCost < OldCost)
return true;
// The promotion is neutral but it may help folding the sign extension in
// loads for instance.
// Check that we did not create an illegal instruction.
return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}
/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode. If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information of whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added in the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
unsigned Depth,
bool *MovedAway) {
// Avoid exponential behavior on extremely deep expression trees.
if (Depth >= 5) return false;
// By default, all matched instructions stay in place.
if (MovedAway)
*MovedAway = false;
switch (Opcode) {
case Instruction::PtrToInt:
// PtrToInt is always a noop, as we know that the int type is pointer sized.
return MatchAddr(AddrInst->getOperand(0), Depth);
case Instruction::IntToPtr: {
auto AS = AddrInst->getType()->getPointerAddressSpace();
auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
// This inttoptr is a no-op if the integer type is pointer sized.
if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
return MatchAddr(AddrInst->getOperand(0), Depth);
return false;
}
case Instruction::BitCast:
// BitCast is always a noop, and we can handle it as long as it is
// int->int or pointer->pointer (we don't want int<->fp or something).
if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
// Don't touch identity bitcasts. These were probably put here by LSR,
// and we don't want to mess around with them. Assume it knows what it
// is doing.
AddrInst->getOperand(0)->getType() != AddrInst->getType())
return MatchAddr(AddrInst->getOperand(0), Depth);
return false;
case Instruction::AddrSpaceCast: {
unsigned SrcAS
= AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
return MatchAddr(AddrInst->getOperand(0), Depth);
return false;
}
case Instruction::Add: {
// Check to see if we can merge in the RHS then the LHS. If so, we win.
ExtAddrMode BackupAddrMode = AddrMode;
unsigned OldSize = AddrModeInsts.size();
// Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a partially
    // matched operation.
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
MatchAddr(AddrInst->getOperand(0), Depth+1))
return true;
// Restore the old addr mode info.
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
TPT.rollback(LastKnownGood);
// Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
MatchAddr(AddrInst->getOperand(1), Depth+1))
return true;
// Otherwise we definitely can't merge the ADD in.
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
TPT.rollback(LastKnownGood);
break;
}
//case Instruction::Or:
// TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
//break;
case Instruction::Mul:
case Instruction::Shl: {
// Can only handle X*C and X << C.
ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
if (!RHS)
return false;
int64_t Scale = RHS->getSExtValue();
if (Opcode == Instruction::Shl)
Scale = 1LL << Scale;
return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
}
case Instruction::GetElementPtr: {
    // Scan the GEP. We check whether it contains constant offsets and at most
    // one variable offset.
int VariableOperand = -1;
unsigned VariableScale = 0;
int64_t ConstantOffset = 0;
gep_type_iterator GTI = gep_type_begin(AddrInst);
for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx =
cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
ConstantOffset += SL->getElementOffset(Idx);
} else {
uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
ConstantOffset += CI->getSExtValue()*TypeSize;
} else if (TypeSize) { // Scales of zero don't do anything.
// We only allow one variable index at the moment.
if (VariableOperand != -1)
return false;
// Remember the variable index.
VariableOperand = i;
VariableScale = TypeSize;
}
}
}
// A common case is for the GEP to only do a constant offset. In this case,
// just add it to the disp field and check validity.
if (VariableOperand == -1) {
AddrMode.BaseOffs += ConstantOffset;
if (ConstantOffset == 0 ||
TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
// Check to see if we can fold the base pointer in too.
if (MatchAddr(AddrInst->getOperand(0), Depth+1))
return true;
}
AddrMode.BaseOffs -= ConstantOffset;
return false;
}
// Save the valid addressing mode in case we can't match.
ExtAddrMode BackupAddrMode = AddrMode;
unsigned OldSize = AddrModeInsts.size();
// See if the scale and offset amount is valid for this target.
AddrMode.BaseOffs += ConstantOffset;
// Match the base operand of the GEP.
if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
// If it couldn't be matched, just stuff the value in a register.
if (AddrMode.HasBaseReg) {
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
return false;
}
AddrMode.HasBaseReg = true;
AddrMode.BaseReg = AddrInst->getOperand(0);
}
// Match the remaining variable portion of the GEP.
if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
Depth)) {
// If it couldn't be matched, try stuffing the base into a register
// instead of matching it, and retrying the match of the scale.
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
if (AddrMode.HasBaseReg)
return false;
AddrMode.HasBaseReg = true;
AddrMode.BaseReg = AddrInst->getOperand(0);
AddrMode.BaseOffs += ConstantOffset;
if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
VariableScale, Depth)) {
// If even that didn't work, bail.
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
return false;
}
}
return true;
}
case Instruction::SExt:
case Instruction::ZExt: {
Instruction *Ext = dyn_cast<Instruction>(AddrInst);
if (!Ext)
return false;
// Try to move this ext out of the way of the addressing mode.
// Ask for a method for doing so.
TypePromotionHelper::Action TPH =
TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
if (!TPH)
return false;
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
unsigned CreatedInstsCost = 0;
unsigned ExtCost = !TLI.isExtFree(Ext);
Value *PromotedOperand =
TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // The ext has been moved away.
// Thus either it will be rematched later in the recursive calls or it is
// gone. Anyway, we must not fold it into the addressing mode at this point.
// E.g.,
// op = add opnd, 1
// idx = ext op
// addr = gep base, idx
// is now:
// promotedOpnd = ext opnd <- no match here
// op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
// addr = gep base, op <- match
if (MovedAway)
*MovedAway = true;
assert(PromotedOperand &&
"TypePromotionHelper should have filtered out those cases");
ExtAddrMode BackupAddrMode = AddrMode;
unsigned OldSize = AddrModeInsts.size();
if (!MatchAddr(PromotedOperand, Depth) ||
        // The total new cost is equal to the cost of the created
        // instructions.
        // The total old cost is equal to the cost of the extension plus
        // what we have saved in the addressing mode.
!IsPromotionProfitable(CreatedInstsCost,
ExtCost + (AddrModeInsts.size() - OldSize),
PromotedOperand)) {
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
TPT.rollback(LastKnownGood);
return false;
}
return true;
}
}
return false;
}
/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode. If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified. This assumes that Addr is either a pointer type
/// or intptr_t for the target.
///
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
// Start a transaction at this point that we will rollback if the matching
// fails.
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
// Fold in immediates if legal for the target.
AddrMode.BaseOffs += CI->getSExtValue();
if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
return true;
AddrMode.BaseOffs -= CI->getSExtValue();
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
// If this is a global variable, try to fold it into the addressing mode.
if (!AddrMode.BaseGV) {
AddrMode.BaseGV = GV;
if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
return true;
AddrMode.BaseGV = nullptr;
}
} else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
ExtAddrMode BackupAddrMode = AddrMode;
unsigned OldSize = AddrModeInsts.size();
// Check to see if it is possible to fold this operation.
bool MovedAway = false;
if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away. If so, there is nothing
      // to check here.
if (MovedAway)
return true;
// Okay, it's possible to fold this. Check to see if it is actually
// *profitable* to do so. We use a simple cost model to avoid increasing
// register pressure too much.
if (I->hasOneUse() ||
IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
AddrModeInsts.push_back(I);
return true;
}
// It isn't profitable to do this, roll back.
//cerr << "NOT FOLDING: " << *I;
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
TPT.rollback(LastKnownGood);
}
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
return true;
TPT.rollback(LastKnownGood);
} else if (isa<ConstantPointerNull>(Addr)) {
// Null pointer gets folded without affecting the addressing mode.
return true;
}
  // Worst case, the target should support [reg] addressing modes. :)
if (!AddrMode.HasBaseReg) {
AddrMode.HasBaseReg = true;
AddrMode.BaseReg = Addr;
// Still check for legality in case the target supports [imm] but not [i+r].
if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
return true;
AddrMode.HasBaseReg = false;
AddrMode.BaseReg = nullptr;
}
// If the base register is already taken, see if we can do [r+r].
if (AddrMode.Scale == 0) {
AddrMode.Scale = 1;
AddrMode.ScaledReg = Addr;
if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
return true;
AddrMode.Scale = 0;
AddrMode.ScaledReg = nullptr;
}
// Couldn't match.
TPT.rollback(LastKnownGood);
return false;
}
/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands. If so, return true, otherwise
/// return false.
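///
/// For instance, a pointer passed through an indirect memory constraint
/// (e.g. "*m") stays foldable as an address, whereas the same pointer used
/// as a register operand forces it to be materialized (an illustration of
/// the check below).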
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
const TargetMachine &TM) {
const Function *F = CI->getParent()->getParent();
const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering();
const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo();
TargetLowering::AsmOperandInfoVector TargetConstraints =
TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI,
ImmutableCallSite(CI));
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
// Compute the constraint code and ConstraintType to use.
TLI->ComputeConstraintToUse(OpInfo, SDValue());
// If this asm operand is our Value*, and if it isn't an indirect memory
// operand, we can't fold it!
if (OpInfo.CallOperandVal == OpVal &&
(OpInfo.ConstraintType != TargetLowering::C_Memory ||
!OpInfo.isIndirect))
return false;
}
return true;
}
/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use. If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(
Instruction *I,
SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetMachine &TM) {
// If we already considered this instruction, we're done.
if (!ConsideredInsts.insert(I).second)
return false;
// If this is an obviously unfoldable instruction, bail out.
if (!MightBeFoldableInst(I))
return true;
// Loop over all the uses, recursively processing them.
for (Use &U : I->uses()) {
Instruction *UserI = cast<Instruction>(U.getUser());
if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
continue;
}
if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
unsigned opNo = U.getOperandNo();
if (opNo == 0) return true; // Storing addr, not into addr.
MemoryUses.push_back(std::make_pair(SI, opNo));
continue;
}
if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
if (!IA) return true;
// If this is a memory operand, we're cool, otherwise bail out.
if (!IsOperandAMemoryOperand(CI, IA, I, TM))
return true;
continue;
}
if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TM))
return true;
}
return false;
}
/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
/// the use site that we're folding it into. If so, there is no cost to
/// include it in the addressing mode. KnownLive1 and KnownLive2 are two values
/// that we know are live at the instruction already.
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
Value *KnownLive2) {
// If Val is either of the known-live values, we know it is live!
if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
return true;
// All values other than instructions and arguments (e.g. constants) are live.
if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
  // If Val is a constant-sized alloca in the entry block, it is live; this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
if (AI->isStaticAlloca())
return true;
// Check to see if this value is already used in the memory instruction's
// block. If so, it's already live into the block at the very least, so we
// can reasonably fold it.
return Val->isUsedInBasicBlock(MemoryInst->getParent());
}
/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it. However, the specified instruction has multiple
/// uses. Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
/// X = ...
/// Y = X+1
/// use(Y) -> nonload/store
/// Z = Y+1
/// load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case. This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
ExtAddrMode &AMAfter) {
if (IgnoreProfitability) return true;
// AMBefore is the addressing mode before this instruction was folded into it,
// and AMAfter is the addressing mode after the instruction was folded. Get
// the set of registers referenced by AMAfter and subtract out those
// referenced by AMBefore: this is the set of values which folding in this
// address extends the lifetime of.
//
// Note that there are only two potential values being referenced here,
// BaseReg and ScaleReg (global addresses are always available, as are any
// folded immediates).
Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
// If the BaseReg or ScaledReg was referenced by the previous addrmode, their
// lifetime wasn't extended by adding this instruction.
if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
BaseReg = nullptr;
if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
ScaledReg = nullptr;
  // If folding this instruction (and its subexprs) didn't extend any live
// ranges, we're ok with it.
if (!BaseReg && !ScaledReg)
return true;
// If all uses of this instruction are ultimately load/store/inlineasm's,
// check to see if their addressing modes will include this instruction. If
// so, we can fold it into all uses, so it doesn't matter if it has multiple
// uses.
SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
SmallPtrSet<Instruction*, 16> ConsideredInsts;
if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TM))
return false; // Has a non-memory, non-foldable use!
// Now that we know that all uses of this instruction are part of a chain of
// computation involving only operations that could theoretically be folded
// into a memory use, loop over each of these uses and see if they could
// *actually* fold the instruction.
SmallVector<Instruction*, 32> MatchedAddrModeInsts;
for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
Instruction *User = MemoryUses[i].first;
unsigned OpNo = MemoryUses[i].second;
// Get the access type of this use. If the use isn't a pointer, we don't
// know what it accesses.
Value *Address = User->getOperand(OpNo);
PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
if (!AddrTy)
return false;
Type *AddressAccessTy = AddrTy->getElementType();
unsigned AS = AddrTy->getAddressSpace();
// Do a match against the root of this address, ignoring profitability. This
// will tell us if the addressing mode for the memory operation will
// *actually* cover the shared instruction.
ExtAddrMode Result;
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS,
MemoryInst, Result, InsertedInsts,
PromotedInsts, TPT);
Matcher.IgnoreProfitability = true;
bool Success = Matcher.MatchAddr(Address, 0);
(void)Success; assert(Success && "Couldn't select *anything*?");
    // The match was only to check profitability; the changes made are not
    // part of the original matcher. Therefore, they should be dropped,
    // otherwise the original matcher will not present the right state.
TPT.rollback(LastKnownGood);
// If the match didn't cover I, then it won't be shared by it.
if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
I) == MatchedAddrModeInsts.end())
return false;
MatchedAddrModeInsts.clear();
}
return true;
}
} // end anonymous namespace
/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
if (Instruction *I = dyn_cast<Instruction>(V))
return I->getParent() != BB;
return false;
}
/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation. As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program. The problem is that isel can only
/// see within a single block. As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
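///
/// A sketch of the effect (names illustrative; exact output depends on the
/// target and on the chosen sinking strategy):
/// \code
/// entry:
///   %addr = getelementptr i32* %base, i64 %idx
///   br label %use
/// use:
///   %v = load i32* %addr
/// \endcode
/// =>
/// \code
/// use:
///   %sunkaddr = getelementptr i32* %base, i64 %idx
///   %v = load i32* %sunkaddr
/// \endcode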
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Type *AccessTy, unsigned AddrSpace) {
Value *Repl = Addr;
// Try to collapse single-value PHI nodes. This is necessary to undo
// unprofitable PRE transformations.
SmallVector<Value*, 8> worklist;
SmallPtrSet<Value*, 16> Visited;
worklist.push_back(Addr);
  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are equivalent.
Value *Consensus = nullptr;
unsigned NumUsesConsensus = 0;
bool IsNumUsesConsensusValid = false;
SmallVector<Instruction*, 16> AddrModeInsts;
ExtAddrMode AddrMode;
TypePromotionTransaction TPT;
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
while (!worklist.empty()) {
Value *V = worklist.back();
worklist.pop_back();
// Break use-def graph loops.
if (!Visited.insert(V).second) {
Consensus = nullptr;
break;
}
// For a PHI node, push all of its incoming values.
if (PHINode *P = dyn_cast<PHINode>(V)) {
for (Value *IncValue : P->incoming_values())
worklist.push_back(IncValue);
continue;
}
// For non-PHIs, determine the addressing mode being computed.
SmallVector<Instruction*, 16> NewAddrModeInsts;
ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM,
InsertedInsts, PromotedInsts, TPT);
// This check is broken into two cases with very similar code to avoid using
// getNumUses() as much as possible. Some values have a lot of uses, so
// calling getNumUses() unconditionally caused a significant compile-time
// regression.
if (!Consensus) {
Consensus = V;
AddrMode = NewAddrMode;
AddrModeInsts = NewAddrModeInsts;
continue;
} else if (NewAddrMode == AddrMode) {
if (!IsNumUsesConsensusValid) {
NumUsesConsensus = Consensus->getNumUses();
IsNumUsesConsensusValid = true;
}
// Ensure that the obtained addressing mode is equivalent to that obtained
// for all other roots of the PHI traversal. Also, when choosing one
// such root as representative, select the one with the most uses in order
// to keep the cost modeling heuristics in AddressingModeMatcher
// applicable.
unsigned NumUses = V->getNumUses();
if (NumUses > NumUsesConsensus) {
Consensus = V;
NumUsesConsensus = NumUses;
AddrModeInsts = NewAddrModeInsts;
}
continue;
}
Consensus = nullptr;
break;
}
// If the addressing mode couldn't be determined, or if multiple different
// ones were determined, bail out now.
if (!Consensus) {
TPT.rollback(LastKnownGood);
return false;
}
TPT.commit();
  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
bool AnyNonLocal = false;
for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
AnyNonLocal = true;
break;
}
}
// If all the instructions matched are already in this BB, don't do anything.
if (!AnyNonLocal) {
DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
return false;
}
  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
IRBuilder<> Builder(MemoryInst);
  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block. If so,
  // reuse the computation.
Value *&SunkAddr = SunkAddrs[Addr];
if (SunkAddr) {
DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst << "\n");
if (SunkAddr->getType() != Addr->getType())
SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
} else if (AddrSinkUsingGEPs ||
(!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
TM->getSubtargetImpl(*MemoryInst->getParent()->getParent())
->useAA())) {
// By default, we use the GEP-based method when AA is used later. This
// prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst << "\n");
Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
Value *ResultPtr = nullptr, *ResultIndex = nullptr;
// First, find the pointer.
if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
ResultPtr = AddrMode.BaseReg;
AddrMode.BaseReg = nullptr;
}
if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
// We can't add more than one pointer together, nor can we scale a
// pointer (both of which seem meaningless).
if (ResultPtr || AddrMode.Scale != 1)
return false;
ResultPtr = AddrMode.ScaledReg;
AddrMode.Scale = 0;
}
if (AddrMode.BaseGV) {
if (ResultPtr)
return false;
ResultPtr = AddrMode.BaseGV;
}
// If the real base value actually came from an inttoptr, then the matcher
// will look through it and provide only the integer value. In that case,
// use it here.
if (!ResultPtr && AddrMode.BaseReg) {
ResultPtr =
Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr");
AddrMode.BaseReg = nullptr;
} else if (!ResultPtr && AddrMode.Scale == 1) {
ResultPtr =
Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr");
AddrMode.Scale = 0;
}
if (!ResultPtr &&
!AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
SunkAddr = Constant::getNullValue(Addr->getType());
} else if (!ResultPtr) {
return false;
} else {
Type *I8PtrTy =
Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
Type *I8Ty = Builder.getInt8Ty();
// Start with the base register. Do this first so that subsequent address
// matching finds it last, which will prevent it from trying to match it
// as the scaled value in case it happens to be a mul. That would be
// problematic if we've sunk a different mul for the scale, because then
// we'd end up sinking both muls.
if (AddrMode.BaseReg) {
Value *V = AddrMode.BaseReg;
if (V->getType() != IntPtrTy)
V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
ResultIndex = V;
}
// Add the scale value.
if (AddrMode.Scale) {
Value *V = AddrMode.ScaledReg;
if (V->getType() == IntPtrTy) {
// done.
} else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
cast<IntegerType>(V->getType())->getBitWidth()) {
V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
} else {
          // It is only safe to sign extend the ScaledReg if we know that the
          // math required to create it did not overflow before we extend it.
          // Since the original IR value was tossed in favor of a constant
          // back when the AddrMode was created we need to bail out gracefully
          // if widths do not match instead of extending it.
Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
if (I && (ResultIndex != AddrMode.BaseReg))
I->eraseFromParent();
return false;
}
if (AddrMode.Scale != 1)
V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
"sunkaddr");
if (ResultIndex)
ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
else
ResultIndex = V;
}
// Add in the Base Offset if present.
if (AddrMode.BaseOffs) {
Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
if (ResultIndex) {
// We need to add this separately from the scale above to help with
// SDAG consecutive load/store merging.
if (ResultPtr->getType() != I8PtrTy)
ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
}
ResultIndex = V;
}
if (!ResultIndex) {
SunkAddr = ResultPtr;
} else {
if (ResultPtr->getType() != I8PtrTy)
ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
}
if (SunkAddr->getType() != Addr->getType())
SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
}
} else {
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst << "\n");
Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
Value *Result = nullptr;
// Start with the base register. Do this first so that subsequent address
// matching finds it last, which will prevent it from trying to match it
// as the scaled value in case it happens to be a mul. That would be
// problematic if we've sunk a different mul for the scale, because then
// we'd end up sinking both muls.
if (AddrMode.BaseReg) {
Value *V = AddrMode.BaseReg;
if (V->getType()->isPointerTy())
V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
if (V->getType() != IntPtrTy)
V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
Result = V;
}
// Add the scale value.
if (AddrMode.Scale) {
Value *V = AddrMode.ScaledReg;
if (V->getType() == IntPtrTy) {
// done.
} else if (V->getType()->isPointerTy()) {
V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
} else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
cast<IntegerType>(V->getType())->getBitWidth()) {
V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
} else {
        // It is only safe to sign extend the ScaledReg if we know that the
        // math required to create it did not overflow before we extend it.
        // Since the original IR value was tossed in favor of a constant
        // back when the AddrMode was created we need to bail out gracefully
        // if widths do not match instead of extending it.
Instruction *I = dyn_cast_or_null<Instruction>(Result);
if (I && (Result != AddrMode.BaseReg))
I->eraseFromParent();
return false;
}
if (AddrMode.Scale != 1)
V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
"sunkaddr");
if (Result)
Result = Builder.CreateAdd(Result, V, "sunkaddr");
else
Result = V;
}
// Add in the BaseGV if present.
if (AddrMode.BaseGV) {
Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
if (Result)
Result = Builder.CreateAdd(Result, V, "sunkaddr");
else
Result = V;
}
// Add in the Base Offset if present.
if (AddrMode.BaseOffs) {
Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
if (Result)
Result = Builder.CreateAdd(Result, V, "sunkaddr");
else
Result = V;
}
if (!Result)
SunkAddr = Constant::getNullValue(Addr->getType());
else
SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
}
MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
// If we have no uses, recursively delete the value and all dead instructions
// using it.
if (Repl->use_empty()) {
// This can cause recursive deletion, which can invalidate our iterator.
// Use a WeakTrackingVH to hold onto it in case this happens.
WeakTrackingVH IterHandle(CurInstIterator);
BasicBlock *BB = CurInstIterator->getParent();
RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
if (IterHandle != CurInstIterator) {
// If the iterator instruction was recursively deleted, start over at the
// start of the block.
CurInstIterator = BB->begin();
SunkAddrs.clear();
}
}
++NumMemoryInsts;
return true;
}
/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computation into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
bool MadeChange = false;
const TargetRegisterInfo *TRI =
TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
TargetLowering::AsmOperandInfoVector TargetConstraints =
TLI->ParseConstraints(*DL, TRI, CS);
unsigned ArgNo = 0;
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
// Compute the constraint code and ConstraintType to use.
TLI->ComputeConstraintToUse(OpInfo, SDValue());
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
OpInfo.isIndirect) {
Value *OpVal = CS->getArgOperand(ArgNo++);
MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
} else if (OpInfo.Type == InlineAsm::isInput)
ArgNo++;
}
return MadeChange;
}
/// \brief Check if all the uses of \p Inst are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI) {
assert(!Inst->use_empty() && "Input must have at least one use");
const Instruction *FirstUser = cast<Instruction>(*Inst->user_begin());
bool IsSExt = isa<SExtInst>(FirstUser);
Type *ExtTy = FirstUser->getType();
for (const User *U : Inst->users()) {
const Instruction *UI = cast<Instruction>(U);
if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
return false;
Type *CurTy = UI->getType();
// Same input and output types: Same instruction after CSE.
if (CurTy == ExtTy)
continue;
// If IsSExt is true, we are in this situation:
// a = Inst
// b = sext ty1 a to ty2
// c = sext ty1 a to ty3
// Assuming ty2 is shorter than ty3, this could be turned into:
// a = Inst
// b = sext ty1 a to ty2
// c = sext ty2 b to ty3
// However, the last sext is not free.
if (IsSExt)
return false;
    // This is a ZExt: extending from one type to another may be free,
    // in which case this use does not count as a different one.
Type *NarrowTy;
Type *LargeTy;
if (ExtTy->getScalarType()->getIntegerBitWidth() >
CurTy->getScalarType()->getIntegerBitWidth()) {
NarrowTy = CurTy;
LargeTy = ExtTy;
} else {
NarrowTy = ExtTy;
LargeTy = CurTy;
}
if (!TLI.isZExtFree(NarrowTy, LargeTy))
return false;
}
// All uses are the same or can be derived from one another for free.
return true;
}
/// \brief Try to form ExtLd by promoting \p Exts until they reach a
/// load instruction.
/// If an ext(load) can be formed, it is returned via \p LI for the load
/// and \p Inst for the extension.
/// Otherwise LI == nullptr and Inst == nullptr.
/// When some promotion happened, \p TPT contains the proper state to
/// revert them.
///
/// \return true when promoting was necessary to expose the ext(load)
/// opportunity, false otherwise.
///
/// Example:
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
/// Thanks to the promotion, we can match zext(load i32*) to i64.
bool CodeGenPrepare::ExtLdPromotion(TypePromotionTransaction &TPT,
LoadInst *&LI, Instruction *&Inst,
const SmallVectorImpl<Instruction *> &Exts,
unsigned CreatedInstsCost = 0) {
  // Iterate over all the extensions to see if one forms an ext(load).
for (auto I : Exts) {
// Check if we directly have ext(load).
if ((LI = dyn_cast<LoadInst>(I->getOperand(0)))) {
Inst = I;
// No promotion happened here.
return false;
}
// Check whether or not we want to do any promotion.
if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
continue;
// Get the action to perform the promotion.
TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
I, InsertedInsts, *TLI, PromotedInsts);
// Check if we can promote.
if (!TPH)
continue;
// Save the current state.
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
SmallVector<Instruction *, 4> NewExts;
unsigned NewCreatedInstsCost = 0;
unsigned ExtCost = !TLI->isExtFree(I);
// Promote.
Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
&NewExts, nullptr, *TLI);
assert(PromotedVal &&
"TypePromotionHelper should have filtered out those cases");
    // We can merge only one extension into a load.
// Therefore, if we have more than 1 new extension we heuristically
// cut this search path, because it means we degrade the code quality.
// With exactly 2, the transformation is neutral, because we will merge
// one extension but leave one. However, we optimistically keep going,
// because the new extension may be removed too.
long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
TotalCreatedInstsCost -= ExtCost;
if (!StressExtLdPromotion &&
(TotalCreatedInstsCost > 1 ||
!isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
// The promotion is not profitable, rollback to the previous state.
TPT.rollback(LastKnownGood);
continue;
}
// The promotion is profitable.
// Check if it exposes an ext(load).
(void)ExtLdPromotion(TPT, LI, Inst, NewExts, TotalCreatedInstsCost);
if (LI && (StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
               // If we have created a new extension, i.e., now we have two
               // extensions, we must make sure one of them is merged with
               // the load; otherwise we may degrade the code quality.
(LI->hasOneUse() || hasSameExtUse(LI, *TLI))))
// Promotion happened.
return true;
// If this does not help to expose an ext(load) then, rollback.
TPT.rollback(LastKnownGood);
}
  // None of the extensions can form an ext(load).
LI = nullptr;
Inst = nullptr;
return false;
}
/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
/// \p I[in/out] the extension may be modified during the process if some
/// promotions apply.
///
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *&I) {
  // Try to promote a chain of computation if it allows forming an
  // extended load.
TypePromotionTransaction TPT;
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
SmallVector<Instruction *, 1> Exts;
Exts.push_back(I);
// Look for a load being extended.
LoadInst *LI = nullptr;
Instruction *OldExt = I;
bool HasPromoted = ExtLdPromotion(TPT, LI, I, Exts);
if (!LI || !I) {
assert(!HasPromoted && !LI && "If we did not match any load instruction "
"the code must remain the same");
I = OldExt;
return false;
}
// If they're already in the same block, there's nothing to do.
// Make the cheap checks first if we did not promote.
// If we promoted, we need to check if it is indeed profitable.
if (!HasPromoted && LI->getParent() == I->getParent())
return false;
EVT VT = TLI->getValueType(*DL, I->getType());
EVT LoadVT = TLI->getValueType(*DL, LI->getType());
// If the load has other users and the truncate is not free, this probably
// isn't worthwhile.
if (!LI->hasOneUse() && TLI &&
(TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
!TLI->isTruncateFree(I->getType(), LI->getType())) {
I = OldExt;
TPT.rollback(LastKnownGood);
return false;
}
// Check whether the target supports casts folded into loads.
unsigned LType;
if (isa<ZExtInst>(I))
LType = ISD::ZEXTLOAD;
else {
assert(isa<SExtInst>(I) && "Unexpected ext type!");
LType = ISD::SEXTLOAD;
}
if (TLI && !TLI->isLoadExtLegal(LType, VT, LoadVT)) {
I = OldExt;
TPT.rollback(LastKnownGood);
return false;
}
// Move the extend into the same block as the load, so that SelectionDAG
// can fold it.
TPT.commit();
I->removeFromParent();
I->insertAfter(LI);
++NumExtsMoved;
return true;
}
bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
BasicBlock *DefBB = I->getParent();
  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with the result of the extension.
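  // Illustrative sketch (hypothetical names): given, in DefBB,
  //   %ext = zext i16 %src to i32
  // a use of %src in another block is rewritten to use
  //   %t = trunc i32 %ext to i16
  // so that only %ext needs to be live out of DefBB.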
Value *Src = I->getOperand(0);
if (Src->hasOneUse())
return false;
// Only do this xform if truncating is free.
if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
return false;
// Only safe to perform the optimization if the source is also defined in
// this block.
if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
return false;
bool DefIsLiveOut = false;
for (User *U : I->users()) {
Instruction *UI = cast<Instruction>(U);
// Figure out which BB this ext is used in.
BasicBlock *UserBB = UI->getParent();
if (UserBB == DefBB) continue;
DefIsLiveOut = true;
break;
}
if (!DefIsLiveOut)
return false;
// Make sure none of the uses are PHI nodes.
for (User *U : Src->users()) {
Instruction *UI = cast<Instruction>(U);
BasicBlock *UserBB = UI->getParent();
if (UserBB == DefBB) continue;
// Be conservative. We don't want this xform to end up introducing
// reloads just before load / store instructions.
if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
return false;
}
// InsertedTruncs - Only insert one trunc in each block once.
DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
bool MadeChange = false;
for (Use &U : Src->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
// Figure out which BB this ext is used in.
BasicBlock *UserBB = User->getParent();
if (UserBB == DefBB) continue;
// Both src and def are live in this block. Rewrite the use.
Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
if (!InsertedTrunc) {
BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
InsertedInsts.insert(InsertedTrunc);
}
// Replace a use of the {s|z}ext source with a use of the result.
U = InsertedTrunc;
++NumExtUses;
MadeChange = true;
}
return MadeChange;
}
/// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be
/// turned into an explicit branch.
static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
// FIXME: This should use the same heuristics as IfConversion to determine
// whether a select is better represented as a branch. This requires that
// branch probability metadata is preserved for the select, which is not the
// case currently.
CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
// If the branch is predicted right, an out of order CPU can avoid blocking on
// the compare. Emit cmovs on compares with a memory operand as branches to
// avoid stalls on the load from memory. If the compare has more than one use
// there's probably another cmov or setcc around so it's not worth emitting a
// branch.
if (!Cmp)
return false;
Value *CmpOp0 = Cmp->getOperand(0);
Value *CmpOp1 = Cmp->getOperand(1);
  // We check that the memory operand has one use to avoid uses of the loaded
  // value directly after the compare, which would make branches unprofitable.
return Cmp->hasOneUse() &&
((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
(isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()));
}
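// Illustrative pattern (a sketch; names hypothetical) that the heuristic
// above considers profitable to turn into a branch:
//   %ld = load i32, i32* %p        ; single use
//   %c = icmp eq i32 %ld, %x       ; single use
//   %r = select i1 %c, i32 %a, i32 %b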
/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
// Can we convert the 'select' to CF ?
if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
return false;
TargetLowering::SelectSupportKind SelectKind;
if (VectorCond)
SelectKind = TargetLowering::VectorMaskSelect;
else if (SI->getType()->isVectorTy())
SelectKind = TargetLowering::ScalarCondVectorVal;
else
SelectKind = TargetLowering::ScalarValSelect;
// Do we have efficient codegen support for this kind of 'selects' ?
if (TLI->isSelectSupported(SelectKind)) {
// We have efficient codegen support for the select instruction.
// Check if it is profitable to keep this 'select'.
if (!TLI->isPredictableSelectExpensive() ||
!isFormingBranchFromSelectProfitable(SI))
return false;
}
ModifiedDT = true;
// First, we split the block containing the select into 2 blocks.
BasicBlock *StartBlock = SI->getParent();
BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
// Create a new block serving as the landing pad for the branch.
BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid",
NextBlock->getParent(), NextBlock);
// Move the unconditional branch from the block with the select in it into our
// landing pad block.
StartBlock->getTerminator()->eraseFromParent();
BranchInst::Create(NextBlock, SmallBlock);
// Insert the real conditional branch based on the original condition.
BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI);
// The select itself is replaced with a PHI Node.
PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin());
PN->takeName(SI);
PN->addIncoming(SI->getTrueValue(), StartBlock);
PN->addIncoming(SI->getFalseValue(), SmallBlock);
SI->replaceAllUsesWith(PN);
SI->eraseFromParent();
// Instruct OptimizeBlock to skip to the next block.
CurInstIterator = StartBlock->end();
++NumSelectsExpanded;
return true;
}
static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
SmallVector<int, 16> Mask(SVI->getShuffleMask());
int SplatElem = -1;
for (unsigned i = 0; i < Mask.size(); ++i) {
if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
return false;
SplatElem = Mask[i];
}
return true;
}
/// Some targets have expensive vector shifts if the lanes aren't all the same
/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
/// it's often worth sinking a shufflevector splat down to its use so that
/// codegen can spot all lanes are identical.
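/// Illustrative sketch (hypothetical IR): a splat such as
///   %sp = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
/// defined in one block and used by a shift in another block is re-created
/// next to the shift, so ISel sees a uniform shift amount in that block.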
bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
BasicBlock *DefBB = SVI->getParent();
// Only do this xform if variable vector shifts are particularly expensive.
if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
return false;
// We only expect better codegen by sinking a shuffle if we can recognise a
// constant splat.
if (!isBroadcastShuffle(SVI))
return false;
// InsertedShuffles - Only insert a shuffle in each block once.
DenseMap<BasicBlock*, Instruction*> InsertedShuffles;
bool MadeChange = false;
for (User *U : SVI->users()) {
Instruction *UI = cast<Instruction>(U);
// Figure out which BB this ext is used in.
BasicBlock *UserBB = UI->getParent();
if (UserBB == DefBB) continue;
// For now only apply this when the splat is used by a shift instruction.
if (!UI->isShift()) continue;
// Everything checks out, sink the shuffle if the user's block doesn't
// already have a copy.
Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
if (!InsertedShuffle) {
BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0),
SVI->getOperand(1),
SVI->getOperand(2), "", InsertPt);
}
UI->replaceUsesOfWith(SVI, InsertedShuffle);
MadeChange = true;
}
// If we removed all uses, nuke the shuffle.
if (SVI->use_empty()) {
SVI->eraseFromParent();
MadeChange = true;
}
return MadeChange;
}
namespace {
/// \brief Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
/// DataLayout associated with the current module.
const DataLayout &DL;
/// Used to perform some checks on the legality of vector operations.
const TargetLowering &TLI;
  /// Used to estimate the cost of the promoted chain.
const TargetTransformInfo &TTI;
/// The transition being moved downwards.
Instruction *Transition;
/// The sequence of instructions to be promoted.
SmallVector<Instruction *, 4> InstsToBePromoted;
/// Cost of combining a store and an extract.
unsigned StoreExtractCombineCost;
/// Instruction that will be combined with the transition.
Instruction *CombineInst;
/// \brief The instruction that represents the current end of the transition.
/// Since we are faking the promotion until we reach the end of the chain
/// of computation, we need a way to get the current end of the transition.
Instruction *getEndOfTransition() const {
if (InstsToBePromoted.empty())
return Transition;
return InstsToBePromoted.back();
}
/// \brief Return the index of the original value in the transition.
/// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
/// c, is at index 0.
unsigned getTransitionOriginalValueIdx() const {
assert(isa<ExtractElementInst>(Transition) &&
"Other kind of transitions are not supported yet");
return 0;
}
/// \brief Return the index of the index in the transition.
/// E.g., for "extractelement <2 x i32> c, i32 0" the index
/// is at index 1.
unsigned getTransitionIdx() const {
assert(isa<ExtractElementInst>(Transition) &&
"Other kind of transitions are not supported yet");
return 1;
}
/// \brief Get the type of the transition.
/// This is the type of the original value.
/// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
/// transition is <2 x i32>.
Type *getTransitionType() const {
return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
}
  /// \brief Promote \p ToBePromoted by moving \p Def downward through it.
/// I.e., we have the following sequence:
/// Def = Transition <ty1> a to <ty2>
/// b = ToBePromoted <ty2> Def, ...
/// =>
/// b = ToBePromoted <ty1> a, ...
/// Def = Transition <ty1> ToBePromoted to <ty2>
void promoteImpl(Instruction *ToBePromoted);
/// \brief Check whether or not it is profitable to promote all the
/// instructions enqueued to be promoted.
bool isProfitableToPromote() {
Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
unsigned Index = isa<ConstantInt>(ValIdx)
? cast<ConstantInt>(ValIdx)->getZExtValue()
: -1;
Type *PromotedType = getTransitionType();
StoreInst *ST = cast<StoreInst>(CombineInst);
unsigned AS = ST->getPointerAddressSpace();
unsigned Align = ST->getAlignment();
// Check if this store is supported.
if (!TLI.allowsMisalignedMemoryAccesses(
TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
Align)) {
// If this is not supported, there is no way we can combine
// the extract with the store.
return false;
}
// The scalar chain of computation has to pay for the transition
// scalar to vector.
// The vector chain has to account for the combining cost.
uint64_t ScalarCost =
TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
uint64_t VectorCost = StoreExtractCombineCost;
for (const auto &Inst : InstsToBePromoted) {
// Compute the cost.
// By construction, all instructions being promoted are arithmetic ones.
// Moreover, one argument is a constant that can be viewed as a splat
// constant.
Value *Arg0 = Inst->getOperand(0);
bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
isa<ConstantFP>(Arg0);
TargetTransformInfo::OperandValueKind Arg0OVK =
IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
: TargetTransformInfo::OK_AnyValue;
TargetTransformInfo::OperandValueKind Arg1OVK =
!IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
: TargetTransformInfo::OK_AnyValue;
ScalarCost += TTI.getArithmeticInstrCost(
Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
Arg0OVK, Arg1OVK);
}
DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
<< ScalarCost << "\nVector: " << VectorCost << '\n');
return ScalarCost > VectorCost;
}
/// \brief Generate a constant vector with \p Val with the same
/// number of elements as the transition.
/// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
/// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
/// otherwise we generate a vector with as many undef as possible:
/// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
/// used at the index of the extract.
Value *getConstantVector(Constant *Val, bool UseSplat) const {
unsigned ExtractIdx = UINT_MAX;
if (!UseSplat) {
// If we cannot determine where the constant must be, we have to
// use a splat constant.
Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
ExtractIdx = CstVal->getSExtValue();
else
UseSplat = true;
}
unsigned End = getTransitionType()->getVectorNumElements();
if (UseSplat)
return ConstantVector::getSplat(End, Val);
SmallVector<Constant *, 4> ConstVec;
UndefValue *UndefVal = UndefValue::get(Val->getType());
for (unsigned Idx = 0; Idx != End; ++Idx) {
if (Idx == ExtractIdx)
ConstVec.push_back(Val);
else
ConstVec.push_back(UndefVal);
}
return ConstantVector::get(ConstVec);
}
/// \brief Check if promoting to a vector type an operand at \p OperandIdx
/// in \p Use can trigger undefined behavior.
static bool canCauseUndefinedBehavior(const Instruction *Use,
unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
// the right hand side of a division-like instruction.
if (OperandIdx != 1)
return false;
switch (Use->getOpcode()) {
default:
return false;
case Instruction::SDiv:
case Instruction::UDiv:
case Instruction::SRem:
case Instruction::URem:
return true;
case Instruction::FDiv:
case Instruction::FRem:
return !Use->hasNoNaNs();
}
llvm_unreachable(nullptr);
}
public:
VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
const TargetTransformInfo &TTI, Instruction *Transition,
unsigned CombineCost)
: DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
StoreExtractCombineCost(CombineCost), CombineInst(nullptr) {
assert(Transition && "Do not know how to promote null");
}
/// \brief Check if we can promote \p ToBePromoted to \p Type.
bool canPromote(const Instruction *ToBePromoted) const {
// We could support CastInst too.
return isa<BinaryOperator>(ToBePromoted);
}
  /// \brief Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
bool shouldPromote(const Instruction *ToBePromoted) const {
// Promote only if all the operands can be statically expanded.
// Indeed, we do not want to introduce any new kind of transitions.
for (const Use &U : ToBePromoted->operands()) {
const Value *Val = U.get();
if (Val == getEndOfTransition()) {
// If the use is a division and the transition is on the rhs,
// we cannot promote the operation, otherwise we may create a
// division by zero.
if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
return false;
continue;
}
if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
!isa<ConstantFP>(Val))
return false;
}
// Check that the resulting operation is legal.
int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
if (!ISDOpcode)
return false;
return StressStoreExtract ||
TLI.isOperationLegalOrCustom(
ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
}
/// \brief Check whether or not \p Use can be combined
/// with the transition.
/// I.e., is it possible to do Use(Transition) => AnotherUse?
bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
/// \brief Record \p ToBePromoted as part of the chain to be promoted.
void enqueueForPromotion(Instruction *ToBePromoted) {
InstsToBePromoted.push_back(ToBePromoted);
}
/// \brief Set the instruction that will be combined with the transition.
void recordCombineInstruction(Instruction *ToBeCombined) {
assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
CombineInst = ToBeCombined;
}
  /// \brief Promote all the instructions enqueued for promotion if it is
  /// profitable.
/// \return True if the promotion happened, false otherwise.
bool promote() {
// Check if there is something to promote.
// Right now, if we do not have anything to combine with,
// we assume the promotion is not profitable.
if (InstsToBePromoted.empty() || !CombineInst)
return false;
// Check cost.
if (!StressStoreExtract && !isProfitableToPromote())
return false;
// Promote.
for (auto &ToBePromoted : InstsToBePromoted)
promoteImpl(ToBePromoted);
InstsToBePromoted.clear();
return true;
}
};
} // End of anonymous namespace.
void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
// At this point, we know that all the operands of ToBePromoted but Def
// can be statically promoted.
// For Def, we need to use its parameter in ToBePromoted:
// b = ToBePromoted ty1 a
// Def = Transition ty1 b to ty2
// Move the transition down.
// 1. Replace all uses of the promoted operation by the transition.
// = ... b => = ... Def.
assert(ToBePromoted->getType() == Transition->getType() &&
"The type of the result of the transition does not match "
"the final type");
ToBePromoted->replaceAllUsesWith(Transition);
// 2. Update the type of the uses.
// b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
Type *TransitionTy = getTransitionType();
ToBePromoted->mutateType(TransitionTy);
// 3. Update all the operands of the promoted operation with promoted
// operands.
// b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
for (Use &U : ToBePromoted->operands()) {
Value *Val = U.get();
Value *NewVal = nullptr;
if (Val == Transition)
NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
isa<ConstantFP>(Val)) {
// Use a splat constant if it is not safe to use undef.
NewVal = getConstantVector(
cast<Constant>(Val),
isa<UndefValue>(Val) ||
canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
} else
llvm_unreachable("Did you modified shouldPromote and forgot to update "
"this?");
ToBePromoted->setOperand(U.getOperandNo(), NewVal);
}
Transition->removeFromParent();
Transition->insertAfter(ToBePromoted);
Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}
/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
bool CodeGenPrepare::OptimizeExtractElementInst(Instruction *Inst) {
unsigned CombineCost = UINT_MAX;
if (DisableStoreExtract || !TLI ||
(!StressStoreExtract &&
!TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
Inst->getOperand(1), CombineCost)))
return false;
// At this point we know that Inst is a vector to scalar transition.
// Try to move it down the def-use chain, until:
// - We can combine the transition with its single use
// => we got rid of the transition.
// - We escape the current basic block
  //     => we would need to check that we are moving it to a cheaper place and
// we do not do that for now.
BasicBlock *Parent = Inst->getParent();
DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
// If the transition has more than one use, assume this is not going to be
// beneficial.
while (Inst->hasOneUse()) {
Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
if (ToBePromoted->getParent() != Parent) {
DEBUG(dbgs() << "Instruction to promote is in a different block ("
<< ToBePromoted->getParent()->getName()
<< ") than the transition (" << Parent->getName() << ").\n");
return false;
}
if (VPH.canCombine(ToBePromoted)) {
DEBUG(dbgs() << "Assume " << *Inst << '\n'
<< "will be combined with: " << *ToBePromoted << '\n');
VPH.recordCombineInstruction(ToBePromoted);
bool Changed = VPH.promote();
NumStoreExtractExposed += Changed;
return Changed;
}
DEBUG(dbgs() << "Try promoting.\n");
if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
return false;
DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
VPH.enqueueForPromotion(ToBePromoted);
Inst = ToBePromoted;
}
return false;
}
bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) {
// Bail out if we inserted the instruction to prevent optimizations from
// stepping on each other's toes.
if (InsertedInsts.count(I))
return false;
if (PHINode *P = dyn_cast<PHINode>(I)) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
// trivial PHI, go ahead and zap it here.
if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
++NumPHIsElim;
return true;
}
return false;
}
if (CastInst *CI = dyn_cast<CastInst>(I)) {
// If the source of the cast is a constant, then this should have
// already been constant folded. The only reason NOT to constant fold
// it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
// the address of globals out of a loop). If this is the case, we don't
// want to forward-subst the cast.
if (isa<Constant>(CI->getOperand(0)))
return false;
if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
return true;
if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
/// Sink a zext or sext into its user blocks if the target type doesn't
/// fit in one register
if (TLI &&
TLI->getTypeAction(CI->getContext(),
TLI->getValueType(*DL, CI->getType())) ==
TargetLowering::TypeExpandInteger) {
return SinkCast(CI);
} else {
bool MadeChange = MoveExtToFormExtLoad(I);
return MadeChange | OptimizeExtUses(I);
}
}
return false;
}
if (CmpInst *CI = dyn_cast<CmpInst>(I))
if (!TLI || !TLI->hasMultipleConditionRegisters())
return OptimizeCmpExpression(CI);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (TLI) {
unsigned AS = LI->getPointerAddressSpace();
return OptimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
}
return false;
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (TLI) {
unsigned AS = SI->getPointerAddressSpace();
return OptimizeMemoryInst(I, SI->getOperand(1),
SI->getOperand(0)->getType(), AS);
}
return false;
}
BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
BinOp->getOpcode() == Instruction::LShr)) {
ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
if (TLI && CI && TLI->hasExtractBitsInsn())
return OptimizeExtractBits(BinOp, CI, *TLI, *DL);
return false;
}
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
if (GEPI->hasAllZeroIndices()) {
/// The GEP operand must be a pointer, so must its result -> BitCast
Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
GEPI->getName(), GEPI);
GEPI->replaceAllUsesWith(NC);
GEPI->eraseFromParent();
++NumGEPsElim;
OptimizeInst(NC, ModifiedDT);
return true;
}
return false;
}
if (CallInst *CI = dyn_cast<CallInst>(I))
return OptimizeCallInst(CI, ModifiedDT);
if (SelectInst *SI = dyn_cast<SelectInst>(I))
return OptimizeSelectInst(SI);
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
return OptimizeShuffleVectorInst(SVI);
if (isa<ExtractElementInst>(I))
return OptimizeExtractElementInst(I);
return false;
}
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB, bool& ModifiedDT) {
SunkAddrs.clear();
bool MadeChange = false;
CurInstIterator = BB.begin();
while (CurInstIterator != BB.end()) {
MadeChange |= OptimizeInst(CurInstIterator++, ModifiedDT);
if (ModifiedDT)
return true;
}
MadeChange |= DupRetToEnableTailCallOpts(&BB);
return MadeChange;
}
// If llvm.dbg.value is far away from the value, then ISel may not be able
// to handle it properly. ISel will drop llvm.dbg.value if it cannot
// find a node corresponding to the value.
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
bool MadeChange = false;
for (BasicBlock &BB : F) {
Instruction *PrevNonDbgInst = nullptr;
for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
Instruction *Insn = BI++;
DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
// Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
// being taken. They should not be moved next to the alloca
// (and to the beginning of the scope), but rather stay close to
// where said address is used.
if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
PrevNonDbgInst = Insn;
continue;
}
Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
DVI->removeFromParent();
if (isa<PHINode>(VI))
DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
else
DVI->insertAfter(VI);
MadeChange = true;
++NumDbgValueMoved;
}
}
}
return MadeChange;
}
// If there is a sequence that branches based on comparing a single bit
// against zero that can be combined into a single instruction, and the
// target supports folding these into a single instruction, sink the
// mask and compare into the branch uses. Do this before OptimizeBlock ->
// OptimizeInst -> OptimizeCmpExpression, which perturbs the pattern being
// searched for.
bool CodeGenPrepare::sinkAndCmp(Function &F) {
if (!EnableAndCmpSinking)
return false;
if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
return false;
bool MadeChange = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
BasicBlock *BB = I++;
// Does this BB end with the following?
    //   %andVal = and %val, #single-bit-set
    //   %cmpVal = icmp %andVal, 0
    //   br i1 %cmpVal, label %dest1, label %dest2
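    // After sinking (a sketch; names hypothetical), each conditional-branch
    // user block gets its own local copy:
    //   %andVal.sunk = and %val, #single-bit-set
    //   %cmpVal.sunk = icmp %andVal.sunk, 0
    //   br i1 %cmpVal.sunk, label %dest1, label %dest2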
BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
if (!Brcc || !Brcc->isConditional())
continue;
ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
if (!Cmp || Cmp->getParent() != BB)
continue;
ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
if (!Zero || !Zero->isZero())
continue;
Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
continue;
ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1));
if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
continue;
DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());
// Push the "and; icmp" for any users that are conditional branches.
// Since there can only be one branch use per BB, we don't need to keep
// track of which BBs we insert into.
for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
UI != E; ) {
Use &TheUse = *UI;
// Find brcc use.
BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
++UI;
if (!BrccUser || !BrccUser->isConditional())
continue;
BasicBlock *UserBB = BrccUser->getParent();
if (UserBB == BB) continue;
DEBUG(dbgs() << "found Brcc use\n");
// Sink the "and; icmp" to use.
MadeChange = true;
BinaryOperator *NewAnd =
BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
BrccUser);
CmpInst *NewCmp =
CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
"", BrccUser);
TheUse = NewCmp;
++NumAndCmpsMoved;
DEBUG(BrccUser->getParent()->dump());
}
}
return MadeChange;
}
/// \brief Retrieve the probabilities of a conditional branch. Returns true on
/// success, or returns false if no or invalid metadata was found.
static bool extractBranchMetadata(BranchInst *BI,
uint64_t &ProbTrue, uint64_t &ProbFalse) {
assert(BI->isConditional() &&
"Looking for probabilities on unconditional branch?");
auto *ProfileData = BI->getMetadata(LLVMContext::MD_prof);
if (!ProfileData || ProfileData->getNumOperands() != 3)
return false;
const auto *CITrue =
mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1));
const auto *CIFalse =
mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2));
if (!CITrue || !CIFalse)
return false;
ProbTrue = CITrue->getValue().getZExtValue();
ProbFalse = CIFalse->getValue().getZExtValue();
return true;
}
/// \brief Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
uint32_t Scale = (NewMax / UINT32_MAX) + 1;
NewTrue = NewTrue / Scale;
NewFalse = NewFalse / Scale;
}
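// Worked example (a sketch): with NewTrue = 2^33 and NewFalse = 2^31,
// NewMax / UINT32_MAX == 2, so Scale == 3 and the weights become
// 2^33 / 3 and 2^31 / 3, both of which now fit into uint32_t while
// approximately preserving their 4:1 ratio.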
/// \brief Some targets prefer to split a conditional branch like:
/// \code
/// %0 = icmp ne i32 %a, 0
/// %1 = icmp ne i32 %b, 0
/// %or.cond = or i1 %0, %1
/// br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
/// bb1:
/// %0 = icmp ne i32 %a, 0
/// br i1 %0, label %TrueBB, label %bb2
/// bb2:
/// %1 = icmp ne i32 %b, 0
/// br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
return false;
bool MadeChange = false;
for (auto &BB : F) {
// Does this BB end with the following?
// %cond1 = icmp|fcmp|binary instruction ...
// %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
BinaryOperator *LogicOp;
BasicBlock *TBB, *FBB;
if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
continue;
unsigned Opc;
Value *Cond1, *Cond2;
if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
m_OneUse(m_Value(Cond2)))))
Opc = Instruction::And;
else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
m_OneUse(m_Value(Cond2)))))
Opc = Instruction::Or;
else
continue;
if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
!match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) )
continue;
DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
// Create a new BB.
auto *InsertBefore = std::next(Function::iterator(BB))
.getNodePtrUnchecked();
auto TmpBB = BasicBlock::Create(BB.getContext(),
BB.getName() + ".cond.split",
BB.getParent(), InsertBefore);
// Update original basic block by using the first condition directly by the
// branch instruction and removing the no longer needed and/or instruction.
auto *Br1 = cast<BranchInst>(BB.getTerminator());
Br1->setCondition(Cond1);
LogicOp->eraseFromParent();
    // Depending on the condition we have to replace either the true or the false
// successor of the original branch instruction.
if (Opc == Instruction::And)
Br1->setSuccessor(0, TmpBB);
else
Br1->setSuccessor(1, TmpBB);
// Fill in the new basic block.
auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
if (auto *I = dyn_cast<Instruction>(Cond2)) {
I->removeFromParent();
I->insertBefore(Br2);
}
    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes
    // from the newly generated BB (TmpBB). In the other successor we need to
    // add one incoming edge to the PHI nodes, because both branch
    // instructions now target the same successor. Depending on the original
    // branch condition (and/or) we have to swap the successors
    // (TrueDest, FalseDest), so that we perform the correct update for the
    // PHI nodes.
// This doesn't change the successor order of the just created branch
// instruction (or any other instruction).
if (Opc == Instruction::Or)
std::swap(TBB, FBB);
// Replace the old BB with the new BB.
for (auto &I : *TBB) {
PHINode *PN = dyn_cast<PHINode>(&I);
if (!PN)
break;
int i;
while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
PN->setIncomingBlock(i, TmpBB);
}
    // Add another incoming edge from the new BB.
for (auto &I : *FBB) {
PHINode *PN = dyn_cast<PHINode>(&I);
if (!PN)
break;
auto *Val = PN->getIncomingValueForBlock(&BB);
PN->addIncoming(Val, TmpBB);
}
// Update the branch weights (from SelectionDAGBuilder::
// FindMergedConditions).
if (Opc == Instruction::Or) {
// Codegen X | Y as:
// BB1:
// jmp_if_X TBB
// jmp TmpBB
// TmpBB:
// jmp_if_Y TBB
// jmp FBB
//
      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
// The requirement is that
// TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //   = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
// weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
// assumes that
// TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
// Another choice is to assume TrueProb for BB1 equals to TrueProb for
// TmpBB, but the math is more complicated.
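      // Worked example (a sketch): with original weights A = 3, B = 1, BB1
      // gets weights {3, 5} and TmpBB gets {3, 2}, so the overall TrueProb is
      // 3/8 + (5/8)*(3/5) = 6/8 = A/(A+B), matching the original branch.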
uint64_t TrueWeight, FalseWeight;
if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
uint64_t NewTrueWeight = TrueWeight;
uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight));
        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight));
}
} else {
// Codegen X & Y as:
// BB1:
// jmp_if_X TmpBB
// jmp FBB
// TmpBB:
// jmp_if_Y TBB
// jmp FBB
//
// This requires creation of TmpBB after CurBB.
// We have flexibility in setting Prob for BB1 and Prob for TmpBB.
// The requirement is that
// FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //   = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
// weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
// assumes that
// FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
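      // Worked example (a sketch): with original weights A = 3, B = 1, BB1
      // gets weights {7, 1} and TmpBB gets {6, 1}, so the overall FalseProb is
      // 1/8 + (7/8)*(1/7) = 2/8 = B/(A+B), matching the original branch.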
uint64_t TrueWeight, FalseWeight;
if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
uint64_t NewFalseWeight = FalseWeight;
scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight));
        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight));
}
}
// Note: No point in getting fancy here, since the DT info is never
// available to CodeGenPrepare.
ModifiedDT = true;
MadeChange = true;
DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
TmpBB->dump());
}
return MadeChange;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineBlockFrequencyInfo.cpp | //===- MachineBlockFrequencyInfo.cpp - MBB Frequency Analysis -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
using namespace llvm;
#define DEBUG_TYPE "block-freq"
#ifndef NDEBUG
enum GVDAGType {
GVDT_None,
GVDT_Fraction,
GVDT_Integer
};
static cl::opt<GVDAGType>
ViewMachineBlockFreqPropagationDAG("view-machine-block-freq-propagation-dags",
cl::Hidden,
cl::desc("Pop up a window to show a dag displaying how machine block "
"frequencies propagate through the CFG."),
cl::values(
clEnumValN(GVDT_None, "none",
"do not display graphs."),
clEnumValN(GVDT_Fraction, "fraction", "display a graph using the "
"fractional block frequency representation."),
clEnumValN(GVDT_Integer, "integer", "display a graph using the raw "
"integer fractional block frequency representation."),
clEnumValEnd));
namespace llvm {
template <>
struct GraphTraits<MachineBlockFrequencyInfo *> {
typedef const MachineBasicBlock NodeType;
typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
typedef MachineFunction::const_iterator nodes_iterator;
static inline
const NodeType *getEntryNode(const MachineBlockFrequencyInfo *G) {
return G->getFunction()->begin();
}
static ChildIteratorType child_begin(const NodeType *N) {
return N->succ_begin();
}
static ChildIteratorType child_end(const NodeType *N) {
return N->succ_end();
}
static nodes_iterator nodes_begin(const MachineBlockFrequencyInfo *G) {
return G->getFunction()->begin();
}
static nodes_iterator nodes_end(const MachineBlockFrequencyInfo *G) {
return G->getFunction()->end();
}
};
template<>
struct DOTGraphTraits<MachineBlockFrequencyInfo*> :
public DefaultDOTGraphTraits {
explicit DOTGraphTraits(bool isSimple=false) :
DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const MachineBlockFrequencyInfo *G) {
return G->getFunction()->getName();
}
std::string getNodeLabel(const MachineBasicBlock *Node,
const MachineBlockFrequencyInfo *Graph) {
std::string Result;
raw_string_ostream OS(Result);
OS << Node->getName().str() << ":";
switch (ViewMachineBlockFreqPropagationDAG) {
case GVDT_Fraction:
Graph->printBlockFreq(OS, Node);
break;
case GVDT_Integer:
OS << Graph->getBlockFreq(Node).getFrequency();
break;
case GVDT_None:
llvm_unreachable("If we are not supposed to render a graph we should "
"never reach this point.");
}
return Result;
}
};
} // end namespace llvm
#endif
INITIALIZE_PASS_BEGIN(MachineBlockFrequencyInfo, "machine-block-freq",
"Machine Block Frequency Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(MachineBlockFrequencyInfo, "machine-block-freq",
"Machine Block Frequency Analysis", true, true)
char MachineBlockFrequencyInfo::ID = 0;
MachineBlockFrequencyInfo::
MachineBlockFrequencyInfo() :MachineFunctionPass(ID) {
initializeMachineBlockFrequencyInfoPass(*PassRegistry::getPassRegistry());
}
MachineBlockFrequencyInfo::~MachineBlockFrequencyInfo() {}
void MachineBlockFrequencyInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineLoopInfo>();
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool MachineBlockFrequencyInfo::runOnMachineFunction(MachineFunction &F) {
MachineBranchProbabilityInfo &MBPI =
getAnalysis<MachineBranchProbabilityInfo>();
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
if (!MBFI)
MBFI.reset(new ImplType);
MBFI->doFunction(&F, &MBPI, &MLI);
#ifndef NDEBUG
if (ViewMachineBlockFreqPropagationDAG != GVDT_None) {
view();
}
#endif
return false;
}
void MachineBlockFrequencyInfo::releaseMemory() { MBFI.reset(); }
/// Pop up a ghostview window with the current block frequency propagation
/// rendered using dot.
void MachineBlockFrequencyInfo::view() const {
// This code is only for debugging.
#ifndef NDEBUG
ViewGraph(const_cast<MachineBlockFrequencyInfo *>(this),
"MachineBlockFrequencyDAGs");
#else
errs() << "MachineBlockFrequencyInfo::view is only available in debug builds "
"on systems with Graphviz or gv!\n";
#endif // NDEBUG
}
BlockFrequency MachineBlockFrequencyInfo::
getBlockFreq(const MachineBasicBlock *MBB) const {
return MBFI ? MBFI->getBlockFreq(MBB) : 0;
}
const MachineFunction *MachineBlockFrequencyInfo::getFunction() const {
return MBFI ? MBFI->getFunction() : nullptr;
}
raw_ostream &
MachineBlockFrequencyInfo::printBlockFreq(raw_ostream &OS,
const BlockFrequency Freq) const {
return MBFI ? MBFI->printBlockFreq(OS, Freq) : OS;
}
raw_ostream &
MachineBlockFrequencyInfo::printBlockFreq(raw_ostream &OS,
const MachineBasicBlock *MBB) const {
return MBFI ? MBFI->printBlockFreq(OS, MBB) : OS;
}
uint64_t MachineBlockFrequencyInfo::getEntryFreq() const {
return MBFI ? MBFI->getEntryFreq() : 0;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/AggressiveAntiDepBreaker.h | //=- llvm/CodeGen/AggressiveAntiDepBreaker.h - Anti-Dep Support -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AggressiveAntiDepBreaker class, which
// implements register anti-dependence breaking during post-RA
// scheduling. It attempts to break all anti-dependencies within a
// block.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_AGGRESSIVEANTIDEPBREAKER_H
#define LLVM_LIB_CODEGEN_AGGRESSIVEANTIDEPBREAKER_H
#include "AntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <map>
namespace llvm {
class RegisterClassInfo;
/// Contains all the state necessary for anti-dep breaking.
class LLVM_LIBRARY_VISIBILITY AggressiveAntiDepState {
public:
/// Information about a register reference within a liverange
typedef struct {
    /// The register's operand
MachineOperand *Operand;
/// The register class
const TargetRegisterClass *RC;
} RegisterReference;
private:
/// Number of non-virtual target registers (i.e. TRI->getNumRegs()).
const unsigned NumTargetRegs;
/// Implements a disjoint-union data structure to
/// form register groups. A node is represented by an index into
/// the vector. A node can "point to" itself to indicate that it
/// is the parent of a group, or point to another node to indicate
/// that it is a member of the same group as that node.
std::vector<unsigned> GroupNodes;
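  /// Illustrative state (hypothetical): GroupNodes = {0, 1, 1, 3} encodes
  /// three groups; node 2 points at node 1, so registers mapped to nodes 1
  /// and 2 share a group whose representative index is 1.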
/// For each register, the index of the GroupNode
/// currently representing the group that the register belongs to.
/// Register 0 is always represented by the 0 group, a group
/// composed of registers that are not eligible for anti-aliasing.
std::vector<unsigned> GroupNodeIndices;
/// Map registers to all their references within a live range.
std::multimap<unsigned, RegisterReference> RegRefs;
/// The index of the most recent kill (proceeding bottom-up),
/// or ~0u if the register is not live.
std::vector<unsigned> KillIndices;
/// The index of the most recent complete def (proceeding bottom
/// up), or ~0u if the register is live.
std::vector<unsigned> DefIndices;
public:
AggressiveAntiDepState(const unsigned TargetRegs, MachineBasicBlock *BB);
/// Return the kill indices.
std::vector<unsigned> &GetKillIndices() { return KillIndices; }
/// Return the define indices.
std::vector<unsigned> &GetDefIndices() { return DefIndices; }
/// Return the RegRefs map.
std::multimap<unsigned, RegisterReference>& GetRegRefs() { return RegRefs; }
// Get the group for a register. The returned value is
// the index of the GroupNode representing the group.
unsigned GetGroup(unsigned Reg);
// Return a vector of the registers belonging to a group.
  // If RegRefs is non-NULL then only include referenced registers.
void GetGroupRegs(
unsigned Group,
std::vector<unsigned> &Regs,
std::multimap<unsigned,
AggressiveAntiDepState::RegisterReference> *RegRefs);
// Union Reg1's and Reg2's groups to form a new group.
// Return the index of the GroupNode representing the group.
unsigned UnionGroups(unsigned Reg1, unsigned Reg2);
// Remove a register from its current group and place
// it alone in its own group. Return the index of the GroupNode
  // representing the register's new group.
unsigned LeaveGroup(unsigned Reg);
/// Return true if Reg is live.
bool IsLive(unsigned Reg);
};
class LLVM_LIBRARY_VISIBILITY AggressiveAntiDepBreaker
: public AntiDepBreaker {
MachineFunction& MF;
MachineRegisterInfo &MRI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const RegisterClassInfo &RegClassInfo;
/// The set of registers that should only be
/// renamed if they are on the critical path.
BitVector CriticalPathSet;
/// The state used to identify and rename anti-dependence registers.
AggressiveAntiDepState *State;
public:
AggressiveAntiDepBreaker(MachineFunction& MFi,
const RegisterClassInfo &RCI,
TargetSubtargetInfo::RegClassVector& CriticalPathRCs);
~AggressiveAntiDepBreaker() override;
/// Initialize anti-dep breaking for a new basic block.
void StartBlock(MachineBasicBlock *BB) override;
    /// Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
unsigned BreakAntiDependencies(const std::vector<SUnit>& SUnits,
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned InsertPosIndex,
DbgValueVector &DbgValues) override;
/// Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void Observe(MachineInstr *MI, unsigned Count,
unsigned InsertPosIndex) override;
/// Finish anti-dep breaking for a basic block.
void FinishBlock() override;
private:
/// Keep track of a position in the allocation order for each regclass.
typedef std::map<const TargetRegisterClass *, unsigned> RenameOrderType;
/// Return true if MO represents a register
/// that is both implicitly used and defined in MI
bool IsImplicitDefUse(MachineInstr *MI, MachineOperand& MO);
/// If MI implicitly def/uses a register, then
/// return that register and all subregisters.
void GetPassthruRegs(MachineInstr *MI, std::set<unsigned>& PassthruRegs);
void HandleLastUse(unsigned Reg, unsigned KillIdx, const char *tag,
const char *header = nullptr,
const char *footer = nullptr);
void PrescanInstruction(MachineInstr *MI, unsigned Count,
std::set<unsigned>& PassthruRegs);
void ScanInstruction(MachineInstr *MI, unsigned Count);
BitVector GetRenameRegisters(unsigned Reg);
bool FindSuitableFreeRegisters(unsigned AntiDepGroupIndex,
RenameOrderType& RenameOrder,
std::map<unsigned, unsigned> &RenameMap);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/GlobalMerge.cpp | //===-- GlobalMerge.cpp - Internal globals merging -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass merges globals with internal linkage into one. This way all the
// globals that were merged into the biggest one can be addressed using offsets
// from the same base pointer (no need for a separate base pointer for each
// global). Such a transformation can significantly reduce the register pressure
// when many globals are involved.
//
// For example, consider the code which touches several global variables at
// once:
//
// static int foo[N], bar[N], baz[N];
//
// for (i = 0; i < N; ++i) {
// foo[i] = bar[i] * baz[i];
// }
//
// On ARM the addresses of the 3 arrays must be kept in registers, thus
// this code has quite high register pressure (loop body):
//
// ldr r1, [r5], #4
// ldr r2, [r6], #4
// mul r1, r2, r1
// str r1, [r0], #4
//
// The pass converts the code to something like:
//
// static struct {
// int foo[N];
// int bar[N];
// int baz[N];
// } merged;
//
// for (i = 0; i < N; ++i) {
// merged.foo[i] = merged.bar[i] * merged.baz[i];
// }
//
// and in ARM code this becomes:
//
// ldr r0, [r5, #40]
// ldr r1, [r5, #80]
// mul r0, r1, r0
// str r0, [r5], #4
//
// Note that we saved 2 registers here almost "for free".
//
// However, merging globals can have tradeoffs:
// - it confuses debuggers, tools, and users
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
// ===---------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "global-merge"
// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
cl::desc("Enable the global merge pass"),
cl::init(true));
static cl::opt<bool> GlobalMergeGroupByUse(
"global-merge-group-by-use", cl::Hidden,
cl::desc("Improve global merge pass to look at uses"), cl::init(true));
static cl::opt<bool> GlobalMergeIgnoreSingleUse(
"global-merge-ignore-single-use", cl::Hidden,
cl::desc("Improve global merge pass to ignore globals only used alone"),
cl::init(true));
static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
cl::desc("Enable global merge pass on constants"),
cl::init(false));
// FIXME: this could be a transitional option, and we probably need to remove
// it if only we are sure this optimization could always benefit all targets.
static cl::opt<bool>
EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
cl::desc("Enable global merge pass on external linkage"),
cl::init(false));
STATISTIC(NumMerged, "Number of globals merged");
namespace {
class GlobalMerge : public FunctionPass {
const TargetMachine *TM;
// FIXME: Infer the maximum possible offset depending on the actual users
// (these max offsets are different for the users inside Thumb or ARM
// functions), see the code that passes in the offset in the ARM backend
// for more information.
unsigned MaxOffset;
/// Whether we should try to optimize for size only.
/// Currently, this applies a dead simple heuristic: only consider globals
/// used in minsize functions for merging.
/// FIXME: This could learn about optsize, and be used in the cost model.
bool OnlyOptimizeForSize;
bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst, unsigned AddrSpace) const;
/// \brief Merge everything in \p Globals for which the corresponding bit
/// in \p GlobalSet is set.
bool doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
const BitVector &GlobalSet, Module &M, bool isConst,
unsigned AddrSpace) const;
/// \brief Check if the given variable has been identified as must keep
/// \pre setMustKeepGlobalVariables must have been called on the Module that
/// contains GV
bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
return MustKeepGlobalVariables.count(GV);
}
  /// Collect every variable marked as "used" or used in a landing pad
/// instruction for this Module.
void setMustKeepGlobalVariables(Module &M);
  /// Collect every variable marked as "used"
void collectUsedGlobalVariables(Module &M);
/// Keep track of the GlobalVariable that must not be merged away
SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables;
public:
static char ID; // Pass identification, replacement for typeid.
explicit GlobalMerge(const TargetMachine *TM = nullptr,
unsigned MaximalOffset = 0,
bool OnlyOptimizeForSize = false)
: FunctionPass(ID), TM(TM), MaxOffset(MaximalOffset),
OnlyOptimizeForSize(OnlyOptimizeForSize) {
initializeGlobalMergePass(*PassRegistry::getPassRegistry());
}
bool doInitialization(Module &M) override;
bool runOnFunction(Function &F) override;
bool doFinalization(Module &M) override;
StringRef getPassName() const override {
return "Merge internal globals";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
FunctionPass::getAnalysisUsage(AU);
}
};
} // end anonymous namespace
char GlobalMerge::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalMerge, "global-merge", "Merge global variables",
false, false)
INITIALIZE_PASS_END(GlobalMerge, "global-merge", "Merge global variables",
false, false)
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst, unsigned AddrSpace) const {
auto &DL = M.getDataLayout();
// FIXME: Find better heuristics
std::stable_sort(
Globals.begin(), Globals.end(),
[&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
return (DL.getTypeAllocSize(Ty1) < DL.getTypeAllocSize(Ty2));
});
// If we want to just blindly group all globals together, do so.
if (!GlobalMergeGroupByUse) {
BitVector AllGlobals(Globals.size());
AllGlobals.set();
return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
}
// If we want to be smarter, look at all uses of each global, to try to
// discover all sets of globals used together, and how many times each of
  // these sets occurred.
//
// Keep this reasonably efficient, by having an append-only list of all sets
// discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of
// code (currently, a Function) to the set of globals seen so far that are
// used together in that unit (GlobalUsesByFunction).
//
  // When we look at the Nth global, we know that any new set is either:
// - the singleton set {N}, containing this global only, or
// - the union of {N} and a previously-discovered set, containing some
// combination of the previous N-1 globals.
// Using that knowledge, when looking at the Nth global, we can keep:
// - a reference to the singleton set {N} (CurGVOnlySetIdx)
// - a list mapping each previous set to its union with {N} (EncounteredUGS),
// if it actually occurs.
// We keep track of the sets of globals used together "close enough".
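  // Illustrative run (hypothetical): if function f uses globals {a, b} and
  // function g uses {a, b, c}, visiting a, then b, then c discovers {a}, then
  // {a, b} (the union of {a} with the new global), then {a, b, c}; only the
  // sets actually used in some function keep a non-zero usage count.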
struct UsedGlobalSet {
UsedGlobalSet(size_t Size) : Globals(Size), UsageCount(1) {}
BitVector Globals;
unsigned UsageCount;
};
// Each set is unique in UsedGlobalSets.
std::vector<UsedGlobalSet> UsedGlobalSets;
// Avoid repeating the create-global-set pattern.
auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
UsedGlobalSets.emplace_back(Globals.size());
return UsedGlobalSets.back();
};
// The first set is the empty set.
CreateGlobalSet().UsageCount = 0;
// We define "close enough" to be "in the same function".
// FIXME: Grouping uses by function is way too aggressive, so we should have
// a better metric for distance between uses.
// The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
// Anything in between wouldn't be trivial to compute, so just stick with
// per-function grouping.
// The value type is an index into UsedGlobalSets.
// The default (0) conveniently points to the empty set.
DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;
// Now, look at each merge-eligible global in turn.
// Keep track of the sets we already encountered to which we added the
// current global.
// Each element matches the same-index element in UsedGlobalSets.
// This lets us efficiently tell whether a set has already been expanded to
// include the current global.
std::vector<size_t> EncounteredUGS;
for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
GlobalVariable *GV = Globals[GI];
// Reset the encountered sets for this global...
std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
// ...and grow it in case we created new sets for the previous global.
EncounteredUGS.resize(UsedGlobalSets.size());
// We might need to create a set that only consists of the current global.
// Keep track of its index into UsedGlobalSets.
size_t CurGVOnlySetIdx = 0;
// For each global, look at all its Uses.
for (auto &U : GV->uses()) {
// This Use might be a ConstantExpr. We're interested in Instruction
// users, so look through ConstantExpr...
Use *UI, *UE;
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
if (CE->use_empty())
continue;
UI = &*CE->use_begin();
UE = nullptr;
} else if (isa<Instruction>(U.getUser())) {
UI = &U;
UE = UI->getNext();
} else {
continue;
}
// ...to iterate on all the instruction users of the global.
// Note that we iterate on Uses and not on Users to be able to getNext().
for (; UI != UE; UI = UI->getNext()) {
Instruction *I = dyn_cast<Instruction>(UI->getUser());
if (!I)
continue;
Function *ParentFn = I->getParent()->getParent();
// If we're only optimizing for size, ignore non-minsize functions.
if (OnlyOptimizeForSize &&
!ParentFn->hasFnAttribute(Attribute::MinSize))
continue;
size_t UGSIdx = GlobalUsesByFunction[ParentFn];
        // If this is the first global the function uses, map it to the set
        // consisting of this global only.
if (!UGSIdx) {
// If that set doesn't exist yet, create it.
if (!CurGVOnlySetIdx) {
CurGVOnlySetIdx = UsedGlobalSets.size();
CreateGlobalSet().Globals.set(GI);
} else {
++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
}
GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
continue;
}
        // If this function's set already contains this global, just increment
        // the counter.
if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
++UsedGlobalSets[UGSIdx].UsageCount;
continue;
}
// If not, the previous set wasn't actually used in this function.
--UsedGlobalSets[UGSIdx].UsageCount;
// If we already expanded the previous set to include this global, just
// reuse that expanded set.
if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
++UsedGlobalSets[ExpandedIdx].UsageCount;
GlobalUsesByFunction[ParentFn] = ExpandedIdx;
continue;
}
// If not, create a new set consisting of the union of the previous set
// and this global. Mark it as encountered, so we can reuse it later.
GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
UsedGlobalSets.size();
UsedGlobalSet &NewUGS = CreateGlobalSet();
NewUGS.Globals.set(GI);
NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
}
}
}
  // Now we've found a bunch of sets of globals used together. We accumulated
  // the number of times we encountered each set (i.e., the number of functions
  // that use that exact set of globals).
//
// Multiply that by the size of the set to give us a crude profitability
// metric.
std::sort(UsedGlobalSets.begin(), UsedGlobalSets.end(),
[](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
return UGS1.Globals.count() * UGS1.UsageCount <
UGS2.Globals.count() * UGS2.UsageCount;
});
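  // Illustrative numbers: a set of 3 globals seen in 4 functions scores
  // 3 * 4 = 12 and thus sorts after a set of 2 globals seen in 5 functions
  // (score 10); the backwards walks below therefore consider it first.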
// We can choose to merge all globals together, but ignore globals never used
// with another global. This catches the obviously non-profitable cases of
// having a single global, but is aggressive enough for any other case.
if (GlobalMergeIgnoreSingleUse) {
BitVector AllGlobals(Globals.size());
for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
if (UGS.UsageCount == 0)
continue;
if (UGS.Globals.count() > 1)
AllGlobals |= UGS.Globals;
}
return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
}
// Starting from the sets with the best (=biggest) profitability, find a
// good combination.
// The ideal (and expensive) solution can only be found by trying all
// combinations, looking for the one with the best profitability.
// Don't be smart about it, and just pick the first compatible combination,
// starting with the sets with the best profitability.
BitVector PickedGlobals(Globals.size());
bool Changed = false;
for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
if (UGS.UsageCount == 0)
continue;
if (PickedGlobals.anyCommon(UGS.Globals))
continue;
PickedGlobals |= UGS.Globals;
// If the set only contains one global, there's no point in merging.
// Ignore the global for inclusion in other sets though, so keep it in
// PickedGlobals.
if (UGS.Globals.count() < 2)
continue;
Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
}
return Changed;
}
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
const BitVector &GlobalSet, Module &M, bool isConst,
unsigned AddrSpace) const {
Type *Int32Ty = Type::getInt32Ty(M.getContext());
auto &DL = M.getDataLayout();
assert(Globals.size() > 1);
DEBUG(dbgs() << " Trying to merge set, starts with #"
<< GlobalSet.find_first() << "\n");
ssize_t i = GlobalSet.find_first();
while (i != -1) {
ssize_t j = 0;
uint64_t MergedSize = 0;
std::vector<Type*> Tys;
std::vector<Constant*> Inits;
bool HasExternal = false;
    GlobalVariable *TheFirstExternal = nullptr;
for (j = i; j != -1; j = GlobalSet.find_next(j)) {
Type *Ty = Globals[j]->getType()->getElementType();
MergedSize += DL.getTypeAllocSize(Ty);
if (MergedSize > MaxOffset) {
break;
}
Tys.push_back(Ty);
Inits.push_back(Globals[j]->getInitializer());
if (Globals[j]->hasExternalLinkage() && !HasExternal) {
HasExternal = true;
TheFirstExternal = Globals[j];
}
}
    // If none of the merged variables have external linkage, we needn't
    // expose the symbol after merging.
GlobalValue::LinkageTypes Linkage = HasExternal
? GlobalValue::ExternalLinkage
: GlobalValue::InternalLinkage;
StructType *MergedTy = StructType::get(M.getContext(), Tys);
Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);
    // If any of the merged variables has external linkage, use the symbol
    // name of the first such variable as the suffix of the merged symbol's
    // name. This avoids link-time naming conflicts for global symbols.
GlobalVariable *MergedGV = new GlobalVariable(
M, MergedTy, isConst, Linkage, MergedInit,
HasExternal ? "_MergedGlobals_" + TheFirstExternal->getName()
: "_MergedGlobals",
nullptr, GlobalVariable::NotThreadLocal, AddrSpace);
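    // For illustration only: merging two internal i32 globals @a and @b
    // yields IR roughly like
    //   @_MergedGlobals = internal global { i32, i32 } { i32 ..., i32 ... }
    // and every use of @a is rewritten below to
    //   getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @_MergedGlobals,
    //                           i32 0, i32 0)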
for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k)) {
GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
std::string Name = Globals[k]->getName();
Constant *Idx[2] = {
ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, idx++)
};
Constant *GEP =
ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
Globals[k]->replaceAllUsesWith(GEP);
Globals[k]->eraseFromParent();
if (Linkage != GlobalValue::InternalLinkage) {
// Generate a new alias...
auto *PTy = cast<PointerType>(GEP->getType());
GlobalAlias::create(PTy, Linkage, Name, GEP, &M);
}
NumMerged++;
}
i = j;
}
return true;
}
void GlobalMerge::collectUsedGlobalVariables(Module &M) {
// Extract global variables from llvm.used array
const GlobalVariable *GV = M.getGlobalVariable("llvm.used");
if (!GV || !GV->hasInitializer()) return;
// Should be an array of 'i8*'.
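  // For example, a module pinning @g would contain something like:
  //   @llvm.used = appending global [1 x i8*]
  //                [i8* bitcast (i32* @g to i8*)], section "llvm.metadata"
  // (illustrative IR; stripPointerCasts() below looks through the bitcast).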
const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
if (const GlobalVariable *G =
dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts()))
MustKeepGlobalVariables.insert(G);
}
void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
collectUsedGlobalVariables(M);
for (Module::iterator IFn = M.begin(), IEndFn = M.end(); IFn != IEndFn;
++IFn) {
for (Function::iterator IBB = IFn->begin(), IEndBB = IFn->end();
IBB != IEndBB; ++IBB) {
// Follow the invoke link to find the landing pad instruction
const InvokeInst *II = dyn_cast<InvokeInst>(IBB->getTerminator());
if (!II) continue;
const LandingPadInst *LPInst = II->getUnwindDest()->getLandingPadInst();
// Look for globals in the clauses of the landing pad instruction
for (unsigned Idx = 0, NumClauses = LPInst->getNumClauses();
Idx != NumClauses; ++Idx)
if (const GlobalVariable *GV =
dyn_cast<GlobalVariable>(LPInst->getClause(Idx)
->stripPointerCasts()))
MustKeepGlobalVariables.insert(GV);
}
}
}
bool GlobalMerge::doInitialization(Module &M) {
if (!EnableGlobalMerge)
return false;
auto &DL = M.getDataLayout();
DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
BSSGlobals;
bool Changed = false;
setMustKeepGlobalVariables(M);
// Grab all non-const globals.
for (Module::global_iterator I = M.global_begin(),
E = M.global_end(); I != E; ++I) {
// Merge is safe for "normal" internal or external globals only
if (I->isDeclaration() || I->isThreadLocal() || I->hasSection())
continue;
if (!(EnableGlobalMergeOnExternal && I->hasExternalLinkage()) &&
!I->hasInternalLinkage())
continue;
PointerType *PT = dyn_cast<PointerType>(I->getType());
assert(PT && "Global variable is not a pointer!");
unsigned AddressSpace = PT->getAddressSpace();
// Ignore fancy-aligned globals for now.
unsigned Alignment = DL.getPreferredAlignment(I);
Type *Ty = I->getType()->getElementType();
if (Alignment > DL.getABITypeAlignment(Ty))
continue;
// Ignore all 'special' globals.
if (I->getName().startswith("llvm.") ||
I->getName().startswith(".llvm."))
continue;
// Ignore all "required" globals:
if (isMustKeepGlobalVariable(I))
continue;
if (DL.getTypeAllocSize(Ty) < MaxOffset) {
if (TargetLoweringObjectFile::getKindForGlobal(I, *TM).isBSSLocal())
BSSGlobals[AddressSpace].push_back(I);
else if (I->isConstant())
ConstGlobals[AddressSpace].push_back(I);
else
Globals[AddressSpace].push_back(I);
}
}
for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
I = Globals.begin(), E = Globals.end(); I != E; ++I)
if (I->second.size() > 1)
Changed |= doMerge(I->second, M, false, I->first);
for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
I = BSSGlobals.begin(), E = BSSGlobals.end(); I != E; ++I)
if (I->second.size() > 1)
Changed |= doMerge(I->second, M, false, I->first);
if (EnableGlobalMergeOnConst)
for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
I = ConstGlobals.begin(), E = ConstGlobals.end(); I != E; ++I)
if (I->second.size() > 1)
Changed |= doMerge(I->second, M, true, I->first);
return Changed;
}
bool GlobalMerge::runOnFunction(Function &F) {
return false;
}
bool GlobalMerge::doFinalization(Module &M) {
MustKeepGlobalVariables.clear();
return false;
}
Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
bool OnlyOptimizeForSize) {
return new GlobalMerge(TM, Offset, OnlyOptimizeForSize);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegAllocGreedy.cpp | //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <queue>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits, "Number of split local live ranges");
STATISTIC(NumEvicted, "Number of interferences evicted");
static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
cl::desc("Spill mode for splitting live ranges"),
cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
clEnumValEnd),
cl::init(SplitEditor::SM_Partition));
static cl::opt<unsigned>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
cl::desc("Last chance recoloring max depth"),
cl::init(5));
static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
"lcr-max-interf", cl::Hidden,
cl::desc("Last chance recoloring maximum number of considered"
" interference at a time"),
cl::init(8));
static cl::opt<bool>
ExhaustiveSearch("exhaustive-register-search", cl::NotHidden,
cl::desc("Exhaustive Search for registers bypassing the depth "
"and interference cutoffs of last chance recoloring"));
static cl::opt<bool> EnableLocalReassignment(
"enable-local-reassign", cl::Hidden,
cl::desc("Local reassignment can yield better allocation decisions, but "
"may be compile time intensive"),
cl::init(false));
// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned>
CSRFirstTimeCost("regalloc-csr-first-time-cost",
cl::desc("Cost for first time use of callee-saved register."),
cl::init(0), cl::Hidden);
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
createGreedyRegisterAllocator);
namespace {
class RAGreedy : public MachineFunctionPass,
public RegAllocBase,
private LiveRangeEdit::Delegate {
// Convenient shortcuts.
typedef std::priority_queue<std::pair<unsigned, unsigned> > PQueue;
typedef SmallPtrSet<LiveInterval *, 4> SmallLISet;
typedef SmallSet<unsigned, 16> SmallVirtRegSet;
// context
MachineFunction *MF;
// Shortcuts to some useful interface.
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
RegisterClassInfo RCI;
// analyses
SlotIndexes *Indexes;
MachineBlockFrequencyInfo *MBFI;
MachineDominatorTree *DomTree;
MachineLoopInfo *Loops;
EdgeBundles *Bundles;
SpillPlacement *SpillPlacer;
LiveDebugVariables *DebugVars;
// state
std::unique_ptr<Spiller> SpillerInstance;
PQueue Queue;
unsigned NextCascade;
// Live ranges pass through a number of stages as we try to allocate them.
// Some of the stages may also create new live ranges:
//
// - Region splitting.
// - Per-block splitting.
// - Local splitting.
// - Spilling.
//
// Ranges produced by one of the stages skip the previous stages when they are
// dequeued. This improves performance because we can skip interference checks
// that are unlikely to give any results. It also guarantees that the live
// range splitting algorithm terminates, something that is otherwise hard to
// ensure.
enum LiveRangeStage {
/// Newly created live range that has never been queued.
RS_New,
/// Only attempt assignment and eviction. Then requeue as RS_Split.
RS_Assign,
/// Attempt live range splitting if assignment is impossible.
RS_Split,
/// Attempt more aggressive live range splitting that is guaranteed to make
/// progress. This is used for split products that may not be making
/// progress.
RS_Split2,
/// Live range will be spilled. No more splitting will be attempted.
RS_Spill,
/// There is nothing more we can do to this live range. Abort compilation
/// if it can't be assigned.
RS_Done
};
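  // A typical (hypothetical) progression: a range enters as RS_New, is queued
  // as RS_Assign, fails assignment and eviction and is requeued as RS_Split,
  // produces RS_New children when split, and any piece that still cannot be
  // colored eventually reaches RS_Spill and finally RS_Done.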
  // Enum CutOffStage to keep track of whether the register allocation failed
  // because of the cutoffs encountered in last chance recoloring.
  // Note: This is used as a bitmask; any new value should be the next power of 2.
enum CutOffStage {
// No cutoffs encountered
CO_None = 0,
// lcr-max-depth cutoff encountered
CO_Depth = 1,
// lcr-max-interf cutoff encountered
CO_Interf = 2
};
uint8_t CutOffInfo;
#ifndef NDEBUG
static const char *const StageName[];
#endif
// RegInfo - Keep additional information about each live range.
struct RegInfo {
LiveRangeStage Stage;
// Cascade - Eviction loop prevention. See canEvictInterference().
unsigned Cascade;
RegInfo() : Stage(RS_New), Cascade(0) {}
};
IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
LiveRangeStage getStage(const LiveInterval &VirtReg) const {
return ExtraRegInfo[VirtReg.reg].Stage;
}
void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
ExtraRegInfo.resize(MRI->getNumVirtRegs());
ExtraRegInfo[VirtReg.reg].Stage = Stage;
}
template<typename Iterator>
void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
ExtraRegInfo.resize(MRI->getNumVirtRegs());
for (;Begin != End; ++Begin) {
unsigned Reg = *Begin;
if (ExtraRegInfo[Reg].Stage == RS_New)
ExtraRegInfo[Reg].Stage = NewStage;
}
}
/// Cost of evicting interference.
struct EvictionCost {
unsigned BrokenHints; ///< Total number of broken hints.
float MaxWeight; ///< Maximum spill weight evicted.
EvictionCost(): BrokenHints(0), MaxWeight(0) {}
bool isMax() const { return BrokenHints == ~0u; }
void setMax() { BrokenHints = ~0u; }
void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }
bool operator<(const EvictionCost &O) const {
return std::tie(BrokenHints, MaxWeight) <
std::tie(O.BrokenHints, O.MaxWeight);
}
};
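  // The comparison above is lexicographic; e.g. (illustrative values) a cost
  // of {BrokenHints=0, MaxWeight=100.0f} is cheaper than {BrokenHints=1,
  // MaxWeight=0.5f}: breaking a hint always dominates raw spill weight.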
// splitting state.
std::unique_ptr<SplitAnalysis> SA;
std::unique_ptr<SplitEditor> SE;
/// Cached per-block interference maps
InterferenceCache IntfCache;
/// All basic blocks where the current register has uses.
SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
/// Global live range splitting candidate info.
struct GlobalSplitCandidate {
// Register intended for assignment, or 0.
unsigned PhysReg;
// SplitKit interval index for this candidate.
unsigned IntvIdx;
// Interference for PhysReg.
InterferenceCache::Cursor Intf;
// Bundles where this candidate should be live.
BitVector LiveBundles;
SmallVector<unsigned, 8> ActiveBlocks;
void reset(InterferenceCache &Cache, unsigned Reg) {
PhysReg = Reg;
IntvIdx = 0;
Intf.setPhysReg(Cache, Reg);
LiveBundles.clear();
ActiveBlocks.clear();
}
// Set B[i] = C for every live bundle where B[i] was NoCand.
unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
unsigned Count = 0;
for (int i = LiveBundles.find_first(); i >= 0;
i = LiveBundles.find_next(i))
if (B[i] == NoCand) {
B[i] = C;
Count++;
}
return Count;
}
};
/// Candidate info for each PhysReg in AllocationOrder.
/// This vector never shrinks, but grows to the size of the largest register
/// class.
SmallVector<GlobalSplitCandidate, 32> GlobalCand;
enum : unsigned { NoCand = ~0u };
/// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
/// NoCand which indicates the stack interval.
SmallVector<unsigned, 32> BundleCand;
/// Callee-save register cost, calculated once per machine function.
BlockFrequency CSRCost;
  /// Whether to run the local reassignment heuristic. This information is
/// obtained from the TargetSubtargetInfo.
bool EnableLocalReassign;
/// Set of broken hints that may be reconciled later because of eviction.
SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;
public:
RAGreedy();
/// Return the pass name.
const char* getPassName() const override {
return "Greedy Register Allocator";
}
/// RAGreedy analysis usage.
void getAnalysisUsage(AnalysisUsage &AU) const override;
void releaseMemory() override;
Spiller &spiller() override { return *SpillerInstance; }
void enqueue(LiveInterval *LI) override;
LiveInterval *dequeue() override;
unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;
void aboutToRemoveInterval(LiveInterval &) override;
/// Perform register allocation.
bool runOnMachineFunction(MachineFunction &mf) override;
static char ID;
private:
unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
SmallVirtRegSet &, unsigned = 0);
bool LRE_CanEraseVirtReg(unsigned) override;
void LRE_WillShrinkVirtReg(unsigned) override;
void LRE_DidCloneVirtReg(unsigned, unsigned) override;
void enqueue(PQueue &CurQueue, LiveInterval *LI);
LiveInterval *dequeue(PQueue &CurQueue);
BlockFrequency calcSpillCost();
bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
void growRegion(GlobalSplitCandidate &Cand);
BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
bool calcCompactRegion(GlobalSplitCandidate&);
void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
void calcGapWeights(unsigned, SmallVectorImpl<float>&);
unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
void evictInterference(LiveInterval&, unsigned,
SmallVectorImpl<unsigned>&);
bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
SmallLISet &RecoloringCandidates,
const SmallVirtRegSet &FixedRegisters);
unsigned tryAssign(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
unsigned tryEvict(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&, unsigned = ~0u);
unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
/// Calculate cost of region splitting.
unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
AllocationOrder &Order,
BlockFrequency &BestCost,
unsigned &NumCands, bool IgnoreCSR);
/// Perform region splitting.
unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
bool HasCompact,
SmallVectorImpl<unsigned> &NewVRegs);
/// Check other options before using a callee-saved register for the first
/// time.
unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
unsigned PhysReg, unsigned &CostPerUseLimit,
SmallVectorImpl<unsigned> &NewVRegs);
void initializeCSRCost();
unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
unsigned trySplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
SmallVectorImpl<unsigned> &,
SmallVirtRegSet &, unsigned);
bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
SmallVirtRegSet &, unsigned);
void tryHintRecoloring(LiveInterval &);
void tryHintsRecoloring();
/// Model the information carried by one end of a copy.
struct HintInfo {
/// The frequency of the copy.
BlockFrequency Freq;
/// The virtual register or physical register.
unsigned Reg;
/// Its currently assigned register.
/// In case of a physical register Reg == PhysReg.
unsigned PhysReg;
HintInfo(BlockFrequency Freq, unsigned Reg, unsigned PhysReg)
: Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
};
typedef SmallVector<HintInfo, 4> HintsInfo;
BlockFrequency getBrokenHintFreq(const HintsInfo &, unsigned);
void collectHintInfo(unsigned, HintsInfo &);
bool isUnusedCalleeSavedReg(unsigned PhysReg) const;
};
} // end anonymous namespace
char RAGreedy::ID = 0;
#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
"RS_New",
"RS_Assign",
"RS_Split",
"RS_Split2",
"RS_Spill",
"RS_Done"
};
#endif
// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
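// For example, with Hysteresis ~0.98, a candidate whose measured benefit is
// within about 2% of the incumbent's is treated as "not better", so small
// floating-point noise cannot flip an earlier decision.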
FunctionPass* llvm::createGreedyRegisterAllocator() {
return new RAGreedy();
}
RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}
void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addPreserved<MachineBlockFrequencyInfo>();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addPreserved<LiveDebugVariables>();
AU.addRequired<LiveStacks>();
AU.addPreserved<LiveStacks>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<VirtRegMap>();
AU.addPreserved<VirtRegMap>();
AU.addRequired<LiveRegMatrix>();
AU.addPreserved<LiveRegMatrix>();
AU.addRequired<EdgeBundles>();
AU.addRequired<SpillPlacement>();
MachineFunctionPass::getAnalysisUsage(AU);
}
//===----------------------------------------------------------------------===//
// LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//
bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
if (VRM->hasPhys(VirtReg)) {
LiveInterval &LI = LIS->getInterval(VirtReg);
Matrix->unassign(LI);
aboutToRemoveInterval(LI);
return true;
}
// Unassigned virtreg is probably in the priority queue.
// RegAllocBase will erase it after dequeueing.
return false;
}
void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
if (!VRM->hasPhys(VirtReg))
return;
// Register is assigned, put it back on the queue for reassignment.
LiveInterval &LI = LIS->getInterval(VirtReg);
Matrix->unassign(LI);
enqueue(&LI);
}
void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
// Cloning a register we haven't even heard about yet? Just ignore it.
if (!ExtraRegInfo.inBounds(Old))
return;
  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned:
  // reset the parent's stage, and give the clone the same stage as the parent.
ExtraRegInfo[Old].Stage = RS_Assign;
ExtraRegInfo.grow(New);
ExtraRegInfo[New] = ExtraRegInfo[Old];
}
void RAGreedy::releaseMemory() {
SpillerInstance.reset();
ExtraRegInfo.clear();
GlobalCand.clear();
}
void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }
void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
// Prioritize live ranges by size, assigning larger ranges first.
// The queue holds (size, reg) pairs.
const unsigned Size = LI->getSize();
const unsigned Reg = LI->reg;
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Can only enqueue virtual registers");
unsigned Prio;
ExtraRegInfo.grow(Reg);
if (ExtraRegInfo[Reg].Stage == RS_New)
ExtraRegInfo[Reg].Stage = RS_Assign;
if (ExtraRegInfo[Reg].Stage == RS_Split) {
// Unsplit ranges that couldn't be allocated immediately are deferred until
// everything else has been allocated.
Prio = Size;
} else {
// Giant live ranges fall back to the global assignment heuristic, which
// prevents excessive spilling in pathological cases.
bool ReverseLocal = TRI->reverseLocalAssignment();
const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
bool ForceGlobal = !ReverseLocal &&
(Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());
if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
LIS->intervalIsInOneMBB(*LI)) {
// Allocate original local ranges in linear instruction order. Since they
// are singly defined, this produces optimal coloring in the absence of
// global interference and other constraints.
if (!ReverseLocal)
Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
else {
        // Allocating bottom up may allow many short live ranges to be
        // assigned first to one of the cheap registers. This could be much
        // faster for very
// to one of the cheap registers. This could be much faster for very
// large blocks on targets with many physical registers.
Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
}
Prio |= RC.AllocationPriority << 24;
} else {
// Allocate global and split ranges in long->short order. Long ranges that
// don't fit should be spilled (or split) ASAP so they don't create
// interference. Mark a bit to prioritize global above local ranges.
Prio = (1u << 29) + Size;
}
// Mark a higher bit to prioritize global and local above RS_Split.
Prio |= (1u << 31);
// Boost ranges that have a physical register hint.
if (VRM->hasKnownPreference(Reg))
Prio |= (1u << 30);
}
// The virtual register number is a tie breaker for same-sized ranges.
// Give lower vreg numbers higher priority to assign them first.
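  // E.g. (hypothetical vregs) with equal Prio, vreg 5 is pushed as ~5 and
  // vreg 9 as ~9; since ~5 > ~9, the max-heap pops vreg 5 first.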
CurQueue.push(std::make_pair(Prio, ~Reg));
}
LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }
LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
if (CurQueue.empty())
return nullptr;
LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
CurQueue.pop();
return LI;
}
//===----------------------------------------------------------------------===//
// Direct Assignment
//===----------------------------------------------------------------------===//
/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
Order.rewind();
unsigned PhysReg;
while ((PhysReg = Order.next()))
if (!Matrix->checkInterference(VirtReg, PhysReg))
break;
if (!PhysReg || Order.isHint())
return PhysReg;
// PhysReg is available, but there may be a better choice.
// If we missed a simple hint, try to cheaply evict interference from the
// preferred register.
if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
if (Order.isHint(Hint)) {
DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
EvictionCost MaxCost;
MaxCost.setBrokenHints(1);
if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
evictInterference(VirtReg, Hint, NewVRegs);
return Hint;
}
}
// Try to evict interference from a cheaper alternative.
unsigned Cost = TRI->getCostPerUse(PhysReg);
// Most registers have 0 additional cost.
if (!Cost)
return PhysReg;
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
<< '\n');
unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
return CheapReg ? CheapReg : PhysReg;
}
//===----------------------------------------------------------------------===//
// Interference eviction
//===----------------------------------------------------------------------===//
unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
unsigned PhysReg;
while ((PhysReg = Order.next())) {
if (PhysReg == PrevReg)
continue;
MCRegUnitIterator Units(PhysReg, TRI);
for (; Units.isValid(); ++Units) {
// Instantiate a "subquery", not to be confused with the Queries array.
LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]);
if (subQ.checkInterference())
break;
}
// If no units have interference, break out with the current PhysReg.
if (!Units.isValid())
break;
}
if (PhysReg)
DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
<< PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
<< '\n');
return PhysReg;
}
/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A The live range to be assigned.
/// @param IsHint True when A is about to be assigned to its preferred
/// register.
/// @param B The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
LiveInterval &B, bool BreaksHint) {
bool CanSplit = getStage(B) < RS_Spill;
// Be fairly aggressive about following hints as long as the evictee can be
// split.
if (CanSplit && IsHint && !BreaksHint)
return true;
if (A.weight > B.weight) {
DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
return true;
}
return false;
}
/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
/// when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
bool IsHint, EvictionCost &MaxCost) {
// It is only possible to evict virtual register interference.
if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
return false;
bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
// Find VirtReg's cascade number. This will be unassigned if VirtReg was never
// involved in an eviction before. If a cascade number was assigned, deny
// evicting anything with the same or a newer cascade number. This prevents
// infinite eviction loops.
//
// This works out so a register without a cascade number is allowed to evict
// anything, and it can be evicted by anything.
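  //
  // Hypothetical example: a VirtReg with cascade 3 may evict interference
  // carrying cascade 2 or no cascade at all, but not cascade 3 or 4; the
  // evictees are then stamped with 3 in evictInterference(), which rules out
  // an A-evicts-B, B-evicts-A loop.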
unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
if (!Cascade)
Cascade = NextCascade;
EvictionCost Cost;
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
if (Q.collectInterferingVRegs(10) >= 10)
return false;
// Check if any interfering live range is heavier than MaxWeight.
for (unsigned i = Q.interferingVRegs().size(); i; --i) {
LiveInterval *Intf = Q.interferingVRegs()[i - 1];
assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
"Only expecting virtual register interference from query");
// Never evict spill products. They cannot split or spill.
if (getStage(*Intf) == RS_Done)
return false;
// Once a live range becomes small enough, it is urgent that we find a
// register for it. This is indicated by an infinite spill weight. These
// urgent live ranges get to evict almost anything.
//
// Also allow urgent evictions of unspillable ranges from a strictly
// larger allocation order.
bool Urgent = !VirtReg.isSpillable() &&
(Intf->isSpillable() ||
RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
// Only evict older cascades or live ranges without a cascade.
unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
if (Cascade <= IntfCascade) {
if (!Urgent)
return false;
// We permit breaking cascades for urgent evictions. It should be the
// last resort, though, so make it really expensive.
Cost.BrokenHints += 10;
}
// Would this break a satisfied hint?
bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
// Update eviction cost.
Cost.BrokenHints += BreaksHint;
Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
// Abort if this would be too expensive.
if (!(Cost < MaxCost))
return false;
if (Urgent)
continue;
// Apply the eviction policy for non-urgent evictions.
if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
return false;
// If !MaxCost.isMax(), then we're just looking for a cheap register.
// Evicting another local live range in this case could lead to suboptimal
// coloring.
if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
(!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
return false;
}
}
}
MaxCost = Cost;
return true;
}
/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<unsigned> &NewVRegs) {
// Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
// evicted by a newer cascade, preventing infinite loops.
unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
if (!Cascade)
Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
<< " interference: Cascade " << Cascade << '\n');
// Collect all interfering virtregs first.
SmallVector<LiveInterval*, 8> Intfs;
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
Intfs.append(IVR.begin(), IVR.end());
}
// Evict them second. This will invalidate the queries.
for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
LiveInterval *Intf = Intfs[i];
// The same VirtReg may be present in multiple RegUnits. Skip duplicates.
if (!VRM->hasPhys(Intf->reg))
continue;
Matrix->unassign(*Intf);
assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
VirtReg.isSpillable() < Intf->isSpillable()) &&
"Cannot decrease cascade number, illegal eviction");
ExtraRegInfo[Intf->reg].Cascade = Cascade;
++NumEvicted;
NewVRegs.push_back(Intf->reg);
}
}
/// Returns true if the given \p PhysReg is a callee saved register and has not
/// been used for allocation yet.
bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
if (CSR == 0)
return false;
return !Matrix->isPhysRegUsed(PhysReg);
}
/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order Physregs to try.
/// @return Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs,
unsigned CostPerUseLimit) {
NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
// Keep track of the cheapest interference seen so far.
EvictionCost BestCost;
BestCost.setMax();
unsigned BestPhys = 0;
unsigned OrderLimit = Order.getOrder().size();
// When we are just looking for a reduced cost per use, don't break any
// hints, and only evict smaller spill weights.
if (CostPerUseLimit < ~0u) {
BestCost.BrokenHints = 0;
BestCost.MaxWeight = VirtReg.weight;
    // Check if any registers in RC are below CostPerUseLimit.
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
unsigned MinCost = RegClassInfo.getMinCost(RC);
if (MinCost >= CostPerUseLimit) {
DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = " << MinCost
<< ", no cheaper registers to be found.\n");
return 0;
}
// It is normal for register classes to have a long tail of registers with
// the same cost. We don't need to look at them if they're too expensive.
if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
OrderLimit = RegClassInfo.getLastCostChange(RC);
DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
}
}
Order.rewind();
while (unsigned PhysReg = Order.next(OrderLimit)) {
if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
continue;
// The first use of a callee-saved register in a function has cost 1.
// Don't start using a CSR when the CostPerUseLimit is low.
if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
<< PrintReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
<< '\n');
continue;
}
if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
continue;
// Best so far.
BestPhys = PhysReg;
// Stop if the hint can be used.
if (Order.isHint())
break;
}
if (!BestPhys)
return 0;
evictInterference(VirtReg, BestPhys, NewVRegs);
return BestPhys;
}
//===----------------------------------------------------------------------===//
// Region Splitting
//===----------------------------------------------------------------------===//
/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
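/// For illustration: interference that already overlaps the block entry
/// (Intf.first() at or before the MBB start index) forces the live-in value
/// onto the stack (MustSpill), while interference merely before the first
/// use only biases the bundle toward spilling (PrefSpill).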
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
BlockFrequency &Cost) {
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
// Reset interference dependent info.
SplitConstraints.resize(UseBlocks.size());
BlockFrequency StaticCost = 0;
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
BC.Number = BI.MBB->getNumber();
Intf.moveToBlock(BC.Number);
BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
BC.ChangesValue = BI.FirstDef.isValid();
if (!Intf.hasInterference())
continue;
// Number of spill code instructions to insert.
unsigned Ins = 0;
// Interference for the live-in value.
if (BI.LiveIn) {
if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
BC.Entry = SpillPlacement::MustSpill, ++Ins;
else if (Intf.first() < BI.FirstInstr)
BC.Entry = SpillPlacement::PrefSpill, ++Ins;
else if (Intf.first() < BI.LastInstr)
++Ins;
}
// Interference for the live-out value.
if (BI.LiveOut) {
if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
BC.Exit = SpillPlacement::MustSpill, ++Ins;
else if (Intf.last() > BI.LastInstr)
BC.Exit = SpillPlacement::PrefSpill, ++Ins;
else if (Intf.last() > BI.FirstInstr)
++Ins;
}
// Accumulate the total frequency of inserted spill code.
while (Ins--)
StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
}
Cost = StaticCost;
// Add constraints for use-blocks. Note that these are the only constraints
// that may add a positive bias, it is downhill from here.
SpillPlacer->addConstraints(SplitConstraints);
return SpillPlacer->scanActiveBundles();
}
/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
ArrayRef<unsigned> Blocks) {
const unsigned GroupSize = 8;
SpillPlacement::BlockConstraint BCS[GroupSize];
unsigned TBS[GroupSize];
unsigned B = 0, T = 0;
for (unsigned i = 0; i != Blocks.size(); ++i) {
unsigned Number = Blocks[i];
Intf.moveToBlock(Number);
if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow"); // T is reset to zero at GroupSize.
TBS[T] = Number;
if (++T == GroupSize) {
SpillPlacer->addLinks(makeArrayRef(TBS, T));
T = 0;
}
continue;
}
    assert(B < GroupSize && "Array overflow"); // B is reset to zero at GroupSize.
BCS[B].Number = Number;
// Interference for the live-in value.
if (Intf.first() <= Indexes->getMBBStartIdx(Number))
BCS[B].Entry = SpillPlacement::MustSpill;
else
BCS[B].Entry = SpillPlacement::PrefSpill;
// Interference for the live-out value.
if (Intf.last() >= SA->getLastSplitPoint(Number))
BCS[B].Exit = SpillPlacement::MustSpill;
else
BCS[B].Exit = SpillPlacement::PrefSpill;
if (++B == GroupSize) {
SpillPlacer->addConstraints(makeArrayRef(BCS, B));
B = 0;
}
}
SpillPlacer->addConstraints(makeArrayRef(BCS, B));
SpillPlacer->addLinks(makeArrayRef(TBS, T));
}
void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
// Keep track of through blocks that have not been added to SpillPlacer.
BitVector Todo = SA->getThroughBlocks();
SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
unsigned AddedTo = 0;
#ifndef NDEBUG
unsigned Visited = 0;
#endif
for (;;) {
ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
// Find new through blocks in the periphery of PrefRegBundles.
for (int i = 0, e = NewBundles.size(); i != e; ++i) {
unsigned Bundle = NewBundles[i];
// Look at all blocks connected to Bundle in the full graph.
ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
I != E; ++I) {
unsigned Block = *I;
if (!Todo.test(Block))
continue;
Todo.reset(Block);
// This is a new through block. Add it to SpillPlacer later.
ActiveBlocks.push_back(Block);
#ifndef NDEBUG
++Visited;
#endif
}
}
// Any new blocks to add?
if (ActiveBlocks.size() == AddedTo)
break;
// Compute through constraints from the interference, or assume that all
// through blocks prefer spilling when forming compact regions.
auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
if (Cand.PhysReg)
addThroughConstraints(Cand.Intf, NewBlocks);
else
// Provide a strong negative bias on through blocks to prevent unwanted
// liveness on loop backedges.
SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
AddedTo = ActiveBlocks.size();
// Perhaps iterating can enable more bundles?
SpillPlacer->iterate();
}
DEBUG(dbgs() << ", v=" << Visited);
}
/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
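/// For illustration (hypothetical CFG): a range defined in block A, merely
/// flowing through a loop B, and used again in block C yields a compact
/// region covering A and C, leaving B to the stack interval.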
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
// Without any through blocks, the live range is already compact.
if (!SA->getNumThroughBlocks())
return false;
// Compact regions don't correspond to any physreg.
Cand.reset(IntfCache, 0);
DEBUG(dbgs() << "Compact region bundles");
// Use the spill placer to determine the live bundles. GrowRegion pretends
// that all the through blocks have interference when PhysReg is unset.
SpillPlacer->prepare(Cand.LiveBundles);
// The static split cost will be zero since Cand.Intf reports no interference.
BlockFrequency Cost;
if (!addSplitConstraints(Cand.Intf, Cost)) {
DEBUG(dbgs() << ", none.\n");
return false;
}
growRegion(Cand);
SpillPlacer->finish();
if (!Cand.LiveBundles.any()) {
DEBUG(dbgs() << ", none.\n");
return false;
}
DEBUG({
for (int i = Cand.LiveBundles.find_first(); i>=0;
i = Cand.LiveBundles.find_next(i))
dbgs() << " EB#" << i;
dbgs() << ".\n";
});
return true;
}
/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
BlockFrequency Cost = 0;
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
unsigned Number = BI.MBB->getNumber();
// We normally only need one spill instruction - a load or a store.
Cost += SpillPlacer->getBlockFrequency(Number);
// Unless the value is redefined in the block.
if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
Cost += SpillPlacer->getBlockFrequency(Number);
}
return Cost;
}
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
BlockFrequency GlobalCost = 0;
const BitVector &LiveBundles = Cand.LiveBundles;
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
unsigned Ins = 0;
if (BI.LiveIn)
Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
if (BI.LiveOut)
Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
while (Ins--)
GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
}
for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
unsigned Number = Cand.ActiveBlocks[i];
bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
if (!RegIn && !RegOut)
continue;
if (RegIn && RegOut) {
// We need double spill code if this block has interference.
Cand.Intf.moveToBlock(Number);
if (Cand.Intf.hasInterference()) {
GlobalCost += SpillPlacer->getBlockFrequency(Number);
GlobalCost += SpillPlacer->getBlockFrequency(Number);
}
continue;
}
// live-in / stack-out or stack-in live-out.
GlobalCost += SpillPlacer->getBlockFrequency(Number);
}
return GlobalCost;
}
/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
/// must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
ArrayRef<unsigned> UsedCands) {
// These are the intervals created for new global ranges. We may create more
// intervals for local ranges.
const unsigned NumGlobalIntvs = LREdit.size();
DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
assert(NumGlobalIntvs && "No global intervals configured");
// Isolate even single instructions when dealing with a proper sub-class.
// That guarantees register class inflation for the stack interval because it
// is all copies.
unsigned Reg = SA->getParent().reg;
bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
// First handle all the blocks with uses.
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
unsigned Number = BI.MBB->getNumber();
unsigned IntvIn = 0, IntvOut = 0;
SlotIndex IntfIn, IntfOut;
if (BI.LiveIn) {
unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
if (CandIn != NoCand) {
GlobalSplitCandidate &Cand = GlobalCand[CandIn];
IntvIn = Cand.IntvIdx;
Cand.Intf.moveToBlock(Number);
IntfIn = Cand.Intf.first();
}
}
if (BI.LiveOut) {
unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
if (CandOut != NoCand) {
GlobalSplitCandidate &Cand = GlobalCand[CandOut];
IntvOut = Cand.IntvIdx;
Cand.Intf.moveToBlock(Number);
IntfOut = Cand.Intf.last();
}
}
// Create separate intervals for isolated blocks with multiple uses.
if (!IntvIn && !IntvOut) {
DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
SE->splitSingleBlock(BI);
continue;
}
if (IntvIn && IntvOut)
SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
else if (IntvIn)
SE->splitRegInBlock(BI, IntvIn, IntfIn);
else
SE->splitRegOutBlock(BI, IntvOut, IntfOut);
}
// Handle live-through blocks. The relevant live-through blocks are stored in
// the ActiveBlocks list with each candidate. We need to filter out
// duplicates.
BitVector Todo = SA->getThroughBlocks();
for (unsigned c = 0; c != UsedCands.size(); ++c) {
ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
unsigned Number = Blocks[i];
if (!Todo.test(Number))
continue;
Todo.reset(Number);
unsigned IntvIn = 0, IntvOut = 0;
SlotIndex IntfIn, IntfOut;
unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
if (CandIn != NoCand) {
GlobalSplitCandidate &Cand = GlobalCand[CandIn];
IntvIn = Cand.IntvIdx;
Cand.Intf.moveToBlock(Number);
IntfIn = Cand.Intf.first();
}
unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
if (CandOut != NoCand) {
GlobalSplitCandidate &Cand = GlobalCand[CandOut];
IntvOut = Cand.IntvIdx;
Cand.Intf.moveToBlock(Number);
IntfOut = Cand.Intf.last();
}
if (!IntvIn && !IntvOut)
continue;
SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
}
}
++NumGlobalSplits;
SmallVector<unsigned, 8> IntvMap;
SE->finish(&IntvMap);
DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
ExtraRegInfo.resize(MRI->getNumVirtRegs());
unsigned OrigBlocks = SA->getNumLiveBlocks();
// Sort out the new intervals created by splitting. We get four kinds:
// - Remainder intervals should not be split again.
// - Candidate intervals can be assigned to Cand.PhysReg.
// - Block-local splits are candidates for local splitting.
// - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(i));
    // Ignore old intervals from DCE.
    if (getStage(LI) != RS_New)
      continue;
    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(LI, RS_Spill);
      continue;
    }
    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&LI) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(LI, RS_Split2);
      }
      continue;
    }
    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }
if (VerifyEnabled)
MF->verify(this, "After splitting live range around region");
}
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
unsigned NumCands = 0;
BlockFrequency BestCost;
// Check if we can split this live range around a compact region.
bool HasCompact = calcCompactRegion(GlobalCand.front());
if (HasCompact) {
// Yes, keep GlobalCand[0] as the compact region candidate.
NumCands = 1;
BestCost = BlockFrequency::getMaxFrequency();
} else {
// No benefit from the compact region, our fallback will be per-block
// splitting. Make sure we find a solution that is cheaper than spilling.
BestCost = calcSpillCost();
DEBUG(dbgs() << "Cost of isolating all blocks = ";
MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
}
unsigned BestCand =
calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
false/*IgnoreCSR*/);
// No solutions found, fall back to single block splitting.
if (!HasCompact && BestCand == NoCand)
return 0;
return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
}
unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
AllocationOrder &Order,
BlockFrequency &BestCost,
unsigned &NumCands,
bool IgnoreCSR) {
unsigned BestCand = NoCand;
Order.rewind();
while (unsigned PhysReg = Order.next()) {
if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
continue;
// Discard bad candidates before we run out of interference cache cursors.
// This will only affect register classes with a lot of registers (>32).
if (NumCands == IntfCache.getMaxCursors()) {
unsigned WorstCount = ~0u;
unsigned Worst = 0;
for (unsigned i = 0; i != NumCands; ++i) {
if (i == BestCand || !GlobalCand[i].PhysReg)
continue;
unsigned Count = GlobalCand[i].LiveBundles.count();
if (Count < WorstCount)
Worst = i, WorstCount = Count;
}
--NumCands;
GlobalCand[Worst] = GlobalCand[NumCands];
if (BestCand == NumCands)
BestCand = Worst;
}
if (GlobalCand.size() <= NumCands)
GlobalCand.resize(NumCands+1);
GlobalSplitCandidate &Cand = GlobalCand[NumCands];
Cand.reset(IntfCache, PhysReg);
SpillPlacer->prepare(Cand.LiveBundles);
BlockFrequency Cost;
if (!addSplitConstraints(Cand.Intf, Cost)) {
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
continue;
}
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = ";
MBFI->printBlockFreq(dbgs(), Cost));
if (Cost >= BestCost) {
DEBUG({
if (BestCand == NoCand)
dbgs() << " worse than no bundles\n";
else
dbgs() << " worse than "
<< PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
});
continue;
}
growRegion(Cand);
SpillPlacer->finish();
// No live bundles, defer to splitSingleBlocks().
if (!Cand.LiveBundles.any()) {
DEBUG(dbgs() << " no bundles.\n");
continue;
}
Cost += calcGlobalSplitCost(Cand);
DEBUG({
dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost)
<< " with bundles";
for (int i = Cand.LiveBundles.find_first(); i>=0;
i = Cand.LiveBundles.find_next(i))
dbgs() << " EB#" << i;
dbgs() << ".\n";
});
if (Cost < BestCost) {
BestCand = NumCands;
BestCost = Cost;
}
++NumCands;
}
return BestCand;
}
unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
bool HasCompact,
SmallVectorImpl<unsigned> &NewVRegs) {
SmallVector<unsigned, 8> UsedCands;
// Prepare split editor.
LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit, SplitSpillMode);
// Assign all edge bundles to the preferred candidate, or NoCand.
BundleCand.assign(Bundles->getNumBundles(), NoCand);
// Assign bundles for the best candidate region.
if (BestCand != NoCand) {
GlobalSplitCandidate &Cand = GlobalCand[BestCand];
if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
UsedCands.push_back(BestCand);
Cand.IntvIdx = SE->openIntv();
DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
<< B << " bundles, intv " << Cand.IntvIdx << ".\n");
(void)B;
}
}
// Assign bundles for the compact region.
if (HasCompact) {
GlobalSplitCandidate &Cand = GlobalCand.front();
assert(!Cand.PhysReg && "Compact region has no physreg");
if (unsigned B = Cand.getBundles(BundleCand, 0)) {
UsedCands.push_back(0);
Cand.IntvIdx = SE->openIntv();
DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
<< Cand.IntvIdx << ".\n");
(void)B;
}
}
splitAroundRegion(LREdit, UsedCands);
return 0;
}
//===----------------------------------------------------------------------===//
// Per-Block Splitting
//===----------------------------------------------------------------------===//
/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
unsigned Reg = VirtReg.reg;
bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit, SplitSpillMode);
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
SE->splitSingleBlock(BI);
}
// No blocks were split.
if (LREdit.empty())
return 0;
// We did split for some blocks.
SmallVector<unsigned, 8> IntvMap;
SE->finish(&IntvMap);
// Tell LiveDebugVariables about the new ranges.
DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
ExtraRegInfo.resize(MRI->getNumVirtRegs());
// Sort out the new intervals created by splitting. The remainder interval
// goes straight to spilling, the new local ranges get to stay RS_New.
for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
LiveInterval &LI = LIS->getInterval(LREdit.get(i));
if (getStage(LI) == RS_New && IntvMap[i] == 0)
setStage(LI, RS_Spill);
}
if (VerifyEnabled)
MF->verify(this, "After splitting live range around basic blocks");
return 0;
}
//===----------------------------------------------------------------------===//
// Per-Instruction Splitting
//===----------------------------------------------------------------------===//
/// Get the number of allocatable registers that match the constraints of \p Reg
/// on \p MI and that are also in \p SuperRC.
static unsigned getNumAllocatableRegsForConstraints(
const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
const RegisterClassInfo &RCI) {
assert(SuperRC && "Invalid register class");
const TargetRegisterClass *ConstrainedRC =
MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
/* ExploreBundle */ true);
if (!ConstrainedRC)
return 0;
return RCI.getNumAllocatableRegs(ConstrainedRC);
}
/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
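///
/// For example, a virtual register constrained to x86's GR32_ABCD by a
/// single instruction can be split so that only the segment around that
/// instruction keeps the tight constraint, while the rest of the range is
/// free to use any GR32 register. (Register class names are illustrative.)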
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
// There is no point to this if there are no larger sub-classes.
if (!RegClassInfo.isProperSubClass(CurRC))
return 0;
// Always enable split spill mode, since we're effectively spilling to a
// register.
LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit, SplitEditor::SM_Size);
ArrayRef<SlotIndex> Uses = SA->getUseSlots();
if (Uses.size() <= 1)
return 0;
DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");
const TargetRegisterClass *SuperRC =
TRI->getLargestLegalSuperClass(CurRC, *MF);
unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
// Split around every non-copy instruction if this split will relax
// the constraints on the virtual register.
// Otherwise, splitting just inserts uncoalescable copies that do not help
// the allocation.
for (unsigned i = 0; i != Uses.size(); ++i) {
if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
if (MI->isFullCopy() ||
SuperRCNumAllocatableRegs ==
getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
TRI, RCI)) {
DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
continue;
}
SE->openIntv();
SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
SE->useIntv(SegStart, SegStop);
}
if (LREdit.empty()) {
DEBUG(dbgs() << "All uses were copies.\n");
return 0;
}
SmallVector<unsigned, 8> IntvMap;
SE->finish(&IntvMap);
DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
ExtraRegInfo.resize(MRI->getNumVirtRegs());
// Assign all new registers to RS_Spill. This was the last chance.
setStage(LREdit.begin(), LREdit.end(), RS_Spill);
return 0;
}
//===----------------------------------------------------------------------===//
// Local Splitting
//===----------------------------------------------------------------------===//
/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
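/// For example, with four use slots there are three gaps; per the note in
/// the body below, an interfering segment that overlaps the instruction at
/// UseSlots[1] contributes its weight to both GapWeight[0] and
/// GapWeight[1].
///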
void RAGreedy::calcGapWeights(unsigned PhysReg,
SmallVectorImpl<float> &GapWeight) {
assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
ArrayRef<SlotIndex> Uses = SA->getUseSlots();
const unsigned NumGaps = Uses.size()-1;
// Start and end points for the interference check.
SlotIndex StartIdx =
BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
SlotIndex StopIdx =
BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;
GapWeight.assign(NumGaps, 0.0f);
// Add interference from each overlapping register.
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
.checkInterference())
continue;
// We know that VirtReg is a continuous interval from FirstInstr to
// LastInstr, so we don't need InterferenceQuery.
//
// Interference that overlaps an instruction is counted in both gaps
// surrounding the instruction. The exception is interference before
// StartIdx and after StopIdx.
//
LiveIntervalUnion::SegmentIter IntI =
Matrix->getLiveUnions()[*Units].find(StartIdx);
for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
// Skip the gaps before IntI.
while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
if (++Gap == NumGaps)
break;
if (Gap == NumGaps)
break;
// Update the gaps covered by IntI.
const float weight = IntI.value()->weight;
for (; Gap != NumGaps; ++Gap) {
GapWeight[Gap] = std::max(GapWeight[Gap], weight);
if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
break;
}
if (Gap == NumGaps)
break;
}
}
// Add fixed interference.
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
const LiveRange &LR = LIS->getRegUnit(*Units);
LiveRange::const_iterator I = LR.find(StartIdx);
LiveRange::const_iterator E = LR.end();
// Same loop as above. Mark any overlapped gaps as HUGE_VALF.
for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
while (Uses[Gap+1].getBoundaryIndex() < I->start)
if (++Gap == NumGaps)
break;
if (Gap == NumGaps)
break;
for (; Gap != NumGaps; ++Gap) {
GapWeight[Gap] = llvm::huge_valf;
if (Uses[Gap+1].getBaseIndex() >= I->end)
break;
}
if (Gap == NumGaps)
break;
}
}
}
/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
// Note that it is possible to have an interval that is live-in or live-out
// while only covering a single block - A phi-def can use undef values from
// predecessors, and the block could be a single-block loop.
// We don't bother doing anything clever about such a case, we simply assume
// that the interval is continuous from FirstInstr to LastInstr. We should
// make sure that we don't do anything illegal to such an interval, though.
ArrayRef<SlotIndex> Uses = SA->getUseSlots();
if (Uses.size() <= 2)
return 0;
const unsigned NumGaps = Uses.size()-1;
DEBUG({
dbgs() << "tryLocalSplit: ";
for (unsigned i = 0, e = Uses.size(); i != e; ++i)
dbgs() << ' ' << Uses[i];
dbgs() << '\n';
});
// If VirtReg is live across any register mask operands, compute a list of
// gaps with register masks.
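// For example, a call between Uses[2] and Uses[3] carries a regmask that
// clobbers all non-preserved registers, so gap 2 is recorded here and later
// forced to huge_valf for any clobbered PhysReg.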
SmallVector<unsigned, 8> RegMaskGaps;
if (Matrix->checkRegMaskInterference(VirtReg)) {
// Get regmask slots for the whole block.
ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
DEBUG(dbgs() << RMS.size() << " regmasks in block:");
// Constrain to VirtReg's live range.
unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
Uses.front().getRegSlot()) - RMS.begin();
unsigned re = RMS.size();
for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
// Look for Uses[i] <= RMS <= Uses[i+1].
assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
continue;
// Skip a regmask on the same instruction as the last use. It doesn't
// overlap the live range.
if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
break;
DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
RegMaskGaps.push_back(i);
// Advance ri to the next gap. A regmask on one of the uses counts in
// both gaps.
while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
++ri;
}
DEBUG(dbgs() << '\n');
}
// Since we allow local split results to be split again, there is a risk of
// creating infinite loops. It is tempting to require that the new live
// ranges have fewer instructions than the original. That would guarantee
// convergence, but it is too strict. A live range with 3 instructions can be
// split into 2+3 (including the COPY), and we want to allow that.
//
// Instead we use these rules:
//
// 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
// noop split, of course).
// 2. Require that progress be made for ranges with getStage() == RS_Split2.
// All the new ranges must have fewer instructions than before the split.
// 3. New ranges with the same number of instructions are marked RS_Split2,
// smaller ranges are marked RS_New.
//
// These rules allow a 3 -> 2+3 split once, which we need. They also prevent
// excessive splitting and infinite loops.
//
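// Concretely, a 3-instruction range below RS_Split2 may split into 2+3; the
// new 3-instruction piece is tagged RS_Split2, so any further split of it
// must produce strictly smaller pieces, and the process terminates.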
bool ProgressRequired = getStage(VirtReg) >= RS_Split2;
// Best split candidate.
unsigned BestBefore = NumGaps;
unsigned BestAfter = 0;
float BestDiff = 0;
const float blockFreq =
SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
(1.0f / MBFI->getEntryFreq());
SmallVector<float, 8> GapWeight;
Order.rewind();
while (unsigned PhysReg = Order.next()) {
// Keep track of the largest spill weight that would need to be evicted in
// order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
calcGapWeights(PhysReg, GapWeight);
// Remove any gaps with regmask clobbers.
if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
GapWeight[RegMaskGaps[i]] = llvm::huge_valf;
// Try to find the best sequence of gaps to close.
// The new spill weight must be larger than any gap interference.
// We will split before Uses[SplitBefore] and after Uses[SplitAfter].
unsigned SplitBefore = 0, SplitAfter = 1;
// MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
// It is the spill weight that needs to be evicted.
float MaxGap = GapWeight[0];
for (;;) {
// Live before/after split?
const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
<< Uses[SplitBefore] << '-' << Uses[SplitAfter]
<< " i=" << MaxGap);
// Stop before the interval gets so big we wouldn't be making progress.
if (!LiveBefore && !LiveAfter) {
DEBUG(dbgs() << " all\n");
break;
}
// Should the interval be extended or shrunk?
bool Shrink = true;
// How many gaps would the new range have?
unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
// Legally, without causing looping?
bool Legal = !ProgressRequired || NewGaps < NumGaps;
if (Legal && MaxGap < llvm::huge_valf) {
// Estimate the new spill weight. Each instruction reads or writes the
// register. Conservatively assume there are no read-modify-write
// instructions.
//
// Try to guess the size of the new interval.
const float EstWeight = normalizeSpillWeight(
blockFreq * (NewGaps + 1),
Uses[SplitBefore].distance(Uses[SplitAfter]) +
(LiveBefore + LiveAfter) * SlotIndex::InstrDist,
1);
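// The estimate scales the block frequency by the number of instructions
// that would touch the new register (NewGaps + 1) and normalizes by the
// candidate interval's size, so short, dense candidates score highest.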
// Would this split be possible to allocate?
// Never allocate all gaps; we wouldn't be making progress.
DEBUG(dbgs() << " w=" << EstWeight);
if (EstWeight * Hysteresis >= MaxGap) {
Shrink = false;
float Diff = EstWeight - MaxGap;
if (Diff > BestDiff) {
DEBUG(dbgs() << " (best)");
BestDiff = Hysteresis * Diff;
BestBefore = SplitBefore;
BestAfter = SplitAfter;
}
}
}
// Try to shrink.
if (Shrink) {
if (++SplitBefore < SplitAfter) {
DEBUG(dbgs() << " shrink\n");
// Recompute the max when necessary.
if (GapWeight[SplitBefore - 1] >= MaxGap) {
MaxGap = GapWeight[SplitBefore];
for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
MaxGap = std::max(MaxGap, GapWeight[i]);
}
continue;
}
MaxGap = 0;
}
// Try to extend the interval.
if (SplitAfter >= NumGaps) {
DEBUG(dbgs() << " end\n");
break;
}
DEBUG(dbgs() << " extend\n");
MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
}
}
// Didn't find any candidates?
if (BestBefore == NumGaps)
return 0;
DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
<< '-' << Uses[BestAfter] << ", " << BestDiff
<< ", " << (BestAfter - BestBefore + 1) << " instrs\n");
LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit);
SE->openIntv();
SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
SE->useIntv(SegStart, SegStop);
SmallVector<unsigned, 8> IntvMap;
SE->finish(&IntvMap);
DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
// If the new range has the same number of instructions as before, mark it as
// RS_Split2 so the next split will be forced to make progress. Otherwise,
// leave the new intervals as RS_New so they can compete.
bool LiveBefore = BestBefore != 0 || BI.LiveIn;
bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
if (NewGaps >= NumGaps) {
DEBUG(dbgs() << "Tagging non-progress ranges: ");
assert(!ProgressRequired && "Didn't make progress when it was required.");
for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
if (IntvMap[i] == 1) {
setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
DEBUG(dbgs() << PrintReg(LREdit.get(i)));
}
DEBUG(dbgs() << '\n');
}
++NumLocalSplits;
return 0;
}
//===----------------------------------------------------------------------===//
// Live Range Splitting
//===----------------------------------------------------------------------===//
/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned>&NewVRegs) {
// Ranges must be Split2 or less.
if (getStage(VirtReg) >= RS_Spill)
return 0;
// Local intervals are handled separately.
if (LIS->intervalIsInOneMBB(VirtReg)) {
NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
SA->analyze(&VirtReg);
unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
if (PhysReg || !NewVRegs.empty())
return PhysReg;
return tryInstructionSplit(VirtReg, Order, NewVRegs);
}
NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
SA->analyze(&VirtReg);
// FIXME: SplitAnalysis may repair broken live ranges coming from the
// coalescer. That may cause the range to become allocatable which means that
// tryRegionSplit won't be making progress. This check should be replaced with
// an assertion when the coalescer is fixed.
if (SA->didRepairRange()) {
// VirtReg has changed, so all cached queries are invalid.
Matrix->invalidateVirtRegs();
if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
return PhysReg;
}
// First try to split around a region spanning multiple blocks. RS_Split2
// ranges already made dubious progress with region splitting, so they go
// straight to single block splitting.
if (getStage(VirtReg) < RS_Split2) {
unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
if (PhysReg || !NewVRegs.empty())
return PhysReg;
}
// Then isolate blocks.
return tryBlockSplit(VirtReg, Order, NewVRegs);
}
//===----------------------------------------------------------------------===//
// Last Chance Recoloring
//===----------------------------------------------------------------------===//
/// mayRecolorAllInterferences - Check if the virtual registers that
/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
/// recolored to free \p PhysReg.
/// When true is returned, \p RecoloringCandidates has been augmented with all
/// the live intervals that need to be recolored in order to free \p PhysReg
/// for \p VirtReg.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
bool
RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
SmallLISet &RecoloringCandidates,
const SmallVirtRegSet &FixedRegisters) {
const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
// If there are LastChanceRecoloringMaxInterference or more interferences,
// chances are one would not be recolorable.
if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
DEBUG(dbgs() << "Early abort: too many interferences.\n");
CutOffInfo |= CO_Interf;
return false;
}
for (unsigned i = Q.interferingVRegs().size(); i; --i) {
LiveInterval *Intf = Q.interferingVRegs()[i - 1];
// If Intf is done and sits in the same register class as VirtReg, it
// would not be recolorable as it is in the same state as VirtReg.
if ((getStage(*Intf) == RS_Done &&
MRI->getRegClass(Intf->reg) == CurRC) ||
FixedRegisters.count(Intf->reg)) {
DEBUG(dbgs() << "Early abort: the interference is not recolorable.\n");
return false;
}
RecoloringCandidates.insert(Intf);
}
}
return true;
}
/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
/// its interferences.
/// Last chance recoloring chooses a color for \p VirtReg and recolors every
/// virtual register that was using it. The recoloring process may recursively
/// use the last chance recoloring. Therefore, when a virtual register has been
/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
/// be last-chance-recolored again during this recoloring "session".
/// E.g.,
/// Let
/// vA can use {R1, R2 }
/// vB can use { R2, R3}
/// vC can use {R1 }
/// Where vA, vB, and vC cannot be split anymore (they are reloads for
/// instance) and they all interfere.
///
/// vA is assigned R1
/// vB is assigned R2
/// vC tries to evict vA but vA is already done.
/// Regular register allocation fails.
///
/// Last chance recoloring kicks in:
/// vC acts as if vA was evicted => vC uses R1.
/// vC is marked as fixed.
/// vA needs to find a color.
/// None are available.
/// vA cannot evict vC: vC is a fixed virtual register now.
/// vA acts as if vB was evicted => vA uses R2.
/// vB needs to find a color.
/// R3 is available.
/// Recoloring => vC = R1, vA = R2, vB = R3
///
/// \p Order defines the preferred allocation order for \p VirtReg.
/// \p NewRegs will contain any new virtual registers that have been created
/// (split, spill) during the process and that must be assigned.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
/// \p Depth gives the current depth of the last chance recoloring.
/// \return a physical register that can be used for VirtReg or ~0u if none
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs,
SmallVirtRegSet &FixedRegisters,
unsigned Depth) {
DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
// Ranges must be Done.
assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
"Last chance recoloring should really be last chance");
// Set the max depth to LastChanceRecoloringMaxDepth.
// We may want to reconsider that if we end up with too large a search space
// for targets with hundreds of registers.
// Indeed, in that case we may want to cut the search space earlier.
if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
DEBUG(dbgs() << "Abort because max depth has been reached.\n");
CutOffInfo |= CO_Depth;
return ~0u;
}
// Set of Live intervals that will need to be recolored.
SmallLISet RecoloringCandidates;
// Record the original mapping from virtual register to physical register,
// in case the recoloring fails.
DenseMap<unsigned, unsigned> VirtRegToPhysReg;
// Mark VirtReg as fixed, i.e., it will not be recolored past this point in
// this recoloring "session".
FixedRegisters.insert(VirtReg.reg);
Order.rewind();
while (unsigned PhysReg = Order.next()) {
DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
<< PrintReg(PhysReg, TRI) << '\n');
RecoloringCandidates.clear();
VirtRegToPhysReg.clear();
// It is only possible to recolor virtual register interference.
if (Matrix->checkInterference(VirtReg, PhysReg) >
LiveRegMatrix::IK_VirtReg) {
DEBUG(dbgs() << "Some interferences are not with virtual registers.\n");
continue;
}
// Early give up on this PhysReg if it is obvious we cannot recolor all
// the interferences.
if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
FixedRegisters)) {
DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
continue;
}
// RecoloringCandidates contains all the virtual registers that interfere
// with VirtReg on PhysReg (or one of its aliases).
// Enqueue them for recoloring and perform the actual recoloring.
PQueue RecoloringQueue;
for (SmallLISet::iterator It = RecoloringCandidates.begin(),
EndIt = RecoloringCandidates.end();
It != EndIt; ++It) {
unsigned ItVirtReg = (*It)->reg;
enqueue(RecoloringQueue, *It);
assert(VRM->hasPhys(ItVirtReg) &&
"Interferences are supposed to be with allocated variables");
// Record the current allocation.
VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
// Unassign the interval from the matrix.
Matrix->unassign(**It);
}
// Pretend VirtReg was assigned to PhysReg so that the underlying
// recoloring has the right information about the interferences and
// available colors.
Matrix->assign(VirtReg, PhysReg);
// Save the current recoloring state.
// If we cannot recolor all the interferences, we will have to start again
// at this point for the next physical register.
SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
if (tryRecoloringCandidates(RecoloringQueue, NewVRegs, FixedRegisters,
Depth)) {
// Do not mess with the global assignment process.
// I.e., VirtReg must be unassigned.
Matrix->unassign(VirtReg);
return PhysReg;
}
DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
<< PrintReg(PhysReg, TRI) << '\n');
// The recoloring attempt failed, undo the changes.
FixedRegisters = SaveFixedRegisters;
Matrix->unassign(VirtReg);
for (SmallLISet::iterator It = RecoloringCandidates.begin(),
EndIt = RecoloringCandidates.end();
It != EndIt; ++It) {
unsigned ItVirtReg = (*It)->reg;
if (VRM->hasPhys(ItVirtReg))
Matrix->unassign(**It);
unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
Matrix->assign(**It, ItPhysReg);
}
}
// Last chance recoloring did not work either; give up.
return ~0u;
}
/// tryRecoloringCandidates - Try to assign a new color to every register
/// in \p RecoloringQueue.
/// \p NewRegs will contain any new virtual register created during the
/// recoloring process.
/// \p FixedRegisters[in/out] contains all the registers that have been
/// recolored.
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
SmallVectorImpl<unsigned> &NewVRegs,
SmallVirtRegSet &FixedRegisters,
unsigned Depth) {
while (!RecoloringQueue.empty()) {
LiveInterval *LI = dequeue(RecoloringQueue);
DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
unsigned PhysReg =
selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
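// ~0u signals that recoloring failed deeper in the recursion; 0 signals
// that the range was deferred, split, or spilled rather than assigned.
// Either way, this recoloring attempt cannot succeed as-is.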
if (PhysReg == ~0u || !PhysReg)
return false;
DEBUG(dbgs() << "Recoloring of " << *LI
<< " succeeded with: " << PrintReg(PhysReg, TRI) << '\n');
Matrix->assign(*LI, PhysReg);
FixedRegisters.insert(LI->reg);
}
return true;
}
//===----------------------------------------------------------------------===//
// Main Entry Point
//===----------------------------------------------------------------------===//
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &NewVRegs) {
CutOffInfo = CO_None;
LLVMContext &Ctx = MF->getFunction()->getContext();
SmallVirtRegSet FixedRegisters;
unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
if (Reg == ~0U && (CutOffInfo != CO_None)) {
uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
if (CutOffEncountered == CO_Depth)
Ctx.emitError("register allocation failed: maximum depth for recoloring "
"reached. Use -fexhaustive-register-search to skip "
"cutoffs");
else if (CutOffEncountered == CO_Interf)
Ctx.emitError("register allocation failed: maximum interference for "
"recoloring reached. Use -fexhaustive-register-search "
"to skip cutoffs");
else if (CutOffEncountered == (CO_Depth | CO_Interf))
Ctx.emitError("register allocation failed: maximum interference and "
"depth for recoloring reached. Use "
"-fexhaustive-register-search to skip cutoffs");
}
return Reg;
}
/// Using a CSR for the first time has a cost because it causes push|pop
/// to be added to prologue|epilogue. Splitting a cold section of the live
/// range, or spilling the live range in the cold path, can have lower cost
/// than using the CSR for the first time. Returns the physical register if
/// we decide to use the CSR; otherwise returns 0.
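///
/// For example, if \p VirtReg is cold enough that spilling it costs less
/// than CSRCost, we return 0 and let it spill rather than dirty a
/// callee-saved register for the whole function.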
unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
AllocationOrder &Order,
unsigned PhysReg,
unsigned &CostPerUseLimit,
SmallVectorImpl<unsigned> &NewVRegs) {
if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
// We choose spill over using the CSR for the first time if the spill cost
// is lower than CSRCost.
SA->analyze(&VirtReg);
if (calcSpillCost() >= CSRCost)
return PhysReg;
// We are going to spill, set CostPerUseLimit to 1 to make sure that
// we will not use a callee-saved register in tryEvict.
CostPerUseLimit = 1;
return 0;
}
if (getStage(VirtReg) < RS_Split) {
// We choose pre-splitting over using the CSR for the first time if
// the cost of splitting is lower than CSRCost.
SA->analyze(&VirtReg);
unsigned NumCands = 0;
BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
NumCands, true /*IgnoreCSR*/);
if (BestCand == NoCand)
// Use the CSR if we can't find a region split below CSRCost.
return PhysReg;
// Perform the actual pre-splitting.
doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
return 0;
}
return PhysReg;
}
void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
// Do not keep invalid information around.
SetOfBrokenHints.remove(&LI);
}
void RAGreedy::initializeCSRCost() {
// We use the larger of the command-line option and the value reported
// by TRI.
CSRCost = BlockFrequency(
std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
if (!CSRCost.getFrequency())
return;
// Raw cost is relative to Entry == 2^14; scale it appropriately.
uint64_t ActualEntry = MBFI->getEntryFreq();
if (!ActualEntry) {
CSRCost = 0;
return;
}
uint64_t FixedEntry = 1 << 14;
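// E.g., an actual entry frequency of 2^13 halves the raw cost, while an
// actual entry frequency of 2^15 doubles it.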
if (ActualEntry < FixedEntry)
CSRCost *= BranchProbability(ActualEntry, FixedEntry);
else if (ActualEntry <= UINT32_MAX)
// Invert the fraction and divide.
CSRCost /= BranchProbability(FixedEntry, ActualEntry);
else
// Can't use BranchProbability in general, since it takes 32-bit numbers.
CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
}
/// \brief Collect the hint info for \p Reg.
/// The results are stored into \p Out.
/// \p Out is not cleared before being populated.
void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) {
for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
if (!Instr.isFullCopy())
continue;
// Look for the other end of the copy.
unsigned OtherReg = Instr.getOperand(0).getReg();
if (OtherReg == Reg) {
OtherReg = Instr.getOperand(1).getReg();
if (OtherReg == Reg)
continue;
}
// Get the current assignment.
unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
? OtherReg
: VRM->getPhys(OtherReg);
// Push the collected information.
Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
OtherPhysReg));
}
}
/// \brief Using the given \p List, compute the cost of the broken hints if
/// \p PhysReg was used.
/// \return The cost of \p List for \p PhysReg.
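/// For example, with hints {(freq 10, R1), (freq 4, R2)} and
/// \p PhysReg == R1, only the copy hinted to R2 stays a real copy, so the
/// returned cost is 4. (Frequencies are illustrative.)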
BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
unsigned PhysReg) {
BlockFrequency Cost = 0;
for (const HintInfo &Info : List) {
if (Info.PhysReg != PhysReg)
Cost += Info.Freq;
}
return Cost;
}
/// \brief Using the register assigned to \p VirtReg, try to recolor
/// all the live ranges that are copy-related with \p VirtReg.
/// The recoloring is then propagated to all the live-ranges that have
/// been recolored and so on, until no more copies can be coalesced or
/// it is not profitable.
/// For a given live range, profitability is determined by the sum of the
/// frequencies of the non-identity copies it would introduce with the old
/// and new register.
void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
// We have a broken hint, check if it is possible to fix it by
// reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
// some register and PhysReg may be available for the other live-ranges.
SmallSet<unsigned, 4> Visited;
SmallVector<unsigned, 2> RecoloringCandidates;
HintsInfo Info;
unsigned Reg = VirtReg.reg;
unsigned PhysReg = VRM->getPhys(Reg);
// Start the recoloring algorithm from the input live-interval, then
// it will propagate to the ones that are copy-related with it.
Visited.insert(Reg);
RecoloringCandidates.push_back(Reg);
DEBUG(dbgs() << "Trying to reconcile hints for: " << PrintReg(Reg, TRI) << '('
<< PrintReg(PhysReg, TRI) << ")\n");
do {
Reg = RecoloringCandidates.pop_back_val();
// We cannot recolor physical registers.
if (TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
assert(VRM->hasPhys(Reg) && "We have an unallocated variable!");
// Get the live interval mapped with this virtual register to be able
// to check for the interference with the new color.
LiveInterval &LI = LIS->getInterval(Reg);
unsigned CurrPhys = VRM->getPhys(Reg);
// Check that the new color matches the register class constraints and
// that it is free for this live range.
if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
Matrix->checkInterference(LI, PhysReg)))
continue;
DEBUG(dbgs() << PrintReg(Reg, TRI) << '(' << PrintReg(CurrPhys, TRI)
<< ") is recolorable.\n");
// Gather the hint info.
Info.clear();
collectHintInfo(Reg, Info);
// Check if recoloring the live-range will increase the cost of the
// non-identity copies.
if (CurrPhys != PhysReg) {
DEBUG(dbgs() << "Checking profitability:\n");
BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
<< "\nNew Cost: " << NewCopiesCost.getFrequency() << '\n');
if (OldCopiesCost < NewCopiesCost) {
DEBUG(dbgs() << "=> Not profitable.\n");
continue;
}
// At this point, the cost is either cheaper or equal. If it is
// equal, we consider this is profitable because it may expose
// more recoloring opportunities.
DEBUG(dbgs() << "=> Profitable.\n");
// Recolor the live-range.
Matrix->unassign(LI);
Matrix->assign(LI, PhysReg);
}
// Push all copy-related live-ranges to keep reconciling the broken
// hints.
for (const HintInfo &HI : Info) {
if (Visited.insert(HI.Reg).second)
RecoloringCandidates.push_back(HI.Reg);
}
} while (!RecoloringCandidates.empty());
}
/// \brief Try to recolor broken hints.
/// Broken hints may be repaired by recoloring when an evicted variable
/// freed up a register for a larger live-range.
/// Consider the following example:
/// BB1:
/// a =
/// b =
/// BB2:
/// ...
/// = b
/// = a
/// Let us assume b gets split:
/// BB1:
/// a =
/// b =
/// BB2:
/// c = b
/// ...
/// d = c
/// = d
/// = a
/// Because of how the allocation works, b, c, and d may be assigned different
/// colors. Now, if a gets evicted later:
/// BB1:
/// a =
/// st a, SpillSlot
/// b =
/// BB2:
/// c = b
/// ...
/// d = c
/// = d
/// e = ld SpillSlot
/// = e
/// It is likely that we can assign the same register to b, c, and d,
/// getting rid of 2 copies.
void RAGreedy::tryHintsRecoloring() {
for (LiveInterval *LI : SetOfBrokenHints) {
assert(TargetRegisterInfo::isVirtualRegister(LI->reg) &&
"Recoloring is possible only for virtual registers");
// Some dead defs may be around (e.g., because of debug uses).
// Ignore those.
if (!VRM->hasPhys(LI->reg))
continue;
tryHintRecoloring(*LI);
}
}
unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &NewVRegs,
SmallVirtRegSet &FixedRegisters,
unsigned Depth) {
unsigned CostPerUseLimit = ~0u;
// First try assigning a free register.
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
// When NewVRegs is not empty, we may have made decisions such as evicting
// a virtual register; go with the earlier decisions and use the physical
// register.
if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
NewVRegs.empty()) {
unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
CostPerUseLimit, NewVRegs);
if (CSRReg || !NewVRegs.empty())
// Return now if we decide to use a CSR or create new vregs due to
// pre-splitting.
return CSRReg;
} else
return PhysReg;
}
LiveRangeStage Stage = getStage(VirtReg);
DEBUG(dbgs() << StageName[Stage]
<< " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
// Try to evict a less worthy live range, but only for ranges from the primary
// queue. The RS_Split ranges already failed to do this, and they should not
// get a second chance until they have been split.
if (Stage != RS_Split)
if (unsigned PhysReg =
tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit)) {
unsigned Hint = MRI->getSimpleHint(VirtReg.reg);
// If VirtReg has a hint and that hint is broken record this
// virtual register as a recoloring candidate for broken hint.
// Indeed, since we evicted a variable in its neighborhood it is
// likely we can at least partially recolor some of the
// copy-related live-ranges.
if (Hint && Hint != PhysReg)
SetOfBrokenHints.insert(&VirtReg);
return PhysReg;
}
assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
// The first time we see a live range, don't try to split or spill.
// Wait until the second time, when all smaller ranges have been allocated.
// This gives a better picture of the interference to split around.
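// For example, a fresh range that fails both tryAssign and tryEvict is
// requeued here at RS_Split; by its second visit, the smaller ranges around
// it have been allocated, so the split heuristics see the real interference
// pattern.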
if (Stage < RS_Split) {
setStage(VirtReg, RS_Split);
DEBUG(dbgs() << "wait for second round\n");
NewVRegs.push_back(VirtReg.reg);
return 0;
}
// If we couldn't allocate a register from spilling, there is probably some
// invalid inline assembly. The base class will report it.
if (Stage >= RS_Done || !VirtReg.isSpillable())
return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
Depth);
// Try splitting VirtReg or interferences.
unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
if (PhysReg || !NewVRegs.empty())
return PhysReg;
// Finally spill VirtReg itself.
NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
spiller().spill(LRE);
setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
if (VerifyEnabled)
MF->verify(this, "After spilling");
// The live virtual register requesting allocation was spilled, so tell
// the caller not to allocate anything during this round.
return 0;
}
bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
<< "********** Function: " << mf.getName() << '\n');
MF = &mf;
TRI = MF->getSubtarget().getRegisterInfo();
TII = MF->getSubtarget().getInstrInfo();
RCI.runOnMachineFunction(mf);
EnableLocalReassign = EnableLocalReassignment ||
MF->getSubtarget().enableRALocalReassignment(
MF->getTarget().getOptLevel());
if (VerifyEnabled)
MF->verify(this, "Before greedy register allocator");
RegAllocBase::init(getAnalysis<VirtRegMap>(),
getAnalysis<LiveIntervals>(),
getAnalysis<LiveRegMatrix>());
Indexes = &getAnalysis<SlotIndexes>();
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
DomTree = &getAnalysis<MachineDominatorTree>();
SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
Loops = &getAnalysis<MachineLoopInfo>();
Bundles = &getAnalysis<EdgeBundles>();
SpillPlacer = &getAnalysis<SpillPlacement>();
DebugVars = &getAnalysis<LiveDebugVariables>();
initializeCSRCost();
calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);
DEBUG(LIS->dump());
SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
ExtraRegInfo.clear();
ExtraRegInfo.resize(MRI->getNumVirtRegs());
NextCascade = 1;
IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
GlobalCand.resize(32); // This will grow as needed.
SetOfBrokenHints.clear();
allocatePhysRegs();
tryHintsRecoloring();
releaseMemory();
return true;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/GCStrategy.cpp | //===-- GCStrategy.cpp - Garbage Collector Description --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the policy object GCStrategy which describes the
// behavior of a given garbage collector.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCStrategy.h"
using namespace llvm;
GCStrategy::GCStrategy()
: UseStatepoints(false), NeededSafePoints(0), CustomReadBarriers(false),
CustomWriteBarriers(false), CustomRoots(false), InitRoots(true),
UsesMetadata(false) {}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/AllocationOrder.h | //===-- llvm/CodeGen/AllocationOrder.h - Allocation Order -*- C++ -*-------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an allocation order for virtual registers.
//
// The preferred allocation order for a virtual register depends on allocation
// hints and target hooks. The AllocationOrder class encapsulates all of that.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ALLOCATIONORDER_H
#define LLVM_LIB_CODEGEN_ALLOCATIONORDER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/MCRegisterInfo.h"
namespace llvm {
class RegisterClassInfo;
class VirtRegMap;
class LLVM_LIBRARY_VISIBILITY AllocationOrder {
SmallVector<MCPhysReg, 16> Hints;
ArrayRef<MCPhysReg> Order;
int Pos;
public:
/// Create a new AllocationOrder for VirtReg.
/// @param VirtReg Virtual register to allocate for.
/// @param VRM Virtual register map for function.
/// @param RegClassInfo Information about reserved and allocatable registers.
AllocationOrder(unsigned VirtReg,
const VirtRegMap &VRM,
const RegisterClassInfo &RegClassInfo);
/// Get the allocation order without reordered hints.
ArrayRef<MCPhysReg> getOrder() const { return Order; }
/// Return the next physical register in the allocation order, or 0.
/// It is safe to call next() again after it has returned 0; it will keep
/// returning 0 until rewind() is called.
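///
/// Hints are visited first: rewind() sets Pos to -Hints.size(), so while
/// Pos is negative, Hints.end()[Pos] walks the hints in order; once Pos
/// reaches 0, iteration continues through Order, skipping registers that
/// were already returned as hints.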
unsigned next(unsigned Limit = 0) {
if (Pos < 0)
return Hints.end()[Pos++];
if (!Limit)
Limit = Order.size();
while (Pos < int(Limit)) {
unsigned Reg = Order[Pos++];
if (!isHint(Reg))
return Reg;
}
return 0;
}
/// As next(), but allow duplicates to be returned, and stop before the
/// Limit'th register in the RegisterClassInfo allocation order.
///
/// This can produce more than Limit registers if there are hints.
unsigned nextWithDups(unsigned Limit) {
if (Pos < 0)
return Hints.end()[Pos++];
if (Pos < int(Limit))
return Order[Pos++];
return 0;
}
/// Start over from the beginning.
void rewind() { Pos = -int(Hints.size()); }
/// Return true if the last register returned from next() was a preferred register.
bool isHint() const { return Pos <= 0; }
/// Return true if PhysReg is a preferred register.
bool isHint(unsigned PhysReg) const {
return std::find(Hints.begin(), Hints.end(), PhysReg) != Hints.end();
}
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegAllocBase.h | //===-- RegAllocBase.h - basic regalloc interface and driver --*- C++ -*---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RegAllocBase class, which is the skeleton of a basic
// register allocation algorithm and interface for extending it. It provides the
// building blocks on which to construct other experimental allocators and test
// the validity of two principles:
//
// - If virtual and physical register liveness is modeled using intervals, then
// on-the-fly interference checking is cheap. Furthermore, interferences can be
// lazily cached and reused.
//
// - Register allocation complexity and generated code performance are
// determined by the effectiveness of live range splitting rather than
// optimal coloring.
//
// Following the first principle, interference checking revolves around the
// LiveIntervalUnion data structure.
//
// To fulfill the second principle, the basic allocator provides a driver for
// incremental splitting. It essentially punts on the problem of register
// coloring, instead driving the assignment of virtual to physical registers by
// the cost of splitting. The basic allocator allows for heuristic reassignment
// of registers, if a more sophisticated allocator chooses to do that.
//
// This framework provides a way to engineer the compile time vs. code
// quality trade-off without relying on a particular theoretical solver.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_REGALLOCBASE_H
#define LLVM_LIB_CODEGEN_REGALLOCBASE_H
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
namespace llvm {
template<typename T> class SmallVectorImpl;
class TargetRegisterInfo;
class VirtRegMap;
class LiveIntervals;
class LiveRegMatrix;
class Spiller;
/// RegAllocBase provides the register allocation driver and interface that can
/// be extended to add interesting heuristics.
///
/// Register allocators must override the selectOrSplit() method to implement
/// live range splitting. They must also override enqueue/dequeue to provide an
/// assignment order.
class RegAllocBase {
virtual void anchor();
protected:
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI;
VirtRegMap *VRM;
LiveIntervals *LIS;
LiveRegMatrix *Matrix;
RegisterClassInfo RegClassInfo;
RegAllocBase()
: TRI(nullptr), MRI(nullptr), VRM(nullptr), LIS(nullptr), Matrix(nullptr) {}
virtual ~RegAllocBase() {}
// A RegAlloc pass should call this before allocatePhysRegs.
void init(VirtRegMap &vrm, LiveIntervals &lis, LiveRegMatrix &mat);
// The top-level driver. The output is a VirtRegMap that is updated with
// physical register assignments.
void allocatePhysRegs();
// Get a temporary reference to a Spiller instance.
virtual Spiller &spiller() = 0;
/// enqueue - Add VirtReg to the priority queue of unassigned registers.
virtual void enqueue(LiveInterval *LI) = 0;
/// dequeue - Return the next unassigned register, or NULL.
virtual LiveInterval *dequeue() = 0;
// A RegAlloc pass should override this to provide the allocation heuristics.
// Each call must guarantee forward progress by returning an available PhysReg
// or new set of split live virtual registers. It is up to the splitter to
// converge quickly toward fully spilled live ranges.
virtual unsigned selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &splitLVRs) = 0;
// Use this group name for NamedRegionTimer.
static const char TimerGroupName[];
/// Method called when the allocator is about to remove a LiveInterval.
virtual void aboutToRemoveInterval(LiveInterval &LI) {}
public:
/// VerifyEnabled - True when -verify-regalloc is given.
static bool VerifyEnabled;
private:
void seedLiveRegs();
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/GCRootLowering.cpp | //===-- GCRootLowering.cpp - Garbage collection infrastructure ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the lowering for the gc.root mechanism.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
namespace {
/// LowerIntrinsics - This pass rewrites calls to the llvm.gcread or
/// llvm.gcwrite intrinsics, replacing them with simple loads and stores as
/// directed by the GCStrategy. It also performs automatic root initialization
/// and custom intrinsic lowering.
class LowerIntrinsics : public FunctionPass {
bool PerformDefaultLowering(Function &F, GCStrategy &Coll);
public:
static char ID;
LowerIntrinsics();
const char *getPassName() const override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool doInitialization(Module &M) override;
bool runOnFunction(Function &F) override;
};
/// GCMachineCodeAnalysis - This is a target-independent pass over the machine
/// function representation to identify safe points for the garbage collector
/// in the machine code. It inserts labels at safe points and populates a
/// GCMetadata record for each function.
class GCMachineCodeAnalysis : public MachineFunctionPass {
GCFunctionInfo *FI;
MachineModuleInfo *MMI;
const TargetInstrInfo *TII;
void FindSafePoints(MachineFunction &MF);
void VisitCallPoint(MachineBasicBlock::iterator MI);
MCSymbol *InsertLabel(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
DebugLoc DL) const;
void FindStackOffsets(MachineFunction &MF);
public:
static char ID;
GCMachineCodeAnalysis();
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction &MF) override;
};
}
// -----------------------------------------------------------------------------
INITIALIZE_PASS_BEGIN(LowerIntrinsics, "gc-lowering", "GC Lowering", false,
false)
INITIALIZE_PASS_DEPENDENCY(GCModuleInfo)
INITIALIZE_PASS_END(LowerIntrinsics, "gc-lowering", "GC Lowering", false, false)
FunctionPass *llvm::createGCLoweringPass() { return new LowerIntrinsics(); }
char LowerIntrinsics::ID = 0;
LowerIntrinsics::LowerIntrinsics() : FunctionPass(ID) {
initializeLowerIntrinsicsPass(*PassRegistry::getPassRegistry());
}
const char *LowerIntrinsics::getPassName() const {
return "Lower Garbage Collection Instructions";
}
void LowerIntrinsics::getAnalysisUsage(AnalysisUsage &AU) const {
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<GCModuleInfo>();
AU.addPreserved<DominatorTreeWrapperPass>();
}
static bool NeedsDefaultLoweringPass(const GCStrategy &C) {
// Default lowering is necessary only if read or write barriers have a default
// action. The default for roots is no action.
return !C.customWriteBarrier() || !C.customReadBarrier() ||
C.initializeRoots();
}
/// doInitialization - If this module uses the GC intrinsics, find them now.
bool LowerIntrinsics::doInitialization(Module &M) {
GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
assert(MI && "LowerIntrinsics didn't require GCModuleInfo!?");
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
if (!I->isDeclaration() && I->hasGC())
MI->getFunctionInfo(*I); // Instantiate the GC strategy.
return false;
}
/// CouldBecomeSafePoint - Predicate to conservatively determine whether the
/// instruction could introduce a safe point.
static bool CouldBecomeSafePoint(Instruction *I) {
// The natural definition of instructions which could introduce safe points
// are:
//
// - call, invoke (AfterCall, BeforeCall)
// - phis (Loops)
// - invoke, ret, unwind (Exit)
//
// However, instructions as seemingly innocuous as arithmetic can become
// libcalls upon lowering (e.g., div i64 on a 32-bit platform), so instead
// it is necessary to take a conservative approach.
if (isa<AllocaInst>(I) || isa<GetElementPtrInst>(I) || isa<StoreInst>(I) ||
isa<LoadInst>(I))
return false;
// llvm.gcroot is safe because it doesn't do anything at runtime.
if (CallInst *CI = dyn_cast<CallInst>(I))
if (Function *F = CI->getCalledFunction())
if (Intrinsic::ID IID = F->getIntrinsicID())
if (IID == Intrinsic::gcroot)
return false;
return true;
}
static bool InsertRootInitializers(Function &F, AllocaInst **Roots,
unsigned Count) {
// Scroll past alloca instructions.
BasicBlock::iterator IP = F.getEntryBlock().begin();
while (isa<AllocaInst>(IP))
++IP;
// Search for initializers in the initial BB.
SmallPtrSet<AllocaInst *, 16> InitedRoots;
for (; !CouldBecomeSafePoint(IP); ++IP)
if (StoreInst *SI = dyn_cast<StoreInst>(IP))
if (AllocaInst *AI =
dyn_cast<AllocaInst>(SI->getOperand(1)->stripPointerCasts()))
InitedRoots.insert(AI);
// Add root initializers.
bool MadeChange = false;
for (AllocaInst **I = Roots, **E = Roots + Count; I != E; ++I)
if (!InitedRoots.count(*I)) {
StoreInst *SI = new StoreInst(
ConstantPointerNull::get(cast<PointerType>(
cast<PointerType>((*I)->getType())->getElementType())),
*I);
SI->insertAfter(*I);
MadeChange = true;
}
return MadeChange;
}
/// runOnFunction - Replace gcread/gcwrite intrinsics with loads and stores.
/// Leave gcroot intrinsics; the code generator needs to see those.
bool LowerIntrinsics::runOnFunction(Function &F) {
// Quick exit for functions that do not use GC.
if (!F.hasGC())
return false;
GCFunctionInfo &FI = getAnalysis<GCModuleInfo>().getFunctionInfo(F);
GCStrategy &S = FI.getStrategy();
bool MadeChange = false;
if (NeedsDefaultLoweringPass(S))
MadeChange |= PerformDefaultLowering(F, S);
return MadeChange;
}
bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
bool LowerWr = !S.customWriteBarrier();
bool LowerRd = !S.customReadBarrier();
bool InitRoots = S.initializeRoots();
SmallVector<AllocaInst *, 32> Roots;
bool MadeChange = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++)) {
Function *F = CI->getCalledFunction();
switch (F->getIntrinsicID()) {
case Intrinsic::gcwrite:
if (LowerWr) {
// Replace a write barrier with a simple store.
Value *St =
new StoreInst(CI->getArgOperand(0), CI->getArgOperand(2), CI);
CI->replaceAllUsesWith(St);
CI->eraseFromParent();
}
break;
case Intrinsic::gcread:
if (LowerRd) {
// Replace a read barrier with a simple load.
Value *Ld = new LoadInst(CI->getArgOperand(1), "", CI);
Ld->takeName(CI);
CI->replaceAllUsesWith(Ld);
CI->eraseFromParent();
}
break;
case Intrinsic::gcroot:
if (InitRoots) {
// Initialize the GC root, but do not delete the intrinsic. The
// backend needs the intrinsic to flag the stack slot.
Roots.push_back(
cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
}
break;
default:
continue;
}
MadeChange = true;
}
}
}
if (Roots.size())
MadeChange |= InsertRootInitializers(F, Roots.begin(), Roots.size());
return MadeChange;
}
// -----------------------------------------------------------------------------
char GCMachineCodeAnalysis::ID = 0;
char &llvm::GCMachineCodeAnalysisID = GCMachineCodeAnalysis::ID;
INITIALIZE_PASS(GCMachineCodeAnalysis, "gc-analysis",
"Analyze Machine Code For Garbage Collection", false, false)
GCMachineCodeAnalysis::GCMachineCodeAnalysis() : MachineFunctionPass(ID) {}
void GCMachineCodeAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
AU.setPreservesAll();
AU.addRequired<MachineModuleInfo>();
AU.addRequired<GCModuleInfo>();
}
MCSymbol *GCMachineCodeAnalysis::InsertLabel(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
DebugLoc DL) const {
MCSymbol *Label = MBB.getParent()->getContext().createTempSymbol();
BuildMI(MBB, MI, DL, TII->get(TargetOpcode::GC_LABEL)).addSym(Label);
return Label;
}
void GCMachineCodeAnalysis::VisitCallPoint(MachineBasicBlock::iterator CI) {
// Find the return address (next instruction), too, so as to bracket the call
// instruction.
MachineBasicBlock::iterator RAI = CI;
++RAI;
if (FI->getStrategy().needsSafePoint(GC::PreCall)) {
MCSymbol *Label = InsertLabel(*CI->getParent(), CI, CI->getDebugLoc());
FI->addSafePoint(GC::PreCall, Label, CI->getDebugLoc());
}
if (FI->getStrategy().needsSafePoint(GC::PostCall)) {
MCSymbol *Label = InsertLabel(*CI->getParent(), RAI, CI->getDebugLoc());
FI->addSafePoint(GC::PostCall, Label, CI->getDebugLoc());
}
}
void GCMachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE;
++BBI)
for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end();
MI != ME; ++MI)
if (MI->isCall()) {
// Do not treat tail or sibling call sites as safe points. This is
// legal since any arguments passed to the callee which live in the
// remnants of the caller's frame will be owned and updated by the
// callee if required.
if (MI->isTerminator())
continue;
VisitCallPoint(MI);
}
}
void GCMachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) {
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
assert(TFI && "TargetRegisterInfo not available!");
for (GCFunctionInfo::roots_iterator RI = FI->roots_begin();
RI != FI->roots_end();) {
// If the root references a dead object, no need to keep it.
if (MF.getFrameInfo()->isDeadObjectIndex(RI->Num)) {
RI = FI->removeStackRoot(RI);
} else {
RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num);
++RI;
}
}
}
bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
// Quick exit for functions that do not use GC.
if (!MF.getFunction()->hasGC())
return false;
FI = &getAnalysis<GCModuleInfo>().getFunctionInfo(*MF.getFunction());
MMI = &getAnalysis<MachineModuleInfo>();
TII = MF.getSubtarget().getInstrInfo();
  // Find the size of the stack frame. There may be no correct static frame
  // size; in that case we use UINT64_MAX to represent it.
const MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
const bool DynamicFrameSize = MFI->hasVarSizedObjects() ||
RegInfo->needsStackRealignment(MF);
FI->setFrameSize(DynamicFrameSize ? UINT64_MAX : MFI->getStackSize());
// Find all safe points.
if (FI->getStrategy().needsSafePoints())
FindSafePoints(MF);
// Find the concrete stack offsets for all roots (stack slots)
FindStackOffsets(MF);
return false;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/CodeGen.cpp | //===-- CodeGen.cpp -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the common initialization routines for the
// CodeGen library.
//
//===----------------------------------------------------------------------===//
#include "llvm/InitializePasses.h"
#include "llvm-c/Initialization.h"
#include "llvm/PassRegistry.h"
using namespace llvm;
/// initializeCodeGen - Initialize all passes linked into the CodeGen library.
void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeAtomicExpandPass(Registry);
initializeBranchFolderPassPass(Registry);
initializeCodeGenPreparePass(Registry);
initializeDeadMachineInstructionElimPass(Registry);
initializeDwarfEHPreparePass(Registry);
initializeEarlyIfConverterPass(Registry);
initializeExpandISelPseudosPass(Registry);
initializeExpandPostRAPass(Registry);
initializeFinalizeMachineBundlesPass(Registry);
initializeGCMachineCodeAnalysisPass(Registry);
initializeGCModuleInfoPass(Registry);
initializeIfConverterPass(Registry);
initializeLiveDebugVariablesPass(Registry);
initializeLiveIntervalsPass(Registry);
initializeLiveStacksPass(Registry);
initializeLiveVariablesPass(Registry);
initializeLocalStackSlotPassPass(Registry);
initializeLowerIntrinsicsPass(Registry);
initializeMachineBlockFrequencyInfoPass(Registry);
initializeMachineBlockPlacementPass(Registry);
initializeMachineBlockPlacementStatsPass(Registry);
initializeMachineCSEPass(Registry);
initializeImplicitNullChecksPass(Registry);
initializeMachineCombinerPass(Registry);
initializeMachineCopyPropagationPass(Registry);
initializeMachineDominatorTreePass(Registry);
initializeMachineFunctionPrinterPassPass(Registry);
initializeMachineLICMPass(Registry);
initializeMachineLoopInfoPass(Registry);
initializeMachineModuleInfoPass(Registry);
initializeMachinePostDominatorTreePass(Registry);
initializeMachineSchedulerPass(Registry);
initializeMachineSinkingPass(Registry);
initializeMachineVerifierPassPass(Registry);
initializeOptimizePHIsPass(Registry);
initializePEIPass(Registry);
initializePHIEliminationPass(Registry);
initializePeepholeOptimizerPass(Registry);
initializePostMachineSchedulerPass(Registry);
initializePostRASchedulerPass(Registry);
initializeProcessImplicitDefsPass(Registry);
initializeRegisterCoalescerPass(Registry);
initializeShrinkWrapPass(Registry);
initializeSlotIndexesPass(Registry);
initializeStackColoringPass(Registry);
initializeStackMapLivenessPass(Registry);
initializeStackProtectorPass(Registry);
initializeStackSlotColoringPass(Registry);
initializeTailDuplicatePassPass(Registry);
initializeTargetPassConfigPass(Registry);
initializeTwoAddressInstructionPassPass(Registry);
initializeUnpackMachineBundlesPass(Registry);
initializeUnreachableBlockElimPass(Registry);
initializeUnreachableMachineBlockElimPass(Registry);
initializeVirtRegMapPass(Registry);
initializeVirtRegRewriterPass(Registry);
initializeWinEHPreparePass(Registry);
}
void LLVMInitializeCodeGen(LLVMPassRegistryRef R) {
initializeCodeGen(*unwrap(R));
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/StackMaps.cpp | //===---------------------------- StackMaps.cpp ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <iterator>
using namespace llvm;
#define DEBUG_TYPE "stackmaps"
static cl::opt<int> StackMapVersion(
"stackmap-version", cl::init(1),
cl::desc("Specify the stackmap encoding version (default = 1)"));
const char *StackMaps::WSMP = "Stack Maps: ";
PatchPointOpers::PatchPointOpers(const MachineInstr *MI)
: MI(MI), HasDef(MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
!MI->getOperand(0).isImplicit()),
IsAnyReg(MI->getOperand(getMetaIdx(CCPos)).getImm() ==
CallingConv::AnyReg) {
#ifndef NDEBUG
unsigned CheckStartIdx = 0, e = MI->getNumOperands();
while (CheckStartIdx < e && MI->getOperand(CheckStartIdx).isReg() &&
MI->getOperand(CheckStartIdx).isDef() &&
!MI->getOperand(CheckStartIdx).isImplicit())
++CheckStartIdx;
assert(getMetaIdx() == CheckStartIdx &&
"Unexpected additional definition in Patchpoint intrinsic.");
#endif
}
unsigned PatchPointOpers::getNextScratchIdx(unsigned StartIdx) const {
if (!StartIdx)
StartIdx = getVarIdx();
// Find the next scratch register (implicit def and early clobber)
unsigned ScratchIdx = StartIdx, e = MI->getNumOperands();
while (ScratchIdx < e &&
!(MI->getOperand(ScratchIdx).isReg() &&
MI->getOperand(ScratchIdx).isDef() &&
MI->getOperand(ScratchIdx).isImplicit() &&
MI->getOperand(ScratchIdx).isEarlyClobber()))
++ScratchIdx;
assert(ScratchIdx != e && "No scratch register available");
return ScratchIdx;
}
StackMaps::StackMaps(AsmPrinter &AP) : AP(AP) {
if (StackMapVersion != 1)
llvm_unreachable("Unsupported stackmap version!");
}
/// Go up the super-register chain until we hit a valid dwarf register number.
static unsigned getDwarfRegNum(unsigned Reg, const TargetRegisterInfo *TRI) {
int RegNum = TRI->getDwarfRegNum(Reg, false);
for (MCSuperRegIterator SR(Reg, TRI); SR.isValid() && RegNum < 0; ++SR)
RegNum = TRI->getDwarfRegNum(*SR, false);
assert(RegNum >= 0 && "Invalid Dwarf register number.");
return (unsigned)RegNum;
}
MachineInstr::const_mop_iterator
StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
LiveOutVec &LiveOuts) const {
const TargetRegisterInfo *TRI = AP.MF->getSubtarget().getRegisterInfo();
if (MOI->isImm()) {
switch (MOI->getImm()) {
default:
llvm_unreachable("Unrecognized operand type.");
case StackMaps::DirectMemRefOp: {
unsigned Size = AP.TM.getDataLayout()->getPointerSizeInBits();
assert((Size % 8) == 0 && "Need pointer size in bytes.");
Size /= 8;
unsigned Reg = (++MOI)->getReg();
int64_t Imm = (++MOI)->getImm();
Locs.emplace_back(StackMaps::Location::Direct, Size,
getDwarfRegNum(Reg, TRI), Imm);
break;
}
case StackMaps::IndirectMemRefOp: {
int64_t Size = (++MOI)->getImm();
assert(Size > 0 && "Need a valid size for indirect memory locations.");
unsigned Reg = (++MOI)->getReg();
int64_t Imm = (++MOI)->getImm();
Locs.emplace_back(StackMaps::Location::Indirect, Size,
getDwarfRegNum(Reg, TRI), Imm);
break;
}
case StackMaps::ConstantOp: {
++MOI;
assert(MOI->isImm() && "Expected constant operand.");
int64_t Imm = MOI->getImm();
Locs.emplace_back(Location::Constant, sizeof(int64_t), 0, Imm);
break;
}
}
return ++MOI;
}
// The physical register number will ultimately be encoded as a DWARF regno.
// The stack map also records the size of a spill slot that can hold the
// register content. (The runtime can track the actual size of the data type
// if it needs to.)
if (MOI->isReg()) {
// Skip implicit registers (this includes our scratch registers)
if (MOI->isImplicit())
return ++MOI;
assert(TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) &&
"Virtreg operands should have been rewritten before now.");
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(MOI->getReg());
assert(!MOI->getSubReg() && "Physical subreg still around.");
unsigned Offset = 0;
unsigned DwarfRegNum = getDwarfRegNum(MOI->getReg(), TRI);
unsigned LLVMRegNum = TRI->getLLVMRegNum(DwarfRegNum, false);
unsigned SubRegIdx = TRI->getSubRegIndex(LLVMRegNum, MOI->getReg());
if (SubRegIdx)
Offset = TRI->getSubRegIdxOffset(SubRegIdx);
Locs.emplace_back(Location::Register, RC->getSize(), DwarfRegNum, Offset);
return ++MOI;
}
if (MOI->isRegLiveOut())
LiveOuts = parseRegisterLiveOutMask(MOI->getRegLiveOut());
return ++MOI;
}
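// Illustrative operand stream (hypothetical values, a sketch of what the
// parser above consumes): a stackmap operand list such as
//   <DirectMemRefOp, %rbp, -8>, <ConstantOp, 33>
// yields Location{Direct, PointerSizeInBytes, dwarf(%rbp), -8} followed by
// Location{Constant, sizeof(int64_t), 0, 33}.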
void StackMaps::print(raw_ostream &OS) {
const TargetRegisterInfo *TRI =
AP.MF ? AP.MF->getSubtarget().getRegisterInfo() : nullptr;
OS << WSMP << "callsites:\n";
for (const auto &CSI : CSInfos) {
const LocationVec &CSLocs = CSI.Locations;
const LiveOutVec &LiveOuts = CSI.LiveOuts;
OS << WSMP << "callsite " << CSI.ID << "\n";
OS << WSMP << " has " << CSLocs.size() << " locations\n";
unsigned Idx = 0;
for (const auto &Loc : CSLocs) {
OS << WSMP << "\t\tLoc " << Idx << ": ";
switch (Loc.Type) {
case Location::Unprocessed:
OS << "<Unprocessed operand>";
break;
case Location::Register:
OS << "Register ";
if (TRI)
OS << TRI->getName(Loc.Reg);
else
OS << Loc.Reg;
break;
case Location::Direct:
OS << "Direct ";
if (TRI)
OS << TRI->getName(Loc.Reg);
else
OS << Loc.Reg;
if (Loc.Offset)
OS << " + " << Loc.Offset;
break;
case Location::Indirect:
OS << "Indirect ";
if (TRI)
OS << TRI->getName(Loc.Reg);
else
OS << Loc.Reg;
OS << "+" << Loc.Offset;
break;
case Location::Constant:
OS << "Constant " << Loc.Offset;
break;
case Location::ConstantIndex:
OS << "Constant Index " << Loc.Offset;
break;
}
OS << "\t[encoding: .byte " << Loc.Type << ", .byte " << Loc.Size
<< ", .short " << Loc.Reg << ", .int " << Loc.Offset << "]\n";
Idx++;
}
OS << WSMP << "\thas " << LiveOuts.size() << " live-out registers\n";
Idx = 0;
for (const auto &LO : LiveOuts) {
OS << WSMP << "\t\tLO " << Idx << ": ";
if (TRI)
OS << TRI->getName(LO.Reg);
else
OS << LO.Reg;
OS << "\t[encoding: .short " << LO.DwarfRegNum << ", .byte 0, .byte "
<< LO.Size << "]\n";
Idx++;
}
}
}
/// Create a live-out register record for the given register Reg.
StackMaps::LiveOutReg
StackMaps::createLiveOutReg(unsigned Reg, const TargetRegisterInfo *TRI) const {
unsigned DwarfRegNum = getDwarfRegNum(Reg, TRI);
unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
return LiveOutReg(Reg, DwarfRegNum, Size);
}
/// Parse the register live-out mask and return a vector of live-out registers
/// that need to be recorded in the stackmap.
StackMaps::LiveOutVec
StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
assert(Mask && "No register mask specified");
const TargetRegisterInfo *TRI = AP.MF->getSubtarget().getRegisterInfo();
LiveOutVec LiveOuts;
// Create a LiveOutReg for each bit that is set in the register mask.
for (unsigned Reg = 0, NumRegs = TRI->getNumRegs(); Reg != NumRegs; ++Reg)
if ((Mask[Reg / 32] >> Reg % 32) & 1)
LiveOuts.push_back(createLiveOutReg(Reg, TRI));
// We don't need to keep track of a register if its super-register is already
// in the list. Merge entries that refer to the same dwarf register and use
// the maximum size that needs to be spilled.
std::sort(LiveOuts.begin(), LiveOuts.end(),
[](const LiveOutReg &LHS, const LiveOutReg &RHS) {
// Only sort by the dwarf register number.
return LHS.DwarfRegNum < RHS.DwarfRegNum;
});
for (auto I = LiveOuts.begin(), E = LiveOuts.end(); I != E; ++I) {
for (auto II = std::next(I); II != E; ++II) {
if (I->DwarfRegNum != II->DwarfRegNum) {
// Skip all the now invalid entries.
I = --II;
break;
}
I->Size = std::max(I->Size, II->Size);
if (TRI->isSuperRegister(I->Reg, II->Reg))
I->Reg = II->Reg;
II->Reg = 0; // mark for deletion.
}
}
LiveOuts.erase(
std::remove_if(LiveOuts.begin(), LiveOuts.end(),
[](const LiveOutReg &LO) { return LO.Reg == 0; }),
LiveOuts.end());
return LiveOuts;
}
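// Worked example for the merge above (hypothetical registers): if both %xmm0
// (16 bytes) and %ymm0 (32 bytes) are set in the mask, they share a dwarf
// register number; after sorting, the pass keeps one entry carrying the
// super-register %ymm0 and the maximum spill size 32, and erases the
// duplicate via the remove_if/erase idiom.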
void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
MachineInstr::const_mop_iterator MOI,
MachineInstr::const_mop_iterator MOE,
bool recordResult) {
MCContext &OutContext = AP.OutStreamer->getContext();
MCSymbol *MILabel = OutContext.createTempSymbol();
AP.OutStreamer->EmitLabel(MILabel);
LocationVec Locations;
LiveOutVec LiveOuts;
if (recordResult) {
assert(PatchPointOpers(&MI).hasDef() && "Stackmap has no return value.");
parseOperand(MI.operands_begin(), std::next(MI.operands_begin()), Locations,
LiveOuts);
}
// Parse operands.
while (MOI != MOE) {
MOI = parseOperand(MOI, MOE, Locations, LiveOuts);
}
// Move large constants into the constant pool.
for (auto &Loc : Locations) {
// Constants are encoded as sign-extended integers.
// -1 is directly encoded as .long 0xFFFFFFFF with no constant pool.
if (Loc.Type == Location::Constant && !isInt<32>(Loc.Offset)) {
Loc.Type = Location::ConstantIndex;
// ConstPool is intentionally a MapVector of 'uint64_t's (as
// opposed to 'int64_t's). We should never be in a situation
// where we have to insert either the tombstone or the empty
// keys into a map, and for a DenseMap<uint64_t, T> these are
// (uint64_t)0 and (uint64_t)-1. They can be and are
// represented using 32 bit integers.
assert((uint64_t)Loc.Offset != DenseMapInfo<uint64_t>::getEmptyKey() &&
(uint64_t)Loc.Offset !=
DenseMapInfo<uint64_t>::getTombstoneKey() &&
"empty and tombstone keys should fit in 32 bits!");
auto Result = ConstPool.insert(std::make_pair(Loc.Offset, Loc.Offset));
Loc.Offset = Result.first - ConstPool.begin();
}
}
// Create an expression to calculate the offset of the callsite from function
// entry.
const MCExpr *CSOffsetExpr = MCBinaryExpr::createSub(
MCSymbolRefExpr::create(MILabel, OutContext),
MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext), OutContext);
CSInfos.emplace_back(CSOffsetExpr, ID, std::move(Locations),
std::move(LiveOuts));
// Record the stack size of the current function.
const MachineFrameInfo *MFI = AP.MF->getFrameInfo();
const TargetRegisterInfo *RegInfo = AP.MF->getSubtarget().getRegisterInfo();
bool HasDynamicFrameSize =
MFI->hasVarSizedObjects() || RegInfo->needsStackRealignment(*(AP.MF));
FnStackSize[AP.CurrentFnSym] =
HasDynamicFrameSize ? UINT64_MAX : MFI->getStackSize();
}
void StackMaps::recordStackMap(const MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::STACKMAP && "expected stackmap");
int64_t ID = MI.getOperand(0).getImm();
recordStackMapOpers(MI, ID, std::next(MI.operands_begin(), 2),
MI.operands_end());
}
void StackMaps::recordPatchPoint(const MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::PATCHPOINT && "expected patchpoint");
PatchPointOpers opers(&MI);
int64_t ID = opers.getMetaOper(PatchPointOpers::IDPos).getImm();
auto MOI = std::next(MI.operands_begin(), opers.getStackMapStartIdx());
recordStackMapOpers(MI, ID, MOI, MI.operands_end(),
opers.isAnyReg() && opers.hasDef());
#ifndef NDEBUG
// verify anyregcc
auto &Locations = CSInfos.back().Locations;
if (opers.isAnyReg()) {
unsigned NArgs = opers.getMetaOper(PatchPointOpers::NArgPos).getImm();
for (unsigned i = 0, e = (opers.hasDef() ? NArgs + 1 : NArgs); i != e; ++i)
assert(Locations[i].Type == Location::Register &&
"anyreg arg must be in reg.");
}
#endif
}
void StackMaps::recordStatepoint(const MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::STATEPOINT && "expected statepoint");
StatepointOpers opers(&MI);
// Record all the deopt and gc operands (they're contiguous and run from the
// initial index to the end of the operand list)
const unsigned StartIdx = opers.getVarIdx();
recordStackMapOpers(MI, opers.getID(), MI.operands_begin() + StartIdx,
MI.operands_end(), false);
}
/// Emit the stackmap header.
///
/// Header {
/// uint8 : Stack Map Version (currently 1)
/// uint8 : Reserved (expected to be 0)
/// uint16 : Reserved (expected to be 0)
/// }
/// uint32 : NumFunctions
/// uint32 : NumConstants
/// uint32 : NumRecords
void StackMaps::emitStackmapHeader(MCStreamer &OS) {
// Header.
OS.EmitIntValue(StackMapVersion, 1); // Version.
OS.EmitIntValue(0, 1); // Reserved.
OS.EmitIntValue(0, 2); // Reserved.
// Num functions.
DEBUG(dbgs() << WSMP << "#functions = " << FnStackSize.size() << '\n');
OS.EmitIntValue(FnStackSize.size(), 4);
// Num constants.
DEBUG(dbgs() << WSMP << "#constants = " << ConstPool.size() << '\n');
OS.EmitIntValue(ConstPool.size(), 4);
// Num callsites.
DEBUG(dbgs() << WSMP << "#callsites = " << CSInfos.size() << '\n');
OS.EmitIntValue(CSInfos.size(), 4);
}
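// A consumer-side sketch of the header emitted above (illustration only; the
// authoritative layout is the emission code itself). Assuming a packed,
// little-endian encoding and that the <cstdint> types are in scope via the
// LLVM headers included above, a hypothetical reader could mirror it as:
namespace {
struct StackMapHeaderV1 { // hypothetical name, not part of any format spec
  uint8_t Version;        // currently 1
  uint8_t Reserved8;      // expected to be 0
  uint16_t Reserved16;    // expected to be 0
  uint32_t NumFunctions;  // number of StkSizeRecord entries
  uint32_t NumConstants;  // number of constant pool entries
  uint32_t NumRecords;    // number of StkMapRecord entries
};
} // end anonymous namespace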
/// Emit the function frame record for each function.
///
/// StkSizeRecord[NumFunctions] {
/// uint64 : Function Address
/// uint64 : Stack Size
/// }
void StackMaps::emitFunctionFrameRecords(MCStreamer &OS) {
// Function Frame records.
DEBUG(dbgs() << WSMP << "functions:\n");
for (auto const &FR : FnStackSize) {
DEBUG(dbgs() << WSMP << "function addr: " << FR.first
<< " frame size: " << FR.second);
OS.EmitSymbolValue(FR.first, 8);
OS.EmitIntValue(FR.second, 8);
}
}
/// Emit the constant pool.
///
/// int64 : Constants[NumConstants]
void StackMaps::emitConstantPoolEntries(MCStreamer &OS) {
// Constant pool entries.
DEBUG(dbgs() << WSMP << "constants:\n");
for (const auto &ConstEntry : ConstPool) {
DEBUG(dbgs() << WSMP << ConstEntry.second << '\n');
OS.EmitIntValue(ConstEntry.second, 8);
}
}
/// Emit the callsite info for each callsite.
///
/// StkMapRecord[NumRecords] {
/// uint64 : PatchPoint ID
/// uint32 : Instruction Offset
/// uint16 : Reserved (record flags)
/// uint16 : NumLocations
/// Location[NumLocations] {
/// uint8 : Register | Direct | Indirect | Constant | ConstantIndex
/// uint8 : Size in Bytes
/// uint16 : Dwarf RegNum
/// int32 : Offset
/// }
/// uint16 : Padding
/// uint16 : NumLiveOuts
/// LiveOuts[NumLiveOuts] {
/// uint16 : Dwarf RegNum
/// uint8 : Reserved
/// uint8 : Size in Bytes
/// }
/// uint32 : Padding (only if required to align to 8 bytes)
/// }
///
/// Location Encoding, Type, Value:
/// 0x1, Register, Reg (value in register)
/// 0x2, Direct, Reg + Offset (frame index)
/// 0x3, Indirect, [Reg + Offset] (spilled value)
/// 0x4, Constant, Offset (small constant)
/// 0x5, ConstIndex, Constants[Offset] (large constant)
void StackMaps::emitCallsiteEntries(MCStreamer &OS) {
DEBUG(print(dbgs()));
// Callsite entries.
for (const auto &CSI : CSInfos) {
const LocationVec &CSLocs = CSI.Locations;
const LiveOutVec &LiveOuts = CSI.LiveOuts;
// Verify stack map entry. It's better to communicate a problem to the
// runtime than crash in case of in-process compilation. Currently, we do
// simple overflow checks, but we may eventually communicate other
// compilation errors this way.
if (CSLocs.size() > UINT16_MAX || LiveOuts.size() > UINT16_MAX) {
OS.EmitIntValue(UINT64_MAX, 8); // Invalid ID.
OS.EmitValue(CSI.CSOffsetExpr, 4);
OS.EmitIntValue(0, 2); // Reserved.
OS.EmitIntValue(0, 2); // 0 locations.
OS.EmitIntValue(0, 2); // padding.
OS.EmitIntValue(0, 2); // 0 live-out registers.
OS.EmitIntValue(0, 4); // padding.
continue;
}
OS.EmitIntValue(CSI.ID, 8);
OS.EmitValue(CSI.CSOffsetExpr, 4);
// Reserved for flags.
OS.EmitIntValue(0, 2);
OS.EmitIntValue(CSLocs.size(), 2);
for (const auto &Loc : CSLocs) {
OS.EmitIntValue(Loc.Type, 1);
OS.EmitIntValue(Loc.Size, 1);
OS.EmitIntValue(Loc.Reg, 2);
OS.EmitIntValue(Loc.Offset, 4);
}
    // Num live-out registers and padding to align to 4 bytes.
OS.EmitIntValue(0, 2);
OS.EmitIntValue(LiveOuts.size(), 2);
for (const auto &LO : LiveOuts) {
OS.EmitIntValue(LO.DwarfRegNum, 2);
OS.EmitIntValue(0, 1);
OS.EmitIntValue(LO.Size, 1);
}
    // Emit alignment to 8 bytes.
OS.EmitValueToAlignment(8);
}
}
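// Illustrative serialization (hypothetical numbers, a sketch rather than a
// normative example): a record with ID 7 at offset 0x40, one Register
// location in dwarf reg 5, and no live-outs would be emitted roughly as
//   .quad  7           // PatchPoint ID
//   .long  0x40        // instruction offset from function entry
//   .short 0, 1        // reserved flags, NumLocations
//   .byte  1, 8        // Location type (Register), size in bytes
//   .short 5           // dwarf regnum
//   .long  0           // offset
//   .short 0, 0        // padding, NumLiveOuts
//   .p2align 3         // align to 8 bytes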
/// Serialize the stackmap data.
void StackMaps::serializeToStackMapSection() {
(void)WSMP;
// Bail out if there's no stack map data.
  assert((!CSInfos.empty() || ConstPool.empty()) &&
         "Expected empty constant pool too!");
  assert((!CSInfos.empty() || FnStackSize.empty()) &&
         "Expected empty function record too!");
if (CSInfos.empty())
return;
MCContext &OutContext = AP.OutStreamer->getContext();
MCStreamer &OS = *AP.OutStreamer;
// Create the section.
MCSection *StackMapSection =
OutContext.getObjectFileInfo()->getStackMapSection();
OS.SwitchSection(StackMapSection);
// Emit a dummy symbol to force section inclusion.
OS.EmitLabel(OutContext.getOrCreateSymbol(Twine("__LLVM_StackMaps")));
// Serialize data.
DEBUG(dbgs() << "********** Stack Map Output **********\n");
emitStackmapHeader(OS);
emitFunctionFrameRecords(OS);
emitConstantPoolEntries(OS);
emitCallsiteEntries(OS);
OS.AddBlankLine();
// Clean up.
CSInfos.clear();
ConstPool.clear();
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineDominators.cpp | //===- MachineDominators.cpp - Machine Dominator Calculation --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements simple dominator construction algorithms for finding
// forward dominators on machine functions.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/SmallBitVector.h"
using namespace llvm;
namespace llvm {
template class DomTreeNodeBase<MachineBasicBlock>;
template class DominatorTreeBase<MachineBasicBlock>;
}
char MachineDominatorTree::ID = 0;
INITIALIZE_PASS(MachineDominatorTree, "machinedomtree",
"MachineDominator Tree Construction", true, true)
char &llvm::MachineDominatorsID = MachineDominatorTree::ID;
void MachineDominatorTree::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool MachineDominatorTree::runOnMachineFunction(MachineFunction &F) {
CriticalEdgesToSplit.clear();
NewBBs.clear();
DT->recalculate(F);
return false;
}
MachineDominatorTree::MachineDominatorTree()
: MachineFunctionPass(ID) {
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
DT = new DominatorTreeBase<MachineBasicBlock>(false);
}
MachineDominatorTree::~MachineDominatorTree() {
delete DT;
}
void MachineDominatorTree::releaseMemory() {
DT->releaseMemory();
}
void MachineDominatorTree::print(raw_ostream &OS, const Module*) const {
DT->print(OS);
}
void MachineDominatorTree::applySplitCriticalEdges() const {
// Bail out early if there is nothing to do.
if (CriticalEdgesToSplit.empty())
return;
  // For each element in CriticalEdgesToSplit, remember whether or not that
  // element is the new immediate dominator of its successor. The mapping is
  // done by index, i.e., the information for the ith element of
  // CriticalEdgesToSplit is the ith element of IsNewIDom.
SmallBitVector IsNewIDom(CriticalEdgesToSplit.size(), true);
size_t Idx = 0;
// Collect all the dominance properties info, before invalidating
// the underlying DT.
for (CriticalEdge &Edge : CriticalEdgesToSplit) {
// Update dominator information.
MachineBasicBlock *Succ = Edge.ToBB;
MachineDomTreeNode *SuccDTNode = DT->getNode(Succ);
for (MachineBasicBlock *PredBB : Succ->predecessors()) {
if (PredBB == Edge.NewBB)
continue;
// If we are in this situation:
// FromBB1 FromBB2
// + +
// + + + +
// + + + +
// ... Split1 Split2 ...
// + +
// + +
// +
// Succ
      // Instead of checking the dominance property with Split2, we check it
      // with FromBB2, since Split2 is not yet known to the underlying DT
      // structure.
if (NewBBs.count(PredBB)) {
assert(PredBB->pred_size() == 1 && "A basic block resulting from a "
"critical edge split has more "
"than one predecessor!");
PredBB = *PredBB->pred_begin();
}
if (!DT->dominates(SuccDTNode, DT->getNode(PredBB))) {
IsNewIDom[Idx] = false;
break;
}
}
++Idx;
}
// Now, update DT with the collected dominance properties info.
Idx = 0;
for (CriticalEdge &Edge : CriticalEdgesToSplit) {
// We know FromBB dominates NewBB.
MachineDomTreeNode *NewDTNode = DT->addNewBlock(Edge.NewBB, Edge.FromBB);
// If all the other predecessors of "Succ" are dominated by "Succ" itself
// then the new block is the new immediate dominator of "Succ". Otherwise,
// the new block doesn't dominate anything.
if (IsNewIDom[Idx])
DT->changeImmediateDominator(DT->getNode(Edge.ToBB), NewDTNode);
++Idx;
}
NewBBs.clear();
CriticalEdgesToSplit.clear();
}
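// Illustrative scenario (a sketch, not from the source): splitting the
// critical edge FromBB -> Succ inserts NewBB on that edge. If every other
// predecessor of Succ is itself dominated by Succ (e.g. they reach Succ via
// loop back edges), NewBB becomes Succ's new immediate dominator; otherwise
// NewBB strictly dominates only itself and Succ keeps its old immediate
// dominator.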
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/ExpandPostRAPseudos.cpp | //===-- ExpandPostRAPseudos.cpp - Pseudo instruction expansion pass -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pass that expands COPY and SUBREG_TO_REG pseudo
// instructions after register allocation.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "postrapseudos"
namespace {
struct ExpandPostRA : public MachineFunctionPass {
private:
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
public:
static char ID; // Pass identification, replacement for typeid
ExpandPostRA() : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
MachineFunctionPass::getAnalysisUsage(AU);
}
/// runOnMachineFunction - pass entry point
bool runOnMachineFunction(MachineFunction&) override;
private:
bool LowerSubregToReg(MachineInstr *MI);
bool LowerCopy(MachineInstr *MI);
void TransferImplicitDefs(MachineInstr *MI);
};
} // end anonymous namespace
char ExpandPostRA::ID = 0;
char &llvm::ExpandPostRAPseudosID = ExpandPostRA::ID;
INITIALIZE_PASS(ExpandPostRA, "postrapseudos",
"Post-RA pseudo instruction expansion pass", false, false)
/// TransferImplicitDefs - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it. Copy any implicit-def
/// operands from MI to the replacement instruction.
void
ExpandPostRA::TransferImplicitDefs(MachineInstr *MI) {
MachineBasicBlock::iterator CopyMI = MI;
--CopyMI;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isImplicit() || MO.isUse())
continue;
CopyMI->addOperand(MachineOperand::CreateReg(MO.getReg(), true, true));
}
}
bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
MachineBasicBlock *MBB = MI->getParent();
assert((MI->getOperand(0).isReg() && MI->getOperand(0).isDef()) &&
MI->getOperand(1).isImm() &&
(MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) &&
MI->getOperand(3).isImm() && "Invalid subreg_to_reg");
unsigned DstReg = MI->getOperand(0).getReg();
unsigned InsReg = MI->getOperand(2).getReg();
assert(!MI->getOperand(2).getSubReg() && "SubIdx on physreg?");
unsigned SubIdx = MI->getOperand(3).getImm();
assert(SubIdx != 0 && "Invalid index for insert_subreg");
unsigned DstSubReg = TRI->getSubReg(DstReg, SubIdx);
assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
"Insert destination must be in a physical register");
assert(TargetRegisterInfo::isPhysicalRegister(InsReg) &&
"Inserted value must be in a physical register");
DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
if (MI->allDefsAreDead()) {
MI->setDesc(TII->get(TargetOpcode::KILL));
DEBUG(dbgs() << "subreg: replaced by: " << *MI);
return true;
}
if (DstSubReg == InsReg) {
// No need to insert an identity copy instruction.
// Watch out for case like this:
// %RAX<def> = SUBREG_TO_REG 0, %EAX<kill>, 3
// We must leave %RAX live.
if (DstReg != InsReg) {
MI->setDesc(TII->get(TargetOpcode::KILL));
MI->RemoveOperand(3); // SubIdx
MI->RemoveOperand(1); // Imm
DEBUG(dbgs() << "subreg: replace by: " << *MI);
return true;
}
DEBUG(dbgs() << "subreg: eliminated!");
} else {
TII->copyPhysReg(*MBB, MI, MI->getDebugLoc(), DstSubReg, InsReg,
MI->getOperand(2).isKill());
// Implicitly define DstReg for subsequent uses.
MachineBasicBlock::iterator CopyMI = MI;
--CopyMI;
CopyMI->addRegisterDefined(DstReg);
DEBUG(dbgs() << "subreg: " << *CopyMI);
}
DEBUG(dbgs() << '\n');
MBB->erase(MI);
return true;
}
bool ExpandPostRA::LowerCopy(MachineInstr *MI) {
if (MI->allDefsAreDead()) {
DEBUG(dbgs() << "dead copy: " << *MI);
MI->setDesc(TII->get(TargetOpcode::KILL));
DEBUG(dbgs() << "replaced by: " << *MI);
return true;
}
MachineOperand &DstMO = MI->getOperand(0);
MachineOperand &SrcMO = MI->getOperand(1);
if (SrcMO.getReg() == DstMO.getReg()) {
DEBUG(dbgs() << "identity copy: " << *MI);
// No need to insert an identity copy instruction, but replace with a KILL
// if liveness is changed.
if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
// We must make sure the super-register gets killed. Replace the
// instruction with KILL.
MI->setDesc(TII->get(TargetOpcode::KILL));
DEBUG(dbgs() << "replaced by: " << *MI);
return true;
}
// Vanilla identity copy.
MI->eraseFromParent();
return true;
}
DEBUG(dbgs() << "real copy: " << *MI);
TII->copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(),
DstMO.getReg(), SrcMO.getReg(), SrcMO.isKill());
if (MI->getNumOperands() > 2)
TransferImplicitDefs(MI);
DEBUG({
MachineBasicBlock::iterator dMI = MI;
dbgs() << "replaced by: " << *(--dMI);
});
MI->eraseFromParent();
return true;
}
/// runOnMachineFunction - Reduce subregister inserts and extracts to register
/// copies.
///
bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "Machine Function\n"
<< "********** EXPANDING POST-RA PSEUDO INSTRS **********\n"
<< "********** Function: " << MF.getName() << '\n');
TRI = MF.getSubtarget().getRegisterInfo();
TII = MF.getSubtarget().getInstrInfo();
bool MadeChange = false;
for (MachineFunction::iterator mbbi = MF.begin(), mbbe = MF.end();
mbbi != mbbe; ++mbbi) {
for (MachineBasicBlock::iterator mi = mbbi->begin(), me = mbbi->end();
mi != me;) {
MachineInstr *MI = mi;
// Advance iterator here because MI may be erased.
++mi;
// Only expand pseudos.
if (!MI->isPseudo())
continue;
// Give targets a chance to expand even standard pseudos.
if (TII->expandPostRAPseudo(MI)) {
MadeChange = true;
continue;
}
// Expand standard pseudos.
switch (MI->getOpcode()) {
case TargetOpcode::SUBREG_TO_REG:
MadeChange |= LowerSubregToReg(MI);
break;
case TargetOpcode::COPY:
MadeChange |= LowerCopy(MI);
break;
case TargetOpcode::DBG_VALUE:
continue;
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::EXTRACT_SUBREG:
llvm_unreachable("Sub-register pseudos should have been eliminated.");
}
}
}
return MadeChange;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/StatepointExampleGC.cpp | //===-- StatepointDefaultGC.cpp - The default statepoint GC strategy ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a GCStrategy which serves as an example for the usage
// of a statepoint based lowering strategy. This GCStrategy is intended to be
// suitable as a default implementation usable with any collector which can
// consume the standard stackmap format generated by statepoints, uses the
// default address space to distinguish between gc managed and non-gc managed
// pointers, and has reasonable relocation semantics.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h"
using namespace llvm;
namespace {
class StatepointGC : public GCStrategy {
public:
StatepointGC() {
UseStatepoints = true;
// These options are all gc.root specific, we specify them so that the
// gc.root lowering code doesn't run.
InitRoots = false;
NeededSafePoints = 0;
UsesMetadata = false;
CustomRoots = false;
}
Optional<bool> isGCManagedPointer(const Value *V) const override {
// Method is only valid on pointer typed values.
PointerType *PT = cast<PointerType>(V->getType());
// For the sake of this example GC, we arbitrarily pick addrspace(1) as our
// GC managed heap. We know that a pointer into this heap needs to be
// updated and that no other pointer does. Note that addrspace(1) is used
// only as an example, it has no special meaning, and is not reserved for
// GC usage.
return (1 == PT->getAddressSpace());
}
};
}
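// Illustrative IR (hypothetical values): under this strategy,
//   %obj = load i8 addrspace(1)*, i8 addrspace(1)** %slot
// is reported as GC-managed (it lives in addrspace(1)), while
//   %raw = load i8*, i8** %other
// is not (it lives in the default address space 0).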
static GCRegistry::Add<StatepointGC> X("statepoint-example",
"an example strategy for statepoint");
namespace llvm {
void linkStatepointExampleGC() {}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/EdgeBundles.cpp | //===-------- EdgeBundles.cpp - Bundles of CFG edges ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides the implementation of the EdgeBundles analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/GraphWriter.h"
using namespace llvm;
static cl::opt<bool>
ViewEdgeBundles("view-edge-bundles", cl::Hidden,
cl::desc("Pop up a window to show edge bundle graphs"));
char EdgeBundles::ID = 0;
INITIALIZE_PASS(EdgeBundles, "edge-bundles", "Bundle Machine CFG Edges",
/* cfg = */true, /* analysis = */ true)
char &llvm::EdgeBundlesID = EdgeBundles::ID;
void EdgeBundles::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool EdgeBundles::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
EC.clear();
EC.grow(2 * MF->getNumBlockIDs());
for (const auto &MBB : *MF) {
unsigned OutE = 2 * MBB.getNumber() + 1;
// Join the outgoing bundle with the ingoing bundles of all successors.
for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(),
SE = MBB.succ_end(); SI != SE; ++SI)
EC.join(OutE, 2 * (*SI)->getNumber());
}
EC.compress();
if (ViewEdgeBundles)
view();
// Compute the reverse mapping.
Blocks.clear();
Blocks.resize(getNumBundles());
for (unsigned i = 0, e = MF->getNumBlockIDs(); i != e; ++i) {
unsigned b0 = getBundle(i, 0);
unsigned b1 = getBundle(i, 1);
Blocks[b0].push_back(i);
if (b1 != b0)
Blocks[b1].push_back(i);
}
return false;
}
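// Worked example of the numbering scheme above (hypothetical blocks): block
// #5 owns ingoing bundle node 2*5 = 10 and outgoing node 2*5+1 = 11; a CFG
// edge 5 -> 7 joins node 11 with node 2*7 = 14, so all edges leaving #5 and
// all edges entering #7 end up in the same bundle.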
/// Specialize WriteGraph, the standard implementation won't work.
namespace llvm {
template<>
raw_ostream &WriteGraph<>(raw_ostream &O, const EdgeBundles &G,
bool ShortNames,
const Twine &Title) {
const MachineFunction *MF = G.getMachineFunction();
O << "digraph {\n";
for (const auto &MBB : *MF) {
unsigned BB = MBB.getNumber();
O << "\t\"BB#" << BB << "\" [ shape=box ]\n"
<< '\t' << G.getBundle(BB, false) << " -> \"BB#" << BB << "\"\n"
<< "\t\"BB#" << BB << "\" -> " << G.getBundle(BB, true) << '\n';
for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(),
SE = MBB.succ_end(); SI != SE; ++SI)
O << "\t\"BB#" << BB << "\" -> \"BB#" << (*SI)->getNumber()
<< "\" [ color=lightgray ]\n";
}
O << "}\n";
return O;
}
}
/// view - Visualize the annotated bipartite CFG with Graphviz.
void EdgeBundles::view() const {
ViewGraph(*this, "EdgeBundles");
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/RegisterClassInfo.cpp | //===-- RegisterClassInfo.cpp - Dynamic Register Class Info ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the RegisterClassInfo class which provides dynamic
// information about target register classes. Callee-saved vs. caller-saved and
// reserved registers depend on calling conventions and other dynamic
// information, so some things cannot be determined statically.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
static cl::opt<unsigned>
StressRA("stress-regalloc", cl::Hidden, cl::init(0), cl::value_desc("N"),
cl::desc("Limit all regclasses to N registers"));
RegisterClassInfo::RegisterClassInfo()
: Tag(0), MF(nullptr), TRI(nullptr), CalleeSaved(nullptr) {}
void RegisterClassInfo::runOnMachineFunction(const MachineFunction &mf) {
bool Update = false;
MF = &mf;
// Allocate new array the first time we see a new target.
if (MF->getSubtarget().getRegisterInfo() != TRI) {
TRI = MF->getSubtarget().getRegisterInfo();
RegClass.reset(new RCInfo[TRI->getNumRegClasses()]);
unsigned NumPSets = TRI->getNumRegPressureSets();
PSetLimits.reset(new unsigned[NumPSets]);
std::fill(&PSetLimits[0], &PSetLimits[NumPSets], 0);
Update = true;
}
// Does this MF have different CSRs?
assert(TRI && "no register info set");
const MCPhysReg *CSR = TRI->getCalleeSavedRegs(MF);
if (Update || CSR != CalleeSaved) {
// Build a CSRNum map. Every CSR alias gets an entry pointing to the last
// overlapping CSR.
CSRNum.clear();
CSRNum.resize(TRI->getNumRegs(), 0);
for (unsigned N = 0; unsigned Reg = CSR[N]; ++N)
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
CSRNum[*AI] = N + 1; // 0 means no CSR, 1 means CalleeSaved[0], ...
Update = true;
}
CalleeSaved = CSR;
// Different reserved registers?
const BitVector &RR = MF->getRegInfo().getReservedRegs();
if (Reserved.size() != RR.size() || RR != Reserved) {
Update = true;
Reserved = RR;
}
// Invalidate cached information from previous function.
if (Update)
++Tag;
}
/// compute - Compute the preferred allocation order for RC with reserved
/// registers filtered out. Volatile registers come first followed by CSR
/// aliases ordered according to the CSR order specified by the target.
void RegisterClassInfo::compute(const TargetRegisterClass *RC) const {
assert(RC && "no register class given");
RCInfo &RCI = RegClass[RC->getID()];
// Raw register count, including all reserved regs.
unsigned NumRegs = RC->getNumRegs();
if (!RCI.Order)
RCI.Order.reset(new MCPhysReg[NumRegs]);
unsigned N = 0;
SmallVector<MCPhysReg, 16> CSRAlias;
unsigned MinCost = 0xff;
unsigned LastCost = ~0u;
unsigned LastCostChange = 0;
// FIXME: Once targets reserve registers instead of removing them from the
// allocation order, we can simply use begin/end here.
ArrayRef<MCPhysReg> RawOrder = RC->getRawAllocationOrder(*MF);
for (unsigned i = 0; i != RawOrder.size(); ++i) {
unsigned PhysReg = RawOrder[i];
// Remove reserved registers from the allocation order.
if (Reserved.test(PhysReg))
continue;
unsigned Cost = TRI->getCostPerUse(PhysReg);
MinCost = std::min(MinCost, Cost);
if (CSRNum[PhysReg])
// PhysReg aliases a CSR, save it for later.
CSRAlias.push_back(PhysReg);
else {
if (Cost != LastCost)
LastCostChange = N;
RCI.Order[N++] = PhysReg;
LastCost = Cost;
}
}
RCI.NumRegs = N + CSRAlias.size();
assert (RCI.NumRegs <= NumRegs && "Allocation order larger than regclass");
// CSR aliases go after the volatile registers, preserve the target's order.
for (unsigned i = 0, e = CSRAlias.size(); i != e; ++i) {
unsigned PhysReg = CSRAlias[i];
unsigned Cost = TRI->getCostPerUse(PhysReg);
if (Cost != LastCost)
LastCostChange = N;
RCI.Order[N++] = PhysReg;
LastCost = Cost;
}
// Register allocator stress test. Clip register class to N registers.
if (StressRA && RCI.NumRegs > StressRA)
RCI.NumRegs = StressRA;
// Check if RC is a proper sub-class.
if (const TargetRegisterClass *Super =
TRI->getLargestLegalSuperClass(RC, *MF))
if (Super != RC && getNumAllocatableRegs(Super) > RCI.NumRegs)
RCI.ProperSubClass = true;
RCI.MinCost = uint8_t(MinCost);
RCI.LastCostChange = LastCostChange;
DEBUG({
dbgs() << "AllocationOrder(" << TRI->getRegClassName(RC) << ") = [";
for (unsigned I = 0; I != RCI.NumRegs; ++I)
dbgs() << ' ' << PrintReg(RCI.Order[I], TRI);
dbgs() << (RCI.ProperSubClass ? " ] (sub-class)\n" : " ]\n");
});
// RCI is now up-to-date.
RCI.Tag = Tag;
}
/// This is not accurate because two overlapping register sets may have some
/// nonoverlapping reserved registers. However, computing the allocation order
/// for all register classes would be too expensive.
unsigned RegisterClassInfo::computePSetLimit(unsigned Idx) const {
const TargetRegisterClass *RC = nullptr;
unsigned NumRCUnits = 0;
for (TargetRegisterInfo::regclass_iterator
RI = TRI->regclass_begin(), RE = TRI->regclass_end(); RI != RE; ++RI) {
const int *PSetID = TRI->getRegClassPressureSets(*RI);
for (; *PSetID != -1; ++PSetID) {
if ((unsigned)*PSetID == Idx)
break;
}
if (*PSetID == -1)
continue;
// Found a register class that counts against this pressure set.
// For efficiency, only compute the set order for the largest set.
unsigned NUnits = TRI->getRegClassWeight(*RI).WeightLimit;
if (!RC || NUnits > NumRCUnits) {
RC = *RI;
NumRCUnits = NUnits;
}
}
compute(RC);
unsigned NReserved = RC->getNumRegs() - getNumAllocatableRegs(RC);
return TRI->getRegPressureSetLimit(*MF, Idx) -
TRI->getRegClassWeight(RC).RegWeight * NReserved;
}
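// Illustrative arithmetic (made-up numbers): if the target reports a pressure
// set limit of 24 units, the chosen register class weighs 2 units per
// register, and 3 of its registers are reserved, the function returns
// 24 - 2 * 3 = 18.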
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/MachineModuleInfoImpls.cpp | //===-- llvm/CodeGen/MachineModuleInfoImpls.cpp ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements object-file format specific implementations of
// MachineModuleInfoImpl.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/MC/MCSymbol.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
// MachineModuleInfoMachO
//===----------------------------------------------------------------------===//
// Out of line virtual method.
void MachineModuleInfoMachO::anchor() {}
void MachineModuleInfoELF::anchor() {}
// HLSL Change: changed calling convention to __cdecl
static int __cdecl SortSymbolPair(const void *LHS, const void *RHS) {
typedef std::pair<MCSymbol*, MachineModuleInfoImpl::StubValueTy> PairTy;
const MCSymbol *LHSS = ((const PairTy *)LHS)->first;
const MCSymbol *RHSS = ((const PairTy *)RHS)->first;
return LHSS->getName().compare(RHSS->getName());
}
MachineModuleInfoImpl::SymbolListTy MachineModuleInfoImpl::getSortedStubs(
DenseMap<MCSymbol *, MachineModuleInfoImpl::StubValueTy> &Map) {
MachineModuleInfoImpl::SymbolListTy List(Map.begin(), Map.end());
if (!List.empty())
qsort(&List[0], List.size(), sizeof(List[0]), SortSymbolPair);
Map.clear();
return List;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/LiveRegMatrix.cpp | //===-- LiveRegMatrix.cpp - Track register interference -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the LiveRegMatrix analysis pass.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumAssigned , "Number of registers assigned");
STATISTIC(NumUnassigned , "Number of registers unassigned");
char LiveRegMatrix::ID = 0;
INITIALIZE_PASS_BEGIN(LiveRegMatrix, "liveregmatrix",
"Live Register Matrix", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_END(LiveRegMatrix, "liveregmatrix",
"Live Register Matrix", false, false)
LiveRegMatrix::LiveRegMatrix() : MachineFunctionPass(ID),
UserTag(0), RegMaskTag(0), RegMaskVirtReg(0) {}
void LiveRegMatrix::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequiredTransitive<LiveIntervals>();
AU.addRequiredTransitive<VirtRegMap>();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool LiveRegMatrix::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
LIS = &getAnalysis<LiveIntervals>();
VRM = &getAnalysis<VirtRegMap>();
unsigned NumRegUnits = TRI->getNumRegUnits();
if (NumRegUnits != Matrix.size())
Queries.reset(new LiveIntervalUnion::Query[NumRegUnits]);
Matrix.init(LIUAlloc, NumRegUnits);
// Make sure no stale queries get reused.
invalidateVirtRegs();
return false;
}
void LiveRegMatrix::releaseMemory() {
for (unsigned i = 0, e = Matrix.size(); i != e; ++i) {
Matrix[i].clear();
    // No need to clear Queries here, since LiveIntervalUnion::Query doesn't
    // have anything important to clear and LiveRegMatrix's
    // runOnMachineFunction() does a std::unique_ptr::reset anyway.
}
}
template<typename Callable>
bool foreachUnit(const TargetRegisterInfo *TRI, LiveInterval &VRegInterval,
unsigned PhysReg, Callable Func) {
if (VRegInterval.hasSubRanges()) {
for (MCRegUnitMaskIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
unsigned Unit = (*Units).first;
unsigned Mask = (*Units).second;
for (LiveInterval::SubRange &S : VRegInterval.subranges()) {
if (S.LaneMask & Mask) {
if (Func(Unit, S))
return true;
break;
}
}
}
} else {
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
if (Func(*Units, VRegInterval))
return true;
}
}
return false;
}
void LiveRegMatrix::assign(LiveInterval &VirtReg, unsigned PhysReg) {
DEBUG(dbgs() << "assigning " << PrintReg(VirtReg.reg, TRI)
<< " to " << PrintReg(PhysReg, TRI) << ':');
assert(!VRM->hasPhys(VirtReg.reg) && "Duplicate VirtReg assignment");
VRM->assignVirt2Phys(VirtReg.reg, PhysReg);
MRI->setPhysRegUsed(PhysReg);
foreachUnit(TRI, VirtReg, PhysReg, [&](unsigned Unit,
const LiveRange &Range) {
DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << ' ' << Range);
Matrix[Unit].unify(VirtReg, Range);
return false;
});
++NumAssigned;
DEBUG(dbgs() << '\n');
}
void LiveRegMatrix::unassign(LiveInterval &VirtReg) {
unsigned PhysReg = VRM->getPhys(VirtReg.reg);
DEBUG(dbgs() << "unassigning " << PrintReg(VirtReg.reg, TRI)
<< " from " << PrintReg(PhysReg, TRI) << ':');
VRM->clearVirt(VirtReg.reg);
foreachUnit(TRI, VirtReg, PhysReg, [&](unsigned Unit,
const LiveRange &Range) {
DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI));
Matrix[Unit].extract(VirtReg, Range);
return false;
});
++NumUnassigned;
DEBUG(dbgs() << '\n');
}
bool LiveRegMatrix::isPhysRegUsed(unsigned PhysReg) const {
for (MCRegUnitIterator Unit(PhysReg, TRI); Unit.isValid(); ++Unit) {
if (!Matrix[*Unit].empty())
return true;
}
return false;
}
bool LiveRegMatrix::checkRegMaskInterference(LiveInterval &VirtReg,
unsigned PhysReg) {
// Check if the cached information is valid.
// The same BitVector can be reused for all PhysRegs.
// We could cache multiple VirtRegs if it becomes necessary.
if (RegMaskVirtReg != VirtReg.reg || RegMaskTag != UserTag) {
RegMaskVirtReg = VirtReg.reg;
RegMaskTag = UserTag;
RegMaskUsable.clear();
LIS->checkRegMaskInterference(VirtReg, RegMaskUsable);
}
// The BitVector is indexed by PhysReg, not register unit.
// Regmask interference is more fine grained than regunits.
// For example, a Win64 call can clobber %ymm8 yet preserve %xmm8.
return !RegMaskUsable.empty() && (!PhysReg || !RegMaskUsable.test(PhysReg));
}
bool LiveRegMatrix::checkRegUnitInterference(LiveInterval &VirtReg,
unsigned PhysReg) {
if (VirtReg.empty())
return false;
CoalescerPair CP(VirtReg.reg, PhysReg, *TRI);
bool Result = foreachUnit(TRI, VirtReg, PhysReg, [&](unsigned Unit,
const LiveRange &Range) {
const LiveRange &UnitRange = LIS->getRegUnit(Unit);
return Range.overlaps(UnitRange, CP, *LIS->getSlotIndexes());
});
return Result;
}
LiveIntervalUnion::Query &LiveRegMatrix::query(LiveInterval &VirtReg,
unsigned RegUnit) {
LiveIntervalUnion::Query &Q = Queries[RegUnit];
Q.init(UserTag, &VirtReg, &Matrix[RegUnit]);
return Q;
}
LiveRegMatrix::InterferenceKind
LiveRegMatrix::checkInterference(LiveInterval &VirtReg, unsigned PhysReg) {
if (VirtReg.empty())
return IK_Free;
// Regmask interference is the fastest check.
if (checkRegMaskInterference(VirtReg, PhysReg))
return IK_RegMask;
// Check for fixed interference.
if (checkRegUnitInterference(VirtReg, PhysReg))
return IK_RegUnit;
// Check the matrix for virtual register interference.
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
if (query(VirtReg, *Units).checkInterference())
return IK_VirtReg;
return IK_Free;
}
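// Hedged usage sketch (hypothetical caller, not part of this file): a
// register allocator typically probes before assigning:
//   if (Matrix.checkInterference(VirtReg, PhysReg) == LiveRegMatrix::IK_Free)
//     Matrix.assign(VirtReg, PhysReg);
// and later calls Matrix.unassign(VirtReg) when evicting the interval.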
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/CodeGen/PHIEliminationUtils.h | //=- PHIEliminationUtils.h - Helper functions for PHI elimination -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_PHIELIMINATIONUTILS_H
#define LLVM_LIB_CODEGEN_PHIELIMINATIONUTILS_H
#include "llvm/CodeGen/MachineBasicBlock.h"
namespace llvm {
/// findPHICopyInsertPoint - Find a safe place in MBB to insert a copy from
/// SrcReg when following the CFG edge to SuccMBB. This needs to be after
/// any def of SrcReg, but before any subsequent point where control flow
/// might jump out of the basic block.
MachineBasicBlock::iterator
findPHICopyInsertPoint(MachineBasicBlock* MBB, MachineBasicBlock* SuccMBB,
unsigned SrcReg);
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | //===------- LegalizeVectorTypes.cpp - Legalization of vector types -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file performs vector type splitting and scalarization for LegalizeTypes.
// Scalarization is the act of changing a computation in an illegal one-element
// vector type to be a computation in its scalar element type. For example,
// implementing <1 x f32> arithmetic in a scalar f32 register. This is needed
// as a base case when scalarizing vector arithmetic like <4 x f32>, which
// eventually decomposes to scalars if the target doesn't support v4f32 or v2f32
// types.
// Splitting is the act of changing a computation in an invalid vector type to
// be a computation in two vectors of half the size. For example, implementing
// <128 x f32> operations in terms of two <64 x f32> operations.
//
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "legalize-types"
//===----------------------------------------------------------------------===//
// Result Vector Scalarization: <1 x ty> -> ty.
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
DEBUG(dbgs() << "Scalarize node result " << ResNo << ": ";
N->dump(&DAG);
dbgs() << "\n");
SDValue R = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "ScalarizeVectorResult #" << ResNo << ": ";
N->dump(&DAG);
dbgs() << "\n";
#endif
report_fatal_error("Do not know how to scalarize the result of this "
"operator!\n");
case ISD::MERGE_VALUES: R = ScalarizeVecRes_MERGE_VALUES(N, ResNo);break;
case ISD::BITCAST: R = ScalarizeVecRes_BITCAST(N); break;
case ISD::BUILD_VECTOR: R = ScalarizeVecRes_BUILD_VECTOR(N); break;
case ISD::CONVERT_RNDSAT: R = ScalarizeVecRes_CONVERT_RNDSAT(N); break;
case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break;
case ISD::FP_ROUND: R = ScalarizeVecRes_FP_ROUND(N); break;
case ISD::FP_ROUND_INREG: R = ScalarizeVecRes_InregOp(N); break;
case ISD::FPOWI: R = ScalarizeVecRes_FPOWI(N); break;
case ISD::INSERT_VECTOR_ELT: R = ScalarizeVecRes_INSERT_VECTOR_ELT(N); break;
case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N));break;
case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break;
case ISD::SIGN_EXTEND_INREG: R = ScalarizeVecRes_InregOp(N); break;
case ISD::VSELECT: R = ScalarizeVecRes_VSELECT(N); break;
case ISD::SELECT: R = ScalarizeVecRes_SELECT(N); break;
case ISD::SELECT_CC: R = ScalarizeVecRes_SELECT_CC(N); break;
case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
case ISD::VECTOR_SHUFFLE: R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
case ISD::ANY_EXTEND:
case ISD::BSWAP:
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTPOP:
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
case ISD::FABS:
case ISD::FCEIL:
case ISD::FCOS:
case ISD::FEXP:
case ISD::FEXP2:
case ISD::FFLOOR:
case ISD::FLOG:
case ISD::FLOG10:
case ISD::FLOG2:
case ISD::FNEARBYINT:
case ISD::FNEG:
case ISD::FP_EXTEND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::FRINT:
case ISD::FROUND:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC:
case ISD::SIGN_EXTEND:
case ISD::SINT_TO_FP:
case ISD::TRUNCATE:
case ISD::UINT_TO_FP:
case ISD::ZERO_EXTEND:
R = ScalarizeVecRes_UnaryOp(N);
break;
case ISD::ADD:
case ISD::AND:
case ISD::FADD:
case ISD::FCOPYSIGN:
case ISD::FDIV:
case ISD::FMUL:
case ISD::FMINNUM:
case ISD::FMAXNUM:
case ISD::FPOW:
case ISD::FREM:
case ISD::FSUB:
case ISD::MUL:
case ISD::OR:
case ISD::SDIV:
case ISD::SREM:
case ISD::SUB:
case ISD::UDIV:
case ISD::UREM:
case ISD::XOR:
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
R = ScalarizeVecRes_BinOp(N);
break;
case ISD::FMA:
R = ScalarizeVecRes_TernaryOp(N);
break;
}
// If R is null, the sub-method took care of registering the result.
if (R.getNode())
SetScalarizedVector(SDValue(N, ResNo), R);
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
SDValue LHS = GetScalarizedVector(N->getOperand(0));
SDValue RHS = GetScalarizedVector(N->getOperand(1));
return DAG.getNode(N->getOpcode(), SDLoc(N),
LHS.getValueType(), LHS, RHS);
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_TernaryOp(SDNode *N) {
SDValue Op0 = GetScalarizedVector(N->getOperand(0));
SDValue Op1 = GetScalarizedVector(N->getOperand(1));
SDValue Op2 = GetScalarizedVector(N->getOperand(2));
return DAG.getNode(N->getOpcode(), SDLoc(N),
Op0.getValueType(), Op0, Op1, Op2);
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_MERGE_VALUES(SDNode *N,
unsigned ResNo) {
SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
return GetScalarizedVector(Op);
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) {
EVT NewVT = N->getValueType(0).getVectorElementType();
return DAG.getNode(ISD::BITCAST, SDLoc(N),
NewVT, N->getOperand(0));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(SDNode *N) {
EVT EltVT = N->getValueType(0).getVectorElementType();
SDValue InOp = N->getOperand(0);
// The BUILD_VECTOR operands may be of wider element types and
// we may need to truncate them back to the requested return type.
if (EltVT.isInteger())
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, InOp);
return InOp;
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N) {
EVT NewVT = N->getValueType(0).getVectorElementType();
SDValue Op0 = GetScalarizedVector(N->getOperand(0));
return DAG.getConvertRndSat(NewVT, SDLoc(N),
Op0, DAG.getValueType(NewVT),
DAG.getValueType(Op0.getValueType()),
N->getOperand(3),
N->getOperand(4),
cast<CvtRndSatSDNode>(N)->getCvtCode());
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
N->getValueType(0).getVectorElementType(),
N->getOperand(0), N->getOperand(1));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_ROUND(SDNode *N) {
EVT NewVT = N->getValueType(0).getVectorElementType();
SDValue Op = GetScalarizedVector(N->getOperand(0));
return DAG.getNode(ISD::FP_ROUND, SDLoc(N),
NewVT, Op, N->getOperand(1));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_FPOWI(SDNode *N) {
SDValue Op = GetScalarizedVector(N->getOperand(0));
return DAG.getNode(ISD::FPOWI, SDLoc(N),
Op.getValueType(), Op, N->getOperand(1));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
// The value to insert may have a wider type than the vector element type,
// so be sure to truncate it to the element type if necessary.
SDValue Op = N->getOperand(1);
EVT EltVT = N->getValueType(0).getVectorElementType();
if (Op.getValueType() != EltVT)
// FIXME: Can this happen for floating point types?
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, Op);
return Op;
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
assert(N->isUnindexed() && "Indexed vector load?");
SDValue Result = DAG.getLoad(ISD::UNINDEXED,
N->getExtensionType(),
N->getValueType(0).getVectorElementType(),
SDLoc(N),
N->getChain(), N->getBasePtr(),
DAG.getUNDEF(N->getBasePtr().getValueType()),
N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
N->isVolatile(), N->isNonTemporal(),
N->isInvariant(), N->getOriginalAlignment(),
N->getAAInfo());
  // The chain result has been legalized - switch anything that used the old
  // chain to use the new one.
ReplaceValueWith(SDValue(N, 1), Result.getValue(1));
return Result;
}
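/// ScalarizeVecRes_UnaryOp - Scalarize a unary operation such as a conversion.
/// The operand is scalarized only if its own type action is scalarization;
/// otherwise element zero is extracted from the legal source vector.
/// Illustrative sketch (types are hypothetical):
///   v1i16 = fp_to_sint %x:v1f32, with v1f32 legal
///     -->  i16 = fp_to_sint (extract_vector_elt %x, 0)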
SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) {
// Get the dest type - it doesn't always match the input type, e.g. int_to_fp.
EVT DestVT = N->getValueType(0).getVectorElementType();
SDValue Op = N->getOperand(0);
EVT OpVT = Op.getValueType();
SDLoc DL(N);
// The result needs scalarizing, but it's not a given that the source does.
// This is a workaround for targets where it's impossible to scalarize the
// result of a conversion, because the source type is legal.
  // For instance, this happens on AArch64: v1i1 is illegal but v1i{8,16,32}
  // are widened to v8i8, v4i16, and v2i32, which are legal, because v1i64 is
  // legal and was not scalarized.
  // See the similar logic in ScalarizeVecRes_VSETCC.
if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
Op = GetScalarizedVector(Op);
} else {
EVT VT = OpVT.getVectorElementType();
Op = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, DL, VT, Op,
DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
return DAG.getNode(N->getOpcode(), SDLoc(N), DestVT, Op);
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_InregOp(SDNode *N) {
EVT EltVT = N->getValueType(0).getVectorElementType();
EVT ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT().getVectorElementType();
SDValue LHS = GetScalarizedVector(N->getOperand(0));
return DAG.getNode(N->getOpcode(), SDLoc(N), EltVT,
LHS, DAG.getValueType(ExtVT));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
// If the operand is wider than the vector element type then it is implicitly
// truncated. Make that explicit here.
EVT EltVT = N->getValueType(0).getVectorElementType();
SDValue InOp = N->getOperand(0);
if (InOp.getValueType() != EltVT)
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, InOp);
return InOp;
}
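/// ScalarizeVecRes_VSELECT - Scalarize a VSELECT by scalarizing the condition
/// and both value operands. If scalar and vector boolean contents differ, the
/// condition is massaged first, e.g. (sketch) an all-ones vector "true" is
/// masked with 1 when the scalar side expects zero-or-one booleans.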
SDValue DAGTypeLegalizer::ScalarizeVecRes_VSELECT(SDNode *N) {
SDValue Cond = GetScalarizedVector(N->getOperand(0));
SDValue LHS = GetScalarizedVector(N->getOperand(1));
TargetLowering::BooleanContent ScalarBool =
TLI.getBooleanContents(false, false);
TargetLowering::BooleanContent VecBool = TLI.getBooleanContents(true, false);
// If integer and float booleans have different contents then we can't
// reliably optimize in all cases. There is a full explanation for this in
// DAGCombiner::visitSELECT() where the same issue affects folding
// (select C, 0, 1) to (xor C, 1).
if (TLI.getBooleanContents(false, false) !=
TLI.getBooleanContents(false, true)) {
// At least try the common case where the boolean is generated by a
// comparison.
if (Cond->getOpcode() == ISD::SETCC) {
EVT OpVT = Cond->getOperand(0)->getValueType(0);
ScalarBool = TLI.getBooleanContents(OpVT.getScalarType());
VecBool = TLI.getBooleanContents(OpVT);
} else
ScalarBool = TargetLowering::UndefinedBooleanContent;
}
if (ScalarBool != VecBool) {
EVT CondVT = Cond.getValueType();
switch (ScalarBool) {
case TargetLowering::UndefinedBooleanContent:
break;
case TargetLowering::ZeroOrOneBooleanContent:
assert(VecBool == TargetLowering::UndefinedBooleanContent ||
VecBool == TargetLowering::ZeroOrNegativeOneBooleanContent);
      // The vector boolean is all ones but the scalar expects a single 1,
      // so mask it down.
Cond = DAG.getNode(ISD::AND, SDLoc(N), CondVT,
Cond, DAG.getConstant(1, SDLoc(N), CondVT));
break;
case TargetLowering::ZeroOrNegativeOneBooleanContent:
assert(VecBool == TargetLowering::UndefinedBooleanContent ||
VecBool == TargetLowering::ZeroOrOneBooleanContent);
      // The vector boolean is a single 1 but the scalar expects all ones,
      // so sign extend.
Cond = DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), CondVT,
Cond, DAG.getValueType(MVT::i1));
break;
}
}
return DAG.getSelect(SDLoc(N),
LHS.getValueType(), Cond, LHS,
GetScalarizedVector(N->getOperand(2)));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) {
SDValue LHS = GetScalarizedVector(N->getOperand(1));
return DAG.getSelect(SDLoc(N),
LHS.getValueType(), N->getOperand(0), LHS,
GetScalarizedVector(N->getOperand(2)));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT_CC(SDNode *N) {
SDValue LHS = GetScalarizedVector(N->getOperand(2));
return DAG.getNode(ISD::SELECT_CC, SDLoc(N), LHS.getValueType(),
N->getOperand(0), N->getOperand(1),
LHS, GetScalarizedVector(N->getOperand(3)),
N->getOperand(4));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) {
assert(N->getValueType(0).isVector() ==
N->getOperand(0).getValueType().isVector() &&
"Scalar/Vector type mismatch");
if (N->getValueType(0).isVector()) return ScalarizeVecRes_VSETCC(N);
SDValue LHS = GetScalarizedVector(N->getOperand(0));
SDValue RHS = GetScalarizedVector(N->getOperand(1));
SDLoc DL(N);
// Turn it into a scalar SETCC.
return DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, N->getOperand(2));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) {
return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
}
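/// ScalarizeVecRes_VECTOR_SHUFFLE - A shuffle of <1 x ty> vectors simply
/// selects the LHS or RHS scalar based on the single mask element, e.g.
/// (sketch) a mask of <0> picks operand 0 and a mask of <1> picks operand 1.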
SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
// Figure out if the scalar is the LHS or RHS and return it.
SDValue Arg = N->getOperand(2).getOperand(0);
if (Arg.getOpcode() == ISD::UNDEF)
return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
unsigned Op = !cast<ConstantSDNode>(Arg)->isNullValue();
return GetScalarizedVector(N->getOperand(Op));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) {
assert(N->getValueType(0).isVector() &&
N->getOperand(0).getValueType().isVector() &&
"Operand types must be vectors");
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
EVT OpVT = LHS.getValueType();
EVT NVT = N->getValueType(0).getVectorElementType();
SDLoc DL(N);
// The result needs scalarizing, but it's not a given that the source does.
if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
LHS = GetScalarizedVector(LHS);
RHS = GetScalarizedVector(RHS);
} else {
EVT VT = OpVT.getVectorElementType();
LHS = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, DL, VT, LHS,
DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
RHS = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, DL, VT, RHS,
DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
// Turn it into a scalar SETCC.
SDValue Res = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS,
N->getOperand(2));
  // Vectors may have different boolean contents than scalars. Promote the
  // value appropriately.
ISD::NodeType ExtendCode =
TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
return DAG.getNode(ExtendCode, DL, NVT, Res);
}
//===----------------------------------------------------------------------===//
// Operand Vector Scalarization <1 x ty> -> ty.
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
DEBUG(dbgs() << "Scalarize node operand " << OpNo << ": ";
N->dump(&DAG);
dbgs() << "\n");
SDValue Res = SDValue();
if (!Res.getNode()) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
N->dump(&DAG);
dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to scalarize this operator's operand!");
case ISD::BITCAST:
Res = ScalarizeVecOp_BITCAST(N);
break;
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::TRUNCATE:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
Res = ScalarizeVecOp_UnaryOp(N);
break;
case ISD::CONCAT_VECTORS:
Res = ScalarizeVecOp_CONCAT_VECTORS(N);
break;
case ISD::EXTRACT_VECTOR_ELT:
Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
break;
case ISD::VSELECT:
Res = ScalarizeVecOp_VSELECT(N);
break;
case ISD::STORE:
Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
break;
case ISD::FP_ROUND:
Res = ScalarizeVecOp_FP_ROUND(N, OpNo);
break;
}
}
// If the result is null, the sub-method took care of registering results etc.
if (!Res.getNode()) return false;
// If the result is N, the sub-method updated N in place. Tell the legalizer
// core about this.
if (Res.getNode() == N)
return true;
assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
"Invalid operand expansion");
ReplaceValueWith(SDValue(N, 0), Res);
return false;
}
/// ScalarizeVecOp_BITCAST - If the value to convert is a vector that needs
/// to be scalarized, it must be <1 x ty>. Convert the element instead.
SDValue DAGTypeLegalizer::ScalarizeVecOp_BITCAST(SDNode *N) {
SDValue Elt = GetScalarizedVector(N->getOperand(0));
return DAG.getNode(ISD::BITCAST, SDLoc(N),
N->getValueType(0), Elt);
}
/// ScalarizeVecOp_UnaryOp - If the input is a vector that needs to be
/// scalarized, it must be <1 x ty>. Do the operation on the element instead.
SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp(SDNode *N) {
assert(N->getValueType(0).getVectorNumElements() == 1 &&
"Unexpected vector type!");
SDValue Elt = GetScalarizedVector(N->getOperand(0));
SDValue Op = DAG.getNode(N->getOpcode(), SDLoc(N),
N->getValueType(0).getScalarType(), Elt);
// Revectorize the result so the types line up with what the uses of this
// expression expect.
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N->getValueType(0), Op);
}
/// ScalarizeVecOp_CONCAT_VECTORS - The vectors to concatenate have length one -
/// use a BUILD_VECTOR instead.
SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
SmallVector<SDValue, 8> Ops(N->getNumOperands());
for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
Ops[i] = GetScalarizedVector(N->getOperand(i));
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N->getValueType(0), Ops);
}
/// ScalarizeVecOp_EXTRACT_VECTOR_ELT - If the input is a vector that needs to
/// be scalarized, it must be <1 x ty>, so just return the element, ignoring the
/// index.
SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDValue Res = GetScalarizedVector(N->getOperand(0));
if (Res.getValueType() != N->getValueType(0))
Res = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), N->getValueType(0),
Res);
return Res;
}
/// ScalarizeVecOp_VSELECT - If the input condition is a vector that needs to be
/// scalarized, it must be <1 x i1>, so just convert to a normal ISD::SELECT
/// (still with vector output type since that was acceptable if we got here).
SDValue DAGTypeLegalizer::ScalarizeVecOp_VSELECT(SDNode *N) {
SDValue ScalarCond = GetScalarizedVector(N->getOperand(0));
EVT VT = N->getValueType(0);
return DAG.getNode(ISD::SELECT, SDLoc(N), VT, ScalarCond, N->getOperand(1),
N->getOperand(2));
}
/// ScalarizeVecOp_STORE - If the value to store is a vector that needs to be
/// scalarized, it must be <1 x ty>. Just store the element.
SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
assert(N->isUnindexed() && "Indexed store of one-element vector?");
assert(OpNo == 1 && "Do not know how to scalarize this operand!");
SDLoc dl(N);
if (N->isTruncatingStore())
return DAG.getTruncStore(N->getChain(), dl,
GetScalarizedVector(N->getOperand(1)),
N->getBasePtr(), N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
N->isVolatile(), N->isNonTemporal(),
N->getAlignment(), N->getAAInfo());
return DAG.getStore(N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
N->getBasePtr(), N->getPointerInfo(),
N->isVolatile(), N->isNonTemporal(),
N->getOriginalAlignment(), N->getAAInfo());
}
/// ScalarizeVecOp_FP_ROUND - If the value to round is a vector that needs
/// to be scalarized, it must be <1 x ty>. Convert the element instead.
SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) {
SDValue Elt = GetScalarizedVector(N->getOperand(0));
SDValue Res = DAG.getNode(ISD::FP_ROUND, SDLoc(N),
N->getValueType(0).getVectorElementType(), Elt,
N->getOperand(1));
return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);
}
//===----------------------------------------------------------------------===//
// Result Vector Splitting
//===----------------------------------------------------------------------===//
/// SplitVectorResult - This method is called when the specified result of the
/// specified node is found to need vector splitting. At this point, the node
/// may also have invalid operands or may have other results that need
/// legalization; we just know that (at least) one result needs vector
/// splitting.
void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
DEBUG(dbgs() << "Split node result: ";
N->dump(&DAG);
dbgs() << "\n");
SDValue Lo, Hi;
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getValueType(ResNo), true))
return;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "SplitVectorResult #" << ResNo << ": ";
N->dump(&DAG);
dbgs() << "\n";
#endif
report_fatal_error("Do not know how to split the result of this "
"operator!\n");
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
case ISD::VSELECT:
case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::BITCAST: SplitVecRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_VECTOR: SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break;
case ISD::CONCAT_VECTORS: SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break;
case ISD::EXTRACT_SUBVECTOR: SplitVecRes_EXTRACT_SUBVECTOR(N, Lo, Hi); break;
case ISD::INSERT_SUBVECTOR: SplitVecRes_INSERT_SUBVECTOR(N, Lo, Hi); break;
case ISD::FP_ROUND_INREG: SplitVecRes_InregOp(N, Lo, Hi); break;
case ISD::FPOWI: SplitVecRes_FPOWI(N, Lo, Hi); break;
case ISD::INSERT_VECTOR_ELT: SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
case ISD::SCALAR_TO_VECTOR: SplitVecRes_SCALAR_TO_VECTOR(N, Lo, Hi); break;
case ISD::SIGN_EXTEND_INREG: SplitVecRes_InregOp(N, Lo, Hi); break;
case ISD::LOAD:
SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
break;
case ISD::MLOAD:
SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(N), Lo, Hi);
break;
case ISD::MGATHER:
SplitVecRes_MGATHER(cast<MaskedGatherSDNode>(N), Lo, Hi);
break;
case ISD::SETCC:
SplitVecRes_SETCC(N, Lo, Hi);
break;
case ISD::VECTOR_SHUFFLE:
SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
break;
case ISD::BSWAP:
case ISD::CONVERT_RNDSAT:
case ISD::CTLZ:
case ISD::CTTZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTPOP:
case ISD::FABS:
case ISD::FCEIL:
case ISD::FCOS:
case ISD::FEXP:
case ISD::FEXP2:
case ISD::FFLOOR:
case ISD::FLOG:
case ISD::FLOG10:
case ISD::FLOG2:
case ISD::FNEARBYINT:
case ISD::FNEG:
case ISD::FP_EXTEND:
case ISD::FP_ROUND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::FRINT:
case ISD::FROUND:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC:
case ISD::SINT_TO_FP:
case ISD::TRUNCATE:
case ISD::UINT_TO_FP:
SplitVecRes_UnaryOp(N, Lo, Hi);
break;
case ISD::ANY_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
SplitVecRes_ExtendOp(N, Lo, Hi);
break;
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::FADD:
case ISD::FCOPYSIGN:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FMINNUM:
case ISD::FMAXNUM:
case ISD::SDIV:
case ISD::UDIV:
case ISD::FDIV:
case ISD::FPOW:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::UREM:
case ISD::SREM:
case ISD::FREM:
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX:
SplitVecRes_BinOp(N, Lo, Hi);
break;
case ISD::FMA:
SplitVecRes_TernaryOp(N, Lo, Hi);
break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
if (Lo.getNode())
SetSplitVector(SDValue(N, ResNo), Lo, Hi);
}
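/// SplitVecRes_BinOp - Split a binary operation into two operations of half
/// the vector width. Illustrative sketch (types are hypothetical):
///   v8i32 = add %a, %b
///     -->  Lo: v4i32 = add %a.lo, %b.lo
///          Hi: v4i32 = add %a.hi, %b.hi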
void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue LHSLo, LHSHi;
GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
SDValue RHSLo, RHSHi;
GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
SDLoc dl(N);
Lo = DAG.getNode(N->getOpcode(), dl, LHSLo.getValueType(), LHSLo, RHSLo);
Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi, RHSHi);
}
void DAGTypeLegalizer::SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Op0Lo, Op0Hi;
GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
SDValue Op1Lo, Op1Hi;
GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
SDValue Op2Lo, Op2Hi;
GetSplitVector(N->getOperand(2), Op2Lo, Op2Hi);
SDLoc dl(N);
Lo = DAG.getNode(N->getOpcode(), dl, Op0Lo.getValueType(),
Op0Lo, Op1Lo, Op2Lo);
Hi = DAG.getNode(N->getOpcode(), dl, Op0Hi.getValueType(),
Op0Hi, Op1Hi, Op2Hi);
}
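/// SplitVecRes_BITCAST - Split the result of a bitcast. Where possible the
/// expanded or split halves of the input are bitcast directly; otherwise the
/// input is converted to an integer and split by hand. Illustrative sketch
/// (types are hypothetical; halves are swapped first on big-endian targets):
///   v4i32 = bitcast %x:i128, where i128 expands to two i64 halves
///     -->  Lo: v2i32 = bitcast %x.lo:i64, Hi: v2i32 = bitcast %x.hi:i64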
void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
SDValue &Hi) {
// We know the result is a vector. The input may be either a vector or a
// scalar value.
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
SDLoc dl(N);
SDValue InOp = N->getOperand(0);
EVT InVT = InOp.getValueType();
// Handle some special cases efficiently.
switch (getTypeAction(InVT)) {
case TargetLowering::TypeLegal:
case TargetLowering::TypePromoteInteger:
case TargetLowering::TypePromoteFloat:
case TargetLowering::TypeSoftenFloat:
case TargetLowering::TypeScalarizeVector:
case TargetLowering::TypeWidenVector:
break;
case TargetLowering::TypeExpandInteger:
case TargetLowering::TypeExpandFloat:
// A scalar to vector conversion, where the scalar needs expansion.
// If the vector is being split in two then we can just convert the
// expanded pieces.
if (LoVT == HiVT) {
GetExpandedOp(InOp, Lo, Hi);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
return;
}
break;
case TargetLowering::TypeSplitVector:
// If the input is a vector that needs to be split, convert each split
// piece of the input now.
GetSplitVector(InOp, Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
return;
}
// In the general case, convert the input to an integer and split it by hand.
EVT LoIntVT = EVT::getIntegerVT(*DAG.getContext(), LoVT.getSizeInBits());
EVT HiIntVT = EVT::getIntegerVT(*DAG.getContext(), HiVT.getSizeInBits());
if (DAG.getDataLayout().isBigEndian())
std::swap(LoIntVT, HiIntVT);
SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
}
void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
EVT LoVT, HiVT;
SDLoc dl(N);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
unsigned LoNumElts = LoVT.getVectorNumElements();
SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts);
Lo = DAG.getNode(ISD::BUILD_VECTOR, dl, LoVT, LoOps);
SmallVector<SDValue, 8> HiOps(N->op_begin()+LoNumElts, N->op_end());
Hi = DAG.getNode(ISD::BUILD_VECTOR, dl, HiVT, HiOps);
}
void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo,
SDValue &Hi) {
assert(!(N->getNumOperands() & 1) && "Unsupported CONCAT_VECTORS");
SDLoc dl(N);
unsigned NumSubvectors = N->getNumOperands() / 2;
if (NumSubvectors == 1) {
Lo = N->getOperand(0);
Hi = N->getOperand(1);
return;
}
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors);
Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, LoVT, LoOps);
SmallVector<SDValue, 8> HiOps(N->op_begin()+NumSubvectors, N->op_end());
Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HiVT, HiOps);
}
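/// SplitVecRes_EXTRACT_SUBVECTOR - Split an EXTRACT_SUBVECTOR into two
/// narrower extracts from the same source. Illustrative sketch (types are
/// hypothetical):
///   v8i32 = extract_subvector %v:v16i32, 4
///     -->  Lo: v4i32 = extract_subvector %v, 4
///          Hi: v4i32 = extract_subvector %v, 8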
void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Vec = N->getOperand(0);
SDValue Idx = N->getOperand(1);
SDLoc dl(N);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
DAG.getConstant(IdxVal + LoVT.getVectorNumElements(), dl,
TLI.getVectorIdxTy(DAG.getDataLayout())));
}
void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Vec = N->getOperand(0);
SDValue SubVec = N->getOperand(1);
SDValue Idx = N->getOperand(2);
SDLoc dl(N);
GetSplitVector(Vec, Lo, Hi);
// Spill the vector to the stack.
EVT VecVT = Vec.getValueType();
EVT SubVecVT = VecVT.getVectorElementType();
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
MachinePointerInfo(), false, false, 0);
// Store the new subvector into the specified index.
SDValue SubVecPtr = GetVectorElementPointer(StackPtr, SubVecVT, Idx);
Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType);
Store = DAG.getStore(Store, dl, SubVec, SubVecPtr, MachinePointerInfo(),
false, false, 0);
// Load the Lo part from the stack slot.
Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, MachinePointerInfo(),
false, false, false, 0);
// Increment the pointer to the other part.
unsigned IncrementSize = Lo.getValueType().getSizeInBits() / 8;
StackPtr =
DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
DAG.getConstant(IncrementSize, dl, StackPtr.getValueType()));
// Load the Hi part from the stack slot.
Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MachinePointerInfo(),
false, false, false, MinAlign(Alignment, IncrementSize));
}
void DAGTypeLegalizer::SplitVecRes_FPOWI(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDLoc dl(N);
GetSplitVector(N->getOperand(0), Lo, Hi);
Lo = DAG.getNode(ISD::FPOWI, dl, Lo.getValueType(), Lo, N->getOperand(1));
Hi = DAG.getNode(ISD::FPOWI, dl, Hi.getValueType(), Hi, N->getOperand(1));
}
void DAGTypeLegalizer::SplitVecRes_InregOp(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue LHSLo, LHSHi;
GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
SDLoc dl(N);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) =
DAG.GetSplitDestVTs(cast<VTSDNode>(N->getOperand(1))->getVT());
Lo = DAG.getNode(N->getOpcode(), dl, LHSLo.getValueType(), LHSLo,
DAG.getValueType(LoVT));
Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi,
DAG.getValueType(HiVT));
}
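/// SplitVecRes_INSERT_VECTOR_ELT - With a constant index the insert is simply
/// forwarded to the Lo or Hi half (rebasing the index for Hi); otherwise the
/// vector is spilled to a stack slot, the element is stored through a computed
/// pointer, and the two halves are reloaded. Illustrative sketch of the
/// constant-index case (types are hypothetical):
///   v8i32 = insert_vector_elt %v, %elt, 5
///     -->  Hi: v4i32 = insert_vector_elt %v.hi, %elt, 1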
void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Vec = N->getOperand(0);
SDValue Elt = N->getOperand(1);
SDValue Idx = N->getOperand(2);
SDLoc dl(N);
GetSplitVector(Vec, Lo, Hi);
if (ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
unsigned IdxVal = CIdx->getZExtValue();
unsigned LoNumElts = Lo.getValueType().getVectorNumElements();
if (IdxVal < LoNumElts)
Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
Lo.getValueType(), Lo, Elt, Idx);
else
Hi =
DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Hi.getValueType(), Hi, Elt,
DAG.getConstant(IdxVal - LoNumElts, dl,
TLI.getVectorIdxTy(DAG.getDataLayout())));
return;
}
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getValueType(0), true))
return;
// Spill the vector to the stack.
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
MachinePointerInfo(), false, false, 0);
// Store the new element. This may be larger than the vector element type,
// so use a truncating store.
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType);
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
false, false, 0);
// Load the Lo part from the stack slot.
Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, MachinePointerInfo(),
false, false, false, 0);
// Increment the pointer to the other part.
unsigned IncrementSize = Lo.getValueType().getSizeInBits() / 8;
StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
DAG.getConstant(IncrementSize, dl,
StackPtr.getValueType()));
// Load the Hi part from the stack slot.
Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MachinePointerInfo(),
false, false, false, MinAlign(Alignment, IncrementSize));
}
void DAGTypeLegalizer::SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
EVT LoVT, HiVT;
SDLoc dl(N);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoVT, N->getOperand(0));
Hi = DAG.getUNDEF(HiVT);
}
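/// SplitVecRes_LOAD - Split a vector load into two loads of half the width,
/// the second at a byte offset of half the memory size, with the two chains
/// joined by a TokenFactor. Illustrative sketch (types are hypothetical):
///   v8i32 = load %ptr  -->  Lo: v4i32 = load %ptr
///                           Hi: v4i32 = load (%ptr + 16)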
void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
SDValue &Hi) {
assert(ISD::isUNINDEXEDLoad(LD) && "Indexed load during type legalization!");
EVT LoVT, HiVT;
SDLoc dl(LD);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(LD->getValueType(0));
ISD::LoadExtType ExtType = LD->getExtensionType();
SDValue Ch = LD->getChain();
SDValue Ptr = LD->getBasePtr();
SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
EVT MemoryVT = LD->getMemoryVT();
unsigned Alignment = LD->getOriginalAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
bool isInvariant = LD->isInvariant();
AAMDNodes AAInfo = LD->getAAInfo();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
LD->getPointerInfo(), LoMemVT, isVolatile, isNonTemporal,
isInvariant, Alignment, AAInfo);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset,
LD->getPointerInfo().getWithOffset(IncrementSize),
HiMemVT, isVolatile, isNonTemporal, isInvariant, Alignment,
AAInfo);
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
  // The chain result has been legalized - switch anything that used the old
  // chain to use the new one.
ReplaceValueWith(SDValue(LD, 1), Ch);
}
void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
SDValue &Lo, SDValue &Hi) {
EVT LoVT, HiVT;
SDLoc dl(MLD);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0));
SDValue Ch = MLD->getChain();
SDValue Ptr = MLD->getBasePtr();
SDValue Mask = MLD->getMask();
unsigned Alignment = MLD->getOriginalAlignment();
ISD::LoadExtType ExtType = MLD->getExtensionType();
  // If the alignment equals the full vector size in bytes, use half of it
  // for the second half of the split access.
unsigned SecondHalfAlignment =
(Alignment == MLD->getValueType(0).getSizeInBits()/8) ?
Alignment/2 : Alignment;
SDValue MaskLo, MaskHi;
std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
EVT MemoryVT = MLD->getMemoryVT();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue Src0 = MLD->getSrc0();
SDValue Src0Lo, Src0Hi;
std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, dl);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MLD->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MLD->getAAInfo(), MLD->getRanges());
Lo = DAG.getMaskedLoad(LoVT, dl, Ch, Ptr, MaskLo, Src0Lo, LoMemVT, MMO,
ExtType);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
MMO = DAG.getMachineFunction().
getMachineMemOperand(MLD->getPointerInfo(),
MachineMemOperand::MOLoad, HiMemVT.getStoreSize(),
SecondHalfAlignment, MLD->getAAInfo(), MLD->getRanges());
Hi = DAG.getMaskedLoad(HiVT, dl, Ch, Ptr, MaskHi, Src0Hi, HiMemVT, MMO,
ExtType);
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
  // The chain result has been legalized - switch anything that used the old
  // chain to use the new one.
ReplaceValueWith(SDValue(MLD, 1), Ch);
}
void DAGTypeLegalizer::SplitVecRes_MGATHER(MaskedGatherSDNode *MGT,
SDValue &Lo, SDValue &Hi) {
EVT LoVT, HiVT;
SDLoc dl(MGT);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MGT->getValueType(0));
SDValue Ch = MGT->getChain();
SDValue Ptr = MGT->getBasePtr();
SDValue Mask = MGT->getMask();
unsigned Alignment = MGT->getOriginalAlignment();
SDValue MaskLo, MaskHi;
std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
EVT MemoryVT = MGT->getMemoryVT();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue Src0Lo, Src0Hi;
std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(MGT->getValue(), dl);
SDValue IndexHi, IndexLo;
std::tie(IndexLo, IndexHi) = DAG.SplitVector(MGT->getIndex(), dl);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MGT->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MGT->getAAInfo(), MGT->getRanges());
SDValue OpsLo[] = {Ch, Src0Lo, MaskLo, Ptr, IndexLo};
Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoVT, dl, OpsLo,
MMO);
SDValue OpsHi[] = {Ch, Src0Hi, MaskHi, Ptr, IndexHi};
Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiVT, dl, OpsHi,
MMO);
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
  // The chain result has been legalized - switch anything that used the old
  // chain to use the new one.
ReplaceValueWith(SDValue(MGT, 1), Ch);
}
void DAGTypeLegalizer::SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi) {
assert(N->getValueType(0).isVector() &&
N->getOperand(0).getValueType().isVector() &&
"Operand types must be vectors");
EVT LoVT, HiVT;
SDLoc DL(N);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// Split the input.
SDValue LL, LH, RL, RH;
std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
}
void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
SDValue &Hi) {
// Get the dest types - they may not match the input types, e.g. int_to_fp.
EVT LoVT, HiVT;
SDLoc dl(N);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// If the input also splits, handle it directly for a compile time speedup.
// Otherwise split it by hand.
EVT InVT = N->getOperand(0).getValueType();
if (getTypeAction(InVT) == TargetLowering::TypeSplitVector)
GetSplitVector(N->getOperand(0), Lo, Hi);
else
std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
if (N->getOpcode() == ISD::FP_ROUND) {
Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo, N->getOperand(1));
Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi, N->getOperand(1));
} else if (N->getOpcode() == ISD::CONVERT_RNDSAT) {
SDValue DTyOpLo = DAG.getValueType(LoVT);
SDValue DTyOpHi = DAG.getValueType(HiVT);
SDValue STyOpLo = DAG.getValueType(Lo.getValueType());
SDValue STyOpHi = DAG.getValueType(Hi.getValueType());
SDValue RndOp = N->getOperand(3);
SDValue SatOp = N->getOperand(4);
ISD::CvtCode CvtCode = cast<CvtRndSatSDNode>(N)->getCvtCode();
Lo = DAG.getConvertRndSat(LoVT, dl, Lo, DTyOpLo, STyOpLo, RndOp, SatOp,
CvtCode);
Hi = DAG.getConvertRndSat(HiVT, dl, Hi, DTyOpHi, STyOpHi, RndOp, SatOp,
CvtCode);
} else {
Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo);
Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi);
}
}
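/// SplitVecRes_ExtendOp - Split an extend. When the conditions spelled out in
/// the function body hold, the extend is performed in two steps so that the
/// intermediate types stay legal. Illustrative sketch (types are hypothetical
/// and target-dependent):
///   v16i32 = sext %x:v16i8, with v16i8 and v16i16 legal but v8i8 illegal
///     -->  %t:v16i16 = sext %x, then Lo/Hi: v8i32 = sext (split %t)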
void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDLoc dl(N);
EVT SrcVT = N->getOperand(0).getValueType();
EVT DestVT = N->getValueType(0);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(DestVT);
// We can do better than a generic split operation if the extend is doing
// more than just doubling the width of the elements and the following are
// true:
// - The number of vector elements is even,
// - the source type is legal,
// - the type of a split source is illegal,
// - the type of an extended (by doubling element size) source is legal, and
// - the type of that extended source when split is legal.
//
// This won't necessarily completely legalize the operation, but it will
// more effectively move in the right direction and prevent falling down
// to scalarization in many cases due to the input vector being split too
// far.
unsigned NumElements = SrcVT.getVectorNumElements();
if ((NumElements & 1) == 0 &&
SrcVT.getSizeInBits() * 2 < DestVT.getSizeInBits()) {
LLVMContext &Ctx = *DAG.getContext();
EVT NewSrcVT = EVT::getVectorVT(
Ctx, EVT::getIntegerVT(
Ctx, SrcVT.getVectorElementType().getSizeInBits() * 2),
NumElements);
EVT SplitSrcVT =
EVT::getVectorVT(Ctx, SrcVT.getVectorElementType(), NumElements / 2);
EVT SplitLoVT, SplitHiVT;
std::tie(SplitLoVT, SplitHiVT) = DAG.GetSplitDestVTs(NewSrcVT);
if (TLI.isTypeLegal(SrcVT) && !TLI.isTypeLegal(SplitSrcVT) &&
TLI.isTypeLegal(NewSrcVT) && TLI.isTypeLegal(SplitLoVT)) {
DEBUG(dbgs() << "Split vector extend via incremental extend:";
N->dump(&DAG); dbgs() << "\n");
// Extend the source vector by one step.
SDValue NewSrc =
DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0));
// Get the low and high halves of the new, extended one step, vector.
std::tie(Lo, Hi) = DAG.SplitVector(NewSrc, dl);
// Extend those vector halves the rest of the way.
Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo);
Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi);
return;
}
}
// Fall back to the generic unary operator splitting otherwise.
SplitVecRes_UnaryOp(N, Lo, Hi);
}
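/// SplitVecRes_VECTOR_SHUFFLE - Splitting both shuffle operands yields four
/// half-width input vectors. Each output half becomes either a new shuffle of
/// at most two of those inputs or, if more inputs would be needed, a
/// BUILD_VECTOR of individually extracted elements.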
void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
SDValue &Lo, SDValue &Hi) {
// The low and high parts of the original input give four input vectors.
SDValue Inputs[4];
SDLoc dl(N);
GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
EVT NewVT = Inputs[0].getValueType();
unsigned NewElts = NewVT.getVectorNumElements();
// If Lo or Hi uses elements from at most two of the four input vectors, then
// express it as a vector shuffle of those two inputs. Otherwise extract the
// input elements by hand and construct the Lo/Hi output using a BUILD_VECTOR.
SmallVector<int, 16> Ops;
for (unsigned High = 0; High < 2; ++High) {
SDValue &Output = High ? Hi : Lo;
// Build a shuffle mask for the output, discovering on the fly which
// input vectors to use as shuffle operands (recorded in InputUsed).
// If building a suitable shuffle vector proves too hard, then bail
// out with useBuildVector set.
unsigned InputUsed[2] = { -1U, -1U }; // Not yet discovered.
unsigned FirstMaskIdx = High * NewElts;
bool useBuildVector = false;
for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
// The mask element. This indexes into the input.
int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
// The input vector this mask element indexes into.
unsigned Input = (unsigned)Idx / NewElts;
if (Input >= array_lengthof(Inputs)) {
// The mask element does not index into any input vector.
Ops.push_back(-1);
continue;
}
// Turn the index into an offset from the start of the input vector.
Idx -= Input * NewElts;
// Find or create a shuffle vector operand to hold this input.
unsigned OpNo;
for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
if (InputUsed[OpNo] == Input) {
// This input vector is already an operand.
break;
} else if (InputUsed[OpNo] == -1U) {
// Create a new operand for this input vector.
InputUsed[OpNo] = Input;
break;
}
}
if (OpNo >= array_lengthof(InputUsed)) {
// More than two input vectors used! Give up on trying to create a
// shuffle vector. Insert all elements into a BUILD_VECTOR instead.
useBuildVector = true;
break;
}
// Add the mask index for the new shuffle vector.
Ops.push_back(Idx + OpNo * NewElts);
}
if (useBuildVector) {
EVT EltVT = NewVT.getVectorElementType();
SmallVector<SDValue, 16> SVOps;
// Extract the input elements by hand.
for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
// The mask element. This indexes into the input.
int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
// The input vector this mask element indexes into.
unsigned Input = (unsigned)Idx / NewElts;
if (Input >= array_lengthof(Inputs)) {
// The mask element is "undef" or indexes off the end of the input.
SVOps.push_back(DAG.getUNDEF(EltVT));
continue;
}
// Turn the index into an offset from the start of the input vector.
Idx -= Input * NewElts;
// Extract the vector element by hand.
SVOps.push_back(DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Inputs[Input],
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))));
}
// Construct the Lo/Hi output using a BUILD_VECTOR.
Output = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, SVOps);
} else if (InputUsed[0] == -1U) {
// No input vectors were used! The result is undefined.
Output = DAG.getUNDEF(NewVT);
} else {
SDValue Op0 = Inputs[InputUsed[0]];
// If only one input was used, use an undefined vector for the other.
SDValue Op1 = InputUsed[1] == -1U ?
DAG.getUNDEF(NewVT) : Inputs[InputUsed[1]];
// At least one input vector was used. Create a new shuffle vector.
Output = DAG.getVectorShuffle(NewVT, dl, Op0, Op1, &Ops[0]);
}
Ops.clear();
}
}
//===----------------------------------------------------------------------===//
// Operand Vector Splitting
//===----------------------------------------------------------------------===//
/// SplitVectorOperand - This method is called when the specified operand of the
/// specified node is found to need vector splitting. At this point, all of the
/// result types of the node are known to be legal, but other operands of the
/// node may need legalization as well as the specified one.
bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
DEBUG(dbgs() << "Split node operand: ";
N->dump(&DAG);
dbgs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom split this node.
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
return false;
if (!Res.getNode()) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "SplitVectorOperand Op #" << OpNo << ": ";
N->dump(&DAG);
dbgs() << "\n";
#endif
report_fatal_error("Do not know how to split this operator's "
"operand!\n");
case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
case ISD::TRUNCATE:
Res = SplitVecOp_TruncateHelper(N);
break;
case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break;
case ISD::STORE:
Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
break;
case ISD::MSTORE:
Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo);
break;
case ISD::MSCATTER:
Res = SplitVecOp_MSCATTER(cast<MaskedScatterSDNode>(N), OpNo);
break;
case ISD::MGATHER:
Res = SplitVecOp_MGATHER(cast<MaskedGatherSDNode>(N), OpNo);
break;
case ISD::VSELECT:
Res = SplitVecOp_VSELECT(N, OpNo);
break;
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
if (N->getValueType(0).bitsLT(N->getOperand(0)->getValueType(0)))
Res = SplitVecOp_TruncateHelper(N);
else
Res = SplitVecOp_UnaryOp(N);
break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
if (N->getValueType(0).bitsLT(N->getOperand(0)->getValueType(0)))
Res = SplitVecOp_TruncateHelper(N);
else
Res = SplitVecOp_UnaryOp(N);
break;
case ISD::CTTZ:
case ISD::CTLZ:
case ISD::CTPOP:
case ISD::FP_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
case ISD::FTRUNC:
Res = SplitVecOp_UnaryOp(N);
break;
}
}
// If the result is null, the sub-method took care of registering results etc.
if (!Res.getNode()) return false;
// If the result is N, the sub-method updated N in place. Tell the legalizer
// core about this.
if (Res.getNode() == N)
return true;
assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
"Invalid operand expansion");
ReplaceValueWith(SDValue(N, 0), Res);
return false;
}
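/// SplitVecOp_VSELECT - Only the mask operand can be illegal here, so split
/// the mask and both value operands, select each half independently, and
/// concatenate the two half-width selects back to the full result width.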
SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) {
// The only possibility for an illegal operand is the mask, since result type
// legalization would have handled this node already otherwise.
assert(OpNo == 0 && "Illegal operand must be mask");
SDValue Mask = N->getOperand(0);
SDValue Src0 = N->getOperand(1);
SDValue Src1 = N->getOperand(2);
EVT Src0VT = Src0.getValueType();
SDLoc DL(N);
assert(Mask.getValueType().isVector() && "VSELECT without a vector mask?");
SDValue Lo, Hi;
GetSplitVector(N->getOperand(0), Lo, Hi);
assert(Lo.getValueType() == Hi.getValueType() &&
"Lo and Hi have differing types");
EVT LoOpVT, HiOpVT;
std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
assert(LoOpVT == HiOpVT && "Asymmetric vector split?");
SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0, DL);
std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1, DL);
std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
SDValue LoSelect =
DAG.getNode(ISD::VSELECT, DL, LoOpVT, LoMask, LoOp0, LoOp1);
SDValue HiSelect =
DAG.getNode(ISD::VSELECT, DL, HiOpVT, HiMask, HiOp0, HiOp1);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, Src0VT, LoSelect, HiSelect);
}
SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
// The result has a legal vector type, but the input needs splitting.
EVT ResVT = N->getValueType(0);
SDValue Lo, Hi;
SDLoc dl(N);
GetSplitVector(N->getOperand(0), Lo, Hi);
EVT InVT = Lo.getValueType();
EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
InVT.getVectorNumElements());
Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo);
Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
}
SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
  // For example, i64 = BITCAST v4i16 on Alpha. Typically the vector will
// end up being split all the way down to individual components. Convert the
// split pieces into integers and reassemble.
SDValue Lo, Hi;
GetSplitVector(N->getOperand(0), Lo, Hi);
Lo = BitConvertToInteger(Lo);
Hi = BitConvertToInteger(Hi);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0),
JoinIntegers(Lo, Hi));
}
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
// We know that the extracted result type is legal.
EVT SubVT = N->getValueType(0);
SDValue Idx = N->getOperand(1);
SDLoc dl(N);
SDValue Lo, Hi;
GetSplitVector(N->getOperand(0), Lo, Hi);
uint64_t LoElts = Lo.getValueType().getVectorNumElements();
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
if (IdxVal < LoElts) {
assert(IdxVal + SubVT.getVectorNumElements() <= LoElts &&
"Extracted subvector crosses vector split!");
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Lo, Idx);
} else {
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Hi,
DAG.getConstant(IdxVal - LoElts, dl,
Idx.getValueType()));
}
}
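/// SplitVecOp_EXTRACT_VECTOR_ELT - Extract an element from a vector operand
/// that needs splitting. A constant index selects the Lo or Hi half (rebasing
/// the index for Hi); a variable index falls back to a stack spill and an
/// extending load. Illustrative sketch of the constant case (types are
/// hypothetical):
///   i32 = extract_vector_elt %v:v8i32, 6
///     -->  i32 = extract_vector_elt %v.hi:v4i32, 2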
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDValue Vec = N->getOperand(0);
SDValue Idx = N->getOperand(1);
EVT VecVT = Vec.getValueType();
if (isa<ConstantSDNode>(Idx)) {
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
assert(IdxVal < VecVT.getVectorNumElements() && "Invalid vector index!");
SDValue Lo, Hi;
GetSplitVector(Vec, Lo, Hi);
uint64_t LoElts = Lo.getValueType().getVectorNumElements();
if (IdxVal < LoElts)
return SDValue(DAG.UpdateNodeOperands(N, Lo, Idx), 0);
return SDValue(DAG.UpdateNodeOperands(N, Hi,
DAG.getConstant(IdxVal - LoElts, SDLoc(N),
Idx.getValueType())), 0);
}
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getValueType(0), true))
return SDValue();
// Store the vector to the stack.
EVT EltVT = VecVT.getVectorElementType();
SDLoc dl(N);
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
MachinePointerInfo(), false, false, 0);
// Load back the required element.
StackPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
return DAG.getExtLoad(ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
MachinePointerInfo(), EltVT, false, false, false, 0);
}
SDValue DAGTypeLegalizer::SplitVecOp_MGATHER(MaskedGatherSDNode *MGT,
unsigned OpNo) {
EVT LoVT, HiVT;
SDLoc dl(MGT);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MGT->getValueType(0));
SDValue Ch = MGT->getChain();
SDValue Ptr = MGT->getBasePtr();
SDValue Index = MGT->getIndex();
SDValue Mask = MGT->getMask();
unsigned Alignment = MGT->getOriginalAlignment();
SDValue MaskLo, MaskHi;
std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
EVT MemoryVT = MGT->getMemoryVT();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue Src0Lo, Src0Hi;
std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(MGT->getValue(), dl);
SDValue IndexHi, IndexLo;
if (Index.getNode())
std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, dl);
else
IndexLo = IndexHi = Index;
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MGT->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MGT->getAAInfo(), MGT->getRanges());
SDValue OpsLo[] = {Ch, Src0Lo, MaskLo, Ptr, IndexLo};
SDValue Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoVT, dl,
OpsLo, MMO);
MMO = DAG.getMachineFunction().
getMachineMemOperand(MGT->getPointerInfo(),
MachineMemOperand::MOLoad, HiMemVT.getStoreSize(),
Alignment, MGT->getAAInfo(),
MGT->getRanges());
SDValue OpsHi[] = {Ch, Src0Hi, MaskHi, Ptr, IndexHi};
SDValue Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiVT, dl,
OpsHi, MMO);
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
  // The chain result has been legalized - switch anything that used the old
  // chain to use the new one.
ReplaceValueWith(SDValue(MGT, 1), Ch);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MGT->getValueType(0), Lo,
Hi);
ReplaceValueWith(SDValue(MGT, 0), Res);
return SDValue();
}
SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
unsigned OpNo) {
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
SDValue Mask = N->getMask();
SDValue Data = N->getValue();
EVT MemoryVT = N->getMemoryVT();
unsigned Alignment = N->getOriginalAlignment();
SDLoc DL(N);
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue DataLo, DataHi;
GetSplitVector(Data, DataLo, DataHi);
SDValue MaskLo, MaskHi;
GetSplitVector(Mask, MaskLo, MaskHi);
  // If the alignment equals the full vector size in bytes, use half of it
  // for the second half of the split access.
unsigned SecondHalfAlignment =
(Alignment == Data->getValueType(0).getSizeInBits()/8) ?
Alignment/2 : Alignment;
SDValue Lo, Hi;
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(N->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, N->getAAInfo(), N->getRanges());
Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, MaskLo, LoMemVT, MMO,
N->isTruncatingStore());
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, DL, Ptr.getValueType()));
MMO = DAG.getMachineFunction().
getMachineMemOperand(N->getPointerInfo(),
MachineMemOperand::MOStore, HiMemVT.getStoreSize(),
SecondHalfAlignment, N->getAAInfo(), N->getRanges());
Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, MaskHi, HiMemVT, MMO,
N->isTruncatingStore());
// Build a factor node to remember that this store is independent of the
// other one.
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
SDValue DAGTypeLegalizer::SplitVecOp_MSCATTER(MaskedScatterSDNode *N,
unsigned OpNo) {
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
SDValue Mask = N->getMask();
SDValue Index = N->getIndex();
SDValue Data = N->getValue();
EVT MemoryVT = N->getMemoryVT();
unsigned Alignment = N->getOriginalAlignment();
SDLoc DL(N);
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue DataLo, DataHi;
GetSplitVector(Data, DataLo, DataHi);
SDValue MaskLo, MaskHi;
GetSplitVector(Mask, MaskLo, MaskHi);
SDValue PtrLo, PtrHi;
  if (Ptr.getValueType().isVector()) // scatter from a vector of pointers
std::tie(PtrLo, PtrHi) = DAG.SplitVector(Ptr, DL);
else
PtrLo = PtrHi = Ptr;
SDValue IndexHi, IndexLo;
if (Index.getNode())
std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
else
IndexLo = IndexHi = Index;
SDValue Lo, Hi;
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(N->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, N->getAAInfo(), N->getRanges());
SDValue OpsLo[] = {Ch, DataLo, MaskLo, PtrLo, IndexLo};
Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(),
DL, OpsLo, MMO);
MMO = DAG.getMachineFunction().
getMachineMemOperand(N->getPointerInfo(),
MachineMemOperand::MOStore, HiMemVT.getStoreSize(),
Alignment, N->getAAInfo(), N->getRanges());
SDValue OpsHi[] = {Ch, DataHi, MaskHi, PtrHi, IndexHi};
Hi = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataHi.getValueType(),
DL, OpsHi, MMO);
// Build a factor node to remember that this store is independent of the
// other one.
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
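/// SplitVecOp_STORE - Split a store of a wide vector into two stores of the
/// halves, joined by a TokenFactor. Illustrative sketch (types are
/// hypothetical):
///   store %v:v8i32, %ptr  -->  store %v.lo:v4i32, %ptr
///                              store %v.hi:v4i32, (%ptr + 16)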
SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
assert(N->isUnindexed() && "Indexed store of vector?");
assert(OpNo == 1 && "Can only split the stored value");
SDLoc DL(N);
bool isTruncating = N->isTruncatingStore();
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
EVT MemoryVT = N->getMemoryVT();
unsigned Alignment = N->getOriginalAlignment();
bool isVol = N->isVolatile();
bool isNT = N->isNonTemporal();
AAMDNodes AAInfo = N->getAAInfo();
SDValue Lo, Hi;
GetSplitVector(N->getOperand(1), Lo, Hi);
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
if (isTruncating)
Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(),
LoMemVT, isVol, isNT, Alignment, AAInfo);
else
Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(),
isVol, isNT, Alignment, AAInfo);
// Increment the pointer to the other half.
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, DL, Ptr.getValueType()));
if (isTruncating)
Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
HiMemVT, isVol, isNT, Alignment, AAInfo);
else
Hi = DAG.getStore(Ch, DL, Hi, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
isVol, isNT, Alignment, AAInfo);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
SDLoc DL(N);
  // The input operands must all have the same type, and we know the result
  // type is valid. Convert this to a buildvector which extracts all the
  // input elements.
  // TODO: If the inputs are power-of-two sized vectors, we could convert this
  // to a new CONCAT_VECTORS node with operands that are half as wide.
SmallVector<SDValue, 32> Elts;
EVT EltVT = N->getValueType(0).getVectorElementType();
for (const SDValue &Op : N->op_values()) {
for (unsigned i = 0, e = Op.getValueType().getVectorNumElements();
i != e; ++i) {
Elts.push_back(DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Op,
DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
}
}
return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0), Elts);
}
SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
// The result type is legal, but the input type is illegal. If splitting
// ends up with the result type of each half still being legal, just
// do that. If, however, that would result in an illegal result type,
  // we can try to get more clever with power-of-two vectors. Specifically,
// split the input type, but also widen the result element size, then
// concatenate the halves and truncate again. For example, consider a target
// where v8i8 is legal and v8i32 is not (ARM, which doesn't have 256-bit
// vectors). To perform a "%res = v8i8 trunc v8i32 %in" we do:
// %inlo = v4i32 extract_subvector %in, 0
// %inhi = v4i32 extract_subvector %in, 4
// %lo16 = v4i16 trunc v4i32 %inlo
// %hi16 = v4i16 trunc v4i32 %inhi
// %in16 = v8i16 concat_vectors v4i16 %lo16, v4i16 %hi16
// %res = v8i8 trunc v8i16 %in16
//
// Without this transform, the original truncate would end up being
// scalarized, which is pretty much always a last resort.
SDValue InVec = N->getOperand(0);
EVT InVT = InVec->getValueType(0);
EVT OutVT = N->getValueType(0);
unsigned NumElements = OutVT.getVectorNumElements();
bool IsFloat = OutVT.isFloatingPoint();
  // Widening should have already made sure this is a power-of-two vector
  // if we're trying to split it at all. assert() that's true, just in case.
assert(!(NumElements & 1) && "Splitting vector, but not in half!");
unsigned InElementSize = InVT.getVectorElementType().getSizeInBits();
unsigned OutElementSize = OutVT.getVectorElementType().getSizeInBits();
// If the input elements are only 1/2 the width of the result elements,
  // just use the normal splitting. Our trick only works if there's room
// to split more than once.
if (InElementSize <= OutElementSize * 2)
return SplitVecOp_UnaryOp(N);
SDLoc DL(N);
// Extract the halves of the input via extract_subvector.
SDValue InLoVec, InHiVec;
std::tie(InLoVec, InHiVec) = DAG.SplitVector(InVec, DL);
// Truncate them to 1/2 the element size.
EVT HalfElementVT = IsFloat ?
EVT::getFloatingPointVT(InElementSize/2) :
EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT,
NumElements/2);
SDValue HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec);
SDValue HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec);
// Concatenate them to get the full intermediate truncation result.
EVT InterVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements);
SDValue InterVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InterVT, HalfLo,
HalfHi);
// Now finish up by truncating all the way down to the original result
// type. This should normally be something that ends up being legal directly,
// but in theory if a target has very wide vectors and an annoyingly
// restricted set of legal types, this split can chain to build things up.
return IsFloat
? DAG.getNode(ISD::FP_ROUND, DL, OutVT, InterVec,
DAG.getTargetConstant(
0, DL, TLI.getPointerTy(DAG.getDataLayout())))
: DAG.getNode(ISD::TRUNCATE, DL, OutVT, InterVec);
}
SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) {
assert(N->getValueType(0).isVector() &&
N->getOperand(0).getValueType().isVector() &&
"Operand types must be vectors");
// The result has a legal vector type, but the input needs splitting.
SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
SDLoc DL(N);
GetSplitVector(N->getOperand(0), Lo0, Hi0);
GetSplitVector(N->getOperand(1), Lo1, Hi1);
unsigned PartElements = Lo0.getValueType().getVectorNumElements();
EVT PartResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, PartElements);
EVT WideResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, 2*PartElements);
LoRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Lo0, Lo1, N->getOperand(2));
HiRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Hi0, Hi1, N->getOperand(2));
SDValue Con = DAG.getNode(ISD::CONCAT_VECTORS, DL, WideResVT, LoRes, HiRes);
return PromoteTargetBoolean(Con, N->getValueType(0));
}
SDValue DAGTypeLegalizer::SplitVecOp_FP_ROUND(SDNode *N) {
// The result has a legal vector type, but the input needs splitting.
EVT ResVT = N->getValueType(0);
SDValue Lo, Hi;
SDLoc DL(N);
GetSplitVector(N->getOperand(0), Lo, Hi);
EVT InVT = Lo.getValueType();
EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
InVT.getVectorNumElements());
Lo = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Lo, N->getOperand(1));
Hi = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Hi, N->getOperand(1));
return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi);
}
//===----------------------------------------------------------------------===//
// Result Vector Widening
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
DEBUG(dbgs() << "Widen node result " << ResNo << ": ";
N->dump(&DAG);
dbgs() << "\n");
// See if the target wants to custom widen this node.
if (CustomWidenLowerNode(N, N->getValueType(ResNo)))
return;
SDValue Res = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "WidenVectorResult #" << ResNo << ": ";
N->dump(&DAG);
dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to widen the result of this operator!");
case ISD::MERGE_VALUES: Res = WidenVecRes_MERGE_VALUES(N, ResNo); break;
case ISD::BITCAST: Res = WidenVecRes_BITCAST(N); break;
case ISD::BUILD_VECTOR: Res = WidenVecRes_BUILD_VECTOR(N); break;
case ISD::CONCAT_VECTORS: Res = WidenVecRes_CONCAT_VECTORS(N); break;
case ISD::CONVERT_RNDSAT: Res = WidenVecRes_CONVERT_RNDSAT(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecRes_EXTRACT_SUBVECTOR(N); break;
case ISD::FP_ROUND_INREG: Res = WidenVecRes_InregOp(N); break;
case ISD::INSERT_VECTOR_ELT: Res = WidenVecRes_INSERT_VECTOR_ELT(N); break;
case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
case ISD::SCALAR_TO_VECTOR: Res = WidenVecRes_SCALAR_TO_VECTOR(N); break;
case ISD::SIGN_EXTEND_INREG: Res = WidenVecRes_InregOp(N); break;
case ISD::VSELECT:
case ISD::SELECT: Res = WidenVecRes_SELECT(N); break;
case ISD::SELECT_CC: Res = WidenVecRes_SELECT_CC(N); break;
case ISD::SETCC: Res = WidenVecRes_SETCC(N); break;
case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
case ISD::VECTOR_SHUFFLE:
Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
break;
case ISD::MLOAD:
Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(N));
break;
case ISD::ADD:
case ISD::AND:
case ISD::MUL:
case ISD::MULHS:
case ISD::MULHU:
case ISD::OR:
case ISD::SUB:
case ISD::XOR:
case ISD::FMINNUM:
case ISD::FMAXNUM:
Res = WidenVecRes_Binary(N);
break;
case ISD::FADD:
case ISD::FCOPYSIGN:
case ISD::FMUL:
case ISD::FPOW:
case ISD::FSUB:
case ISD::FDIV:
case ISD::FREM:
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM:
Res = WidenVecRes_BinaryCanTrap(N);
break;
case ISD::FPOWI:
Res = WidenVecRes_POWI(N);
break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
Res = WidenVecRes_Shift(N);
break;
case ISD::ANY_EXTEND:
case ISD::FP_EXTEND:
case ISD::FP_ROUND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SIGN_EXTEND:
case ISD::SINT_TO_FP:
case ISD::TRUNCATE:
case ISD::UINT_TO_FP:
case ISD::ZERO_EXTEND:
Res = WidenVecRes_Convert(N);
break;
case ISD::BSWAP:
case ISD::CTLZ:
case ISD::CTPOP:
case ISD::CTTZ:
case ISD::FABS:
case ISD::FCEIL:
case ISD::FCOS:
case ISD::FEXP:
case ISD::FEXP2:
case ISD::FFLOOR:
case ISD::FLOG:
case ISD::FLOG10:
case ISD::FLOG2:
case ISD::FNEARBYINT:
case ISD::FNEG:
case ISD::FRINT:
case ISD::FROUND:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC:
Res = WidenVecRes_Unary(N);
break;
case ISD::FMA:
Res = WidenVecRes_Ternary(N);
break;
}
// If Res is null, the sub-method took care of registering the result.
if (Res.getNode())
SetWidenedVector(SDValue(N, ResNo), Res);
}
SDValue DAGTypeLegalizer::WidenVecRes_Ternary(SDNode *N) {
// Ternary op widening.
SDLoc dl(N);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
SDValue InOp3 = GetWidenedVector(N->getOperand(2));
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
}
SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
// Binary op widening.
SDLoc dl(N);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2);
}
SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) {
// Binary op widening for operations that can trap.
unsigned Opcode = N->getOpcode();
SDLoc dl(N);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
EVT WidenEltVT = WidenVT.getVectorElementType();
EVT VT = WidenVT;
unsigned NumElts = VT.getVectorNumElements();
while (!TLI.isTypeLegal(VT) && NumElts != 1) {
NumElts = NumElts / 2;
VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
}
if (NumElts != 1 && !TLI.canOpTrap(N->getOpcode(), VT)) {
// Operation doesn't trap so just widen as normal.
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2);
}
// No legal vector version so unroll the vector operation and then widen.
if (NumElts == 1)
return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
// Since the operation can trap, apply operation on the original vector.
EVT MaxVT = VT;
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
unsigned CurNumElts = N->getValueType(0).getVectorNumElements();
SmallVector<SDValue, 16> ConcatOps(CurNumElts);
unsigned ConcatEnd = 0; // Current ConcatOps index.
int Idx = 0; // Current Idx into input vectors.
// NumElts := greatest legal vector size (at most WidenVT)
// while (orig. vector has unhandled elements) {
// take munches of size NumElts from the beginning and add to ConcatOps
// NumElts := next smaller supported vector size or 1
// }
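// For illustration, assuming a hypothetical target where v4f32 is legal but
// v2f32 is not: widening a v6f32 FDIV to v8f32 munches one v4f32 subvector
// op, then two scalar f32 ops, leaving ConcatOps = { v4f32, f32, f32 } to be
// recombined below.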
while (CurNumElts != 0) {
while (CurNumElts >= NumElts) {
SDValue EOp1 = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1,
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue EOp2 = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, VT, InOp2,
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2);
Idx += NumElts;
CurNumElts -= NumElts;
}
do {
NumElts = NumElts / 2;
VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
} while (!TLI.isTypeLegal(VT) && NumElts != 1);
if (NumElts == 1) {
for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
SDValue EOp1 = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT, InOp1,
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue EOp2 = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT, InOp2,
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
EOp1, EOp2);
}
CurNumElts = 0;
}
}
// Check to see if we have a single operation with the widen type.
if (ConcatEnd == 1) {
VT = ConcatOps[0].getValueType();
if (VT == WidenVT)
return ConcatOps[0];
}
// while (Some element of ConcatOps is not of type MaxVT) {
// From the end of ConcatOps, collect elements of the same type and put
// them into an op of the next larger supported type
// }
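// Continuing the sketch above: the two trailing f32 scalars are inserted
// into an undef v4f32 via INSERT_VECTOR_ELT, after which both ConcatOps
// entries have type MaxVT (v4f32) and can be concatenated into v8f32.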
while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
Idx = ConcatEnd - 1;
VT = ConcatOps[Idx--].getValueType();
while (Idx >= 0 && ConcatOps[Idx].getValueType() == VT)
Idx--;
int NextSize = VT.isVector() ? VT.getVectorNumElements() : 1;
EVT NextVT;
do {
NextSize *= 2;
NextVT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NextSize);
} while (!TLI.isTypeLegal(NextVT));
if (!VT.isVector()) {
// Scalar type, create an INSERT_VECTOR_ELT of type NextVT
SDValue VecOp = DAG.getUNDEF(NextVT);
unsigned NumToInsert = ConcatEnd - Idx - 1;
for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
VecOp = DAG.getNode(
ISD::INSERT_VECTOR_ELT, dl, NextVT, VecOp, ConcatOps[OpIdx],
DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
ConcatOps[Idx+1] = VecOp;
ConcatEnd = Idx + 2;
} else {
// Vector type, create a CONCAT_VECTORS of type NextVT
SDValue undefVec = DAG.getUNDEF(VT);
unsigned OpsToConcat = NextSize/VT.getVectorNumElements();
SmallVector<SDValue, 16> SubConcatOps(OpsToConcat);
unsigned RealVals = ConcatEnd - Idx - 1;
unsigned SubConcatEnd = 0;
unsigned SubConcatIdx = Idx + 1;
while (SubConcatEnd < RealVals)
SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
while (SubConcatEnd < OpsToConcat)
SubConcatOps[SubConcatEnd++] = undefVec;
ConcatOps[SubConcatIdx] = DAG.getNode(ISD::CONCAT_VECTORS, dl,
NextVT, SubConcatOps);
ConcatEnd = SubConcatIdx + 1;
}
}
// Check to see if we have a single operation with the widen type.
if (ConcatEnd == 1) {
VT = ConcatOps[0].getValueType();
if (VT == WidenVT)
return ConcatOps[0];
}
// add undefs of size MaxVT until ConcatOps grows to length of WidenVT
unsigned NumOps = WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements();
if (NumOps != ConcatEnd) {
SDValue UndefVal = DAG.getUNDEF(MaxVT);
for (unsigned j = ConcatEnd; j < NumOps; ++j)
ConcatOps[j] = UndefVal;
}
return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
makeArrayRef(ConcatOps.data(), NumOps));
}
SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
SDValue InOp = N->getOperand(0);
SDLoc DL(N);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
EVT InVT = InOp.getValueType();
EVT InEltVT = InVT.getVectorElementType();
EVT InWidenVT = EVT::getVectorVT(*DAG.getContext(), InEltVT, WidenNumElts);
unsigned Opcode = N->getOpcode();
unsigned InVTNumElts = InVT.getVectorNumElements();
if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) {
InOp = GetWidenedVector(N->getOperand(0));
InVT = InOp.getValueType();
InVTNumElts = InVT.getVectorNumElements();
if (InVTNumElts == WidenNumElts) {
if (N->getNumOperands() == 1)
return DAG.getNode(Opcode, DL, WidenVT, InOp);
return DAG.getNode(Opcode, DL, WidenVT, InOp, N->getOperand(1));
}
}
if (TLI.isTypeLegal(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
// and then widening it. To avoid this, we widen the input only if
// it results in a legal type.
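// E.g. (hypothetically), a v2i32 -> v2f32 conversion whose result widens to
// v4f32: if v4i32 is legal, the v2i32 input is concatenated with a v2i32
// undef to form v4i32 and converted as a whole, rather than split.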
if (WidenNumElts % InVTNumElts == 0) {
// Widen the input and call convert on the widened input vector.
unsigned NumConcat = WidenNumElts/InVTNumElts;
SmallVector<SDValue, 16> Ops(NumConcat);
Ops[0] = InOp;
SDValue UndefVal = DAG.getUNDEF(InVT);
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = UndefVal;
SDValue InVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InWidenVT, Ops);
if (N->getNumOperands() == 1)
return DAG.getNode(Opcode, DL, WidenVT, InVec);
return DAG.getNode(Opcode, DL, WidenVT, InVec, N->getOperand(1));
}
if (InVTNumElts % WidenNumElts == 0) {
SDValue InVal = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, DL, InWidenVT, InOp,
DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
// Extract the input and convert the shortened input vector.
if (N->getNumOperands() == 1)
return DAG.getNode(Opcode, DL, WidenVT, InVal);
return DAG.getNode(Opcode, DL, WidenVT, InVal, N->getOperand(1));
}
}
// Otherwise unroll into some nasty scalar code and rebuild the vector.
SmallVector<SDValue, 16> Ops(WidenNumElts);
EVT EltVT = WidenVT.getVectorElementType();
unsigned MinElts = std::min(InVTNumElts, WidenNumElts);
unsigned i;
for (i=0; i < MinElts; ++i) {
SDValue Val = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, DL, InEltVT, InOp,
DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
if (N->getNumOperands() == 1)
Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val);
else
Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val, N->getOperand(1));
}
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, DL, WidenVT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecRes_POWI(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp = GetWidenedVector(N->getOperand(0));
SDValue ShOp = N->getOperand(1);
return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, ShOp);
}
SDValue DAGTypeLegalizer::WidenVecRes_Shift(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp = GetWidenedVector(N->getOperand(0));
SDValue ShOp = N->getOperand(1);
EVT ShVT = ShOp.getValueType();
if (getTypeAction(ShVT) == TargetLowering::TypeWidenVector) {
ShOp = GetWidenedVector(ShOp);
ShVT = ShOp.getValueType();
}
EVT ShWidenVT = EVT::getVectorVT(*DAG.getContext(),
ShVT.getVectorElementType(),
WidenVT.getVectorNumElements());
if (ShVT != ShWidenVT)
ShOp = ModifyToType(ShOp, ShWidenVT);
return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, ShOp);
}
SDValue DAGTypeLegalizer::WidenVecRes_Unary(SDNode *N) {
// Unary op widening.
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp = GetWidenedVector(N->getOperand(0));
return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp);
}
SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
cast<VTSDNode>(N->getOperand(1))->getVT()
.getVectorElementType(),
WidenVT.getVectorNumElements());
SDValue WidenLHS = GetWidenedVector(N->getOperand(0));
return DAG.getNode(N->getOpcode(), SDLoc(N),
WidenVT, WidenLHS, DAG.getValueType(ExtVT));
}
SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo) {
SDValue WidenVec = DisintegrateMERGE_VALUES(N, ResNo);
return GetWidenedVector(WidenVec);
}
SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
SDValue InOp = N->getOperand(0);
EVT InVT = InOp.getValueType();
EVT VT = N->getValueType(0);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDLoc dl(N);
switch (getTypeAction(InVT)) {
case TargetLowering::TypeLegal:
break;
case TargetLowering::TypePromoteInteger:
// If the incoming type is a vector that is being promoted, then
// we know that the elements are arranged differently and that we
// must perform the conversion using a stack slot.
if (InVT.isVector())
break;
// If the InOp is promoted to the same size, convert it. Otherwise,
// fall out of the switch and widen the promoted input.
InOp = GetPromotedInteger(InOp);
InVT = InOp.getValueType();
if (WidenVT.bitsEq(InVT))
return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
break;
case TargetLowering::TypeSoftenFloat:
case TargetLowering::TypePromoteFloat:
case TargetLowering::TypeExpandInteger:
case TargetLowering::TypeExpandFloat:
case TargetLowering::TypeScalarizeVector:
case TargetLowering::TypeSplitVector:
break;
case TargetLowering::TypeWidenVector:
// If the InOp is widened to the same size, convert it. Otherwise, fall
// out of the switch and widen the widened input.
InOp = GetWidenedVector(InOp);
InVT = InOp.getValueType();
if (WidenVT.bitsEq(InVT))
// The input widens to the same size. Convert to the widen value.
return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
break;
}
unsigned WidenSize = WidenVT.getSizeInBits();
unsigned InSize = InVT.getSizeInBits();
// x86mmx is not an acceptable vector element type, so don't try.
if (WidenSize % InSize == 0 && InVT != MVT::x86mmx) {
// Determine new input vector type. The new input vector type will use
// the same element type (if it's a vector) or use the input type as a
// vector. It is the same size as the type to widen to.
EVT NewInVT;
unsigned NewNumElts = WidenSize / InSize;
if (InVT.isVector()) {
EVT InEltVT = InVT.getVectorElementType();
NewInVT = EVT::getVectorVT(*DAG.getContext(), InEltVT,
WidenSize / InEltVT.getSizeInBits());
} else {
NewInVT = EVT::getVectorVT(*DAG.getContext(), InVT, NewNumElts);
}
if (TLI.isTypeLegal(NewInVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
// and then widening it. To avoid this, we widen the input only if
// it results in a legal type.
SmallVector<SDValue, 16> Ops(NewNumElts);
SDValue UndefVal = DAG.getUNDEF(InVT);
Ops[0] = InOp;
for (unsigned i = 1; i < NewNumElts; ++i)
Ops[i] = UndefVal;
SDValue NewVec;
if (InVT.isVector())
NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewInVT, Ops);
else
NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl, NewInVT, Ops);
return DAG.getNode(ISD::BITCAST, dl, WidenVT, NewVec);
}
}
return CreateStackStoreLoad(InOp, WidenVT);
}
SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
SDLoc dl(N);
// Build a vector with undefined for the new nodes.
EVT VT = N->getValueType(0);
// Integer BUILD_VECTOR operands may be larger than the node's vector element
// type. The UNDEFs need to have the same type as the existing operands.
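// E.g. (hypothetically) a v2i8 BUILD_VECTOR may carry i32 operands; widening
// it to v4i8 must append i32 undefs, not i8 ones, so the operand list stays
// homogeneous.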
EVT EltVT = N->getOperand(0).getValueType();
unsigned NumElts = VT.getVectorNumElements();
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SmallVector<SDValue, 16> NewOps(N->op_begin(), N->op_end());
assert(WidenNumElts >= NumElts && "Shrinking vector instead of widening!");
NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, NewOps);
}
SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
EVT InVT = N->getOperand(0).getValueType();
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
unsigned WidenNumElts = WidenVT.getVectorNumElements();
unsigned NumInElts = InVT.getVectorNumElements();
unsigned NumOperands = N->getNumOperands();
bool InputWidened = false; // Indicates we need to widen the input.
if (getTypeAction(InVT) != TargetLowering::TypeWidenVector) {
if (WidenVT.getVectorNumElements() % InVT.getVectorNumElements() == 0) {
// Add undef vectors to widen to correct length.
unsigned NumConcat = WidenVT.getVectorNumElements() /
InVT.getVectorNumElements();
SDValue UndefVal = DAG.getUNDEF(InVT);
SmallVector<SDValue, 16> Ops(NumConcat);
for (unsigned i=0; i < NumOperands; ++i)
Ops[i] = N->getOperand(i);
for (unsigned i = NumOperands; i != NumConcat; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, Ops);
}
} else {
InputWidened = true;
if (WidenVT == TLI.getTypeToTransformTo(*DAG.getContext(), InVT)) {
// The inputs and the result are widened to the same type.
unsigned i;
for (i=1; i < NumOperands; ++i)
if (N->getOperand(i).getOpcode() != ISD::UNDEF)
break;
if (i == NumOperands)
// Everything but the first operand is an UNDEF so just return the
// widened first operand.
return GetWidenedVector(N->getOperand(0));
if (NumOperands == 2) {
// Replace concat of two operands with a shuffle.
SmallVector<int, 16> MaskOps(WidenNumElts, -1);
for (unsigned i = 0; i < NumInElts; ++i) {
MaskOps[i] = i;
MaskOps[i + NumInElts] = i + WidenNumElts;
}
return DAG.getVectorShuffle(WidenVT, dl,
GetWidenedVector(N->getOperand(0)),
GetWidenedVector(N->getOperand(1)),
&MaskOps[0]);
}
}
}
// Fall back to use extracts and build vector.
EVT EltVT = WidenVT.getVectorElementType();
SmallVector<SDValue, 16> Ops(WidenNumElts);
unsigned Idx = 0;
for (unsigned i=0; i < NumOperands; ++i) {
SDValue InOp = N->getOperand(i);
if (InputWidened)
InOp = GetWidenedVector(InOp);
for (unsigned j=0; j < NumInElts; ++j)
Ops[Idx++] = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
DAG.getConstant(j, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; Idx < WidenNumElts; ++Idx)
Ops[Idx] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
SDLoc dl(N);
SDValue InOp = N->getOperand(0);
SDValue RndOp = N->getOperand(3);
SDValue SatOp = N->getOperand(4);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
EVT InVT = InOp.getValueType();
EVT InEltVT = InVT.getVectorElementType();
EVT InWidenVT = EVT::getVectorVT(*DAG.getContext(), InEltVT, WidenNumElts);
SDValue DTyOp = DAG.getValueType(WidenVT);
SDValue STyOp = DAG.getValueType(InWidenVT);
ISD::CvtCode CvtCode = cast<CvtRndSatSDNode>(N)->getCvtCode();
unsigned InVTNumElts = InVT.getVectorNumElements();
if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) {
InOp = GetWidenedVector(InOp);
InVT = InOp.getValueType();
InVTNumElts = InVT.getVectorNumElements();
if (InVTNumElts == WidenNumElts)
return DAG.getConvertRndSat(WidenVT, dl, InOp, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
if (TLI.isTypeLegal(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
// and then widening it. To avoid this, we widen the input only if
// it results in a legal type.
if (WidenNumElts % InVTNumElts == 0) {
// Widen the input and call convert on the widened input vector.
unsigned NumConcat = WidenNumElts/InVTNumElts;
SmallVector<SDValue, 16> Ops(NumConcat);
Ops[0] = InOp;
SDValue UndefVal = DAG.getUNDEF(InVT);
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = UndefVal;
InOp = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWidenVT, Ops);
return DAG.getConvertRndSat(WidenVT, dl, InOp, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
if (InVTNumElts % WidenNumElts == 0) {
// Extract the input and convert the shortened input vector.
InOp = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, InWidenVT, InOp,
DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
return DAG.getConvertRndSat(WidenVT, dl, InOp, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
}
// Otherwise unroll into some nasty scalar code and rebuild the vector.
SmallVector<SDValue, 16> Ops(WidenNumElts);
EVT EltVT = WidenVT.getVectorElementType();
DTyOp = DAG.getValueType(EltVT);
STyOp = DAG.getValueType(InEltVT);
unsigned MinElts = std::min(InVTNumElts, WidenNumElts);
unsigned i;
for (i=0; i < MinElts; ++i) {
SDValue ExtVal = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
Ops[i] = DAG.getConvertRndSat(WidenVT, dl, ExtVal, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
EVT VT = N->getValueType(0);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SDValue InOp = N->getOperand(0);
SDValue Idx = N->getOperand(1);
SDLoc dl(N);
if (getTypeAction(InOp.getValueType()) == TargetLowering::TypeWidenVector)
InOp = GetWidenedVector(InOp);
EVT InVT = InOp.getValueType();
// Check if we can just return the input vector after widening.
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
if (IdxVal == 0 && InVT == WidenVT)
return InOp;
// Check if we can extract from the vector.
unsigned InNumElts = InVT.getVectorNumElements();
if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, WidenVT, InOp, Idx);
// We could try widening the input to the right length but for now, extract
// the original elements, fill the rest with undefs and build a vector.
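// E.g. (hypothetically) extracting a v3i32 (widened to v4i32) at index 2
// from a v8i32: the index is not a multiple of the widened length, so the
// three elements are extracted individually and the tail padded with undef.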
SmallVector<SDValue, 16> Ops(WidenNumElts);
EVT EltVT = VT.getVectorElementType();
unsigned NumElts = VT.getVectorNumElements();
unsigned i;
for (i=0; i < NumElts; ++i)
Ops[i] =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
DAG.getConstant(IdxVal + i, dl,
TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
SDValue InOp = GetWidenedVector(N->getOperand(0));
return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N),
InOp.getValueType(), InOp,
N->getOperand(1), N->getOperand(2));
}
SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::LoadExtType ExtType = LD->getExtensionType();
SDValue Result;
SmallVector<SDValue, 16> LdChain; // Chain for the series of loads
if (ExtType != ISD::NON_EXTLOAD)
Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
else
Result = GenWidenVectorLoads(LdChain, LD);
// If we generate a single load, we can use that for the chain. Otherwise,
// build a factor node to remember the multiple loads are independent and
// chain to that.
SDValue NewChain;
if (LdChain.size() == 1)
NewChain = LdChain[0];
else
NewChain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other, LdChain);
// Modified the chain - switch anything that used the old chain to use
// the new one.
ReplaceValueWith(SDValue(N, 1), NewChain);
return Result;
}
SDValue DAGTypeLegalizer::WidenVecRes_MLOAD(MaskedLoadSDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),N->getValueType(0));
SDValue Mask = N->getMask();
EVT MaskVT = Mask.getValueType();
SDValue Src0 = GetWidenedVector(N->getSrc0());
ISD::LoadExtType ExtType = N->getExtensionType();
SDLoc dl(N);
if (getTypeAction(MaskVT) == TargetLowering::TypeWidenVector)
Mask = GetWidenedVector(Mask);
else {
EVT BoolVT = getSetCCResultType(WidenVT);
// We can't use ModifyToType() because we should fill the mask with
// zeroes
unsigned WidenNumElts = BoolVT.getVectorNumElements();
unsigned MaskNumElts = MaskVT.getVectorNumElements();
unsigned NumConcat = WidenNumElts / MaskNumElts;
SmallVector<SDValue, 16> Ops(NumConcat);
SDValue ZeroVal = DAG.getConstant(0, dl, MaskVT);
Ops[0] = Mask;
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = ZeroVal;
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, BoolVT, Ops);
}
SDValue Res = DAG.getMaskedLoad(WidenVT, dl, N->getChain(), N->getBasePtr(),
Mask, Src0, N->getMemoryVT(),
N->getMemOperand(), ExtType);
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
return Res;
}
SDValue DAGTypeLegalizer::WidenVecRes_SCALAR_TO_VECTOR(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N),
WidenVT, N->getOperand(0));
}
SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SDValue Cond1 = N->getOperand(0);
EVT CondVT = Cond1.getValueType();
if (CondVT.isVector()) {
EVT CondEltVT = CondVT.getVectorElementType();
EVT CondWidenVT = EVT::getVectorVT(*DAG.getContext(),
CondEltVT, WidenNumElts);
if (getTypeAction(CondVT) == TargetLowering::TypeWidenVector)
Cond1 = GetWidenedVector(Cond1);
// If we have to split the condition there is no point in widening the
// select. This would result in a cycle of widening the select ->
// widening the condition operand -> splitting the condition operand ->
// splitting the select -> widening the select. Instead split this select
// further and widen the resulting type.
if (getTypeAction(CondVT) == TargetLowering::TypeSplitVector) {
SDValue SplitSelect = SplitVecOp_VSELECT(N, 0);
SDValue Res = ModifyToType(SplitSelect, WidenVT);
return Res;
}
if (Cond1.getValueType() != CondWidenVT)
Cond1 = ModifyToType(Cond1, CondWidenVT);
}
SDValue InOp1 = GetWidenedVector(N->getOperand(1));
SDValue InOp2 = GetWidenedVector(N->getOperand(2));
assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
return DAG.getNode(N->getOpcode(), SDLoc(N),
WidenVT, Cond1, InOp1, InOp2);
}
SDValue DAGTypeLegalizer::WidenVecRes_SELECT_CC(SDNode *N) {
SDValue InOp1 = GetWidenedVector(N->getOperand(2));
SDValue InOp2 = GetWidenedVector(N->getOperand(3));
return DAG.getNode(ISD::SELECT_CC, SDLoc(N),
InOp1.getValueType(), N->getOperand(0),
N->getOperand(1), InOp1, InOp2, N->getOperand(4));
}
SDValue DAGTypeLegalizer::WidenVecRes_SETCC(SDNode *N) {
assert(N->getValueType(0).isVector() ==
N->getOperand(0).getValueType().isVector() &&
"Scalar/Vector type mismatch");
if (N->getValueType(0).isVector()) return WidenVecRes_VSETCC(N);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
return DAG.getNode(ISD::SETCC, SDLoc(N), WidenVT,
InOp1, InOp2, N->getOperand(2));
}
SDValue DAGTypeLegalizer::WidenVecRes_UNDEF(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getUNDEF(WidenVT);
}
SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N) {
EVT VT = N->getValueType(0);
SDLoc dl(N);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
unsigned NumElts = VT.getVectorNumElements();
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
// Adjust mask based on new input vector length.
SmallVector<int, 16> NewMask;
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = N->getMaskElt(i);
if (Idx < (int)NumElts)
NewMask.push_back(Idx);
else
NewMask.push_back(Idx - NumElts + WidenNumElts);
}
for (unsigned i = NumElts; i != WidenNumElts; ++i)
NewMask.push_back(-1);
return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, &NewMask[0]);
}
SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) {
assert(N->getValueType(0).isVector() &&
N->getOperand(0).getValueType().isVector() &&
"Operands must be vectors");
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SDValue InOp1 = N->getOperand(0);
EVT InVT = InOp1.getValueType();
assert(InVT.isVector() && "Cannot widen a non-vector type");
EVT WidenInVT = EVT::getVectorVT(*DAG.getContext(),
InVT.getVectorElementType(), WidenNumElts);
// The input and output types often differ here, and it could be that while
// we'd prefer to widen the result type, the input operands have been split.
// In this case, we also need to split the result of this node as well.
if (getTypeAction(InVT) == TargetLowering::TypeSplitVector) {
SDValue SplitVSetCC = SplitVecOp_VSETCC(N);
SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
return Res;
}
InOp1 = GetWidenedVector(InOp1);
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
// Assume that the input and output will be widened appropriately. If not,
// we will have to unroll it at some point.
assert(InOp1.getValueType() == WidenInVT &&
InOp2.getValueType() == WidenInVT &&
"Input not widened to expected type!");
(void)WidenInVT;
return DAG.getNode(ISD::SETCC, SDLoc(N),
WidenVT, InOp1, InOp2, N->getOperand(2));
}
//===----------------------------------------------------------------------===//
// Widen Vector Operand
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
DEBUG(dbgs() << "Widen node operand " << OpNo << ": ";
N->dump(&DAG);
dbgs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom widen this node.
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
return false;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "WidenVectorOperand op #" << OpNo << ": ";
N->dump(&DAG);
dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to widen this operator's operand!");
case ISD::BITCAST: Res = WidenVecOp_BITCAST(N); break;
case ISD::CONCAT_VECTORS: Res = WidenVecOp_CONCAT_VECTORS(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::STORE: Res = WidenVecOp_STORE(N); break;
case ISD::MSTORE: Res = WidenVecOp_MSTORE(N, OpNo); break;
case ISD::SETCC: Res = WidenVecOp_SETCC(N); break;
case ISD::ANY_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
Res = WidenVecOp_EXTEND(N);
break;
case ISD::FP_EXTEND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::TRUNCATE:
Res = WidenVecOp_Convert(N);
break;
}
// If Res is null, the sub-method took care of registering the result.
if (!Res.getNode()) return false;
// If the result is N, the sub-method updated N in place. Tell the legalizer
// core about this.
if (Res.getNode() == N)
return true;
assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
"Invalid operand expansion");
ReplaceValueWith(SDValue(N, 0), Res);
return false;
}
SDValue DAGTypeLegalizer::WidenVecOp_EXTEND(SDNode *N) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue InOp = N->getOperand(0);
// If some legalization strategy other than widening is used on the operand,
// we can't safely assume that just extending the low lanes is the correct
// transformation.
if (getTypeAction(InOp.getValueType()) != TargetLowering::TypeWidenVector)
return WidenVecOp_Convert(N);
InOp = GetWidenedVector(InOp);
assert(VT.getVectorNumElements() <
InOp.getValueType().getVectorNumElements() &&
"Input wasn't widened!");
// We may need to further widen the operand until it has the same total
// vector size as the result.
EVT InVT = InOp.getValueType();
if (InVT.getSizeInBits() != VT.getSizeInBits()) {
EVT InEltVT = InVT.getVectorElementType();
for (int i = MVT::FIRST_VECTOR_VALUETYPE, e = MVT::LAST_VECTOR_VALUETYPE;
     i < e; ++i) {
EVT FixedVT = (MVT::SimpleValueType)i;
EVT FixedEltVT = FixedVT.getVectorElementType();
if (TLI.isTypeLegal(FixedVT) &&
FixedVT.getSizeInBits() == VT.getSizeInBits() &&
FixedEltVT == InEltVT) {
assert(FixedVT.getVectorNumElements() >= VT.getVectorNumElements() &&
"Not enough elements in the fixed type for the operand!");
assert(FixedVT.getVectorNumElements() != InVT.getVectorNumElements() &&
"We can't have the same type as we started with!");
if (FixedVT.getVectorNumElements() > InVT.getVectorNumElements())
InOp = DAG.getNode(
ISD::INSERT_SUBVECTOR, DL, FixedVT, DAG.getUNDEF(FixedVT), InOp,
DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
else
InOp = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, DL, FixedVT, InOp,
DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
break;
}
}
InVT = InOp.getValueType();
if (InVT.getSizeInBits() != VT.getSizeInBits())
// We couldn't find a legal vector type that was a widening of the input
// and could be extended in-register to the result type, so we have to
// scalarize.
return WidenVecOp_Convert(N);
}
// Use special DAG nodes to represent the operation of extending the
// low lanes.
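// E.g. (hypothetically) a v4i8 -> v4i32 SIGN_EXTEND whose operand was
// widened to v16i8 becomes a SIGN_EXTEND_VECTOR_INREG of the v16i8 value,
// which extends only the low 4 lanes.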
switch (N->getOpcode()) {
default:
llvm_unreachable("Extend legalization on on extend operation!");
case ISD::ANY_EXTEND:
return DAG.getAnyExtendVectorInReg(InOp, DL, VT);
case ISD::SIGN_EXTEND:
return DAG.getSignExtendVectorInReg(InOp, DL, VT);
case ISD::ZERO_EXTEND:
return DAG.getZeroExtendVectorInReg(InOp, DL, VT);
}
}
SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
// Since the result is legal and the input is illegal, it is unlikely
// that we can fix the input to a legal type so unroll the convert
// into some scalar code and create a nasty build vector.
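// E.g. (hypothetically) a v3f32 -> v3i32 FP_TO_SINT whose operand was
// widened to v4f32 is unrolled into three scalar conversions and rebuilt
// with a v3i32 BUILD_VECTOR.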
EVT VT = N->getValueType(0);
EVT EltVT = VT.getVectorElementType();
SDLoc dl(N);
unsigned NumElts = VT.getVectorNumElements();
SDValue InOp = N->getOperand(0);
if (getTypeAction(InOp.getValueType()) == TargetLowering::TypeWidenVector)
InOp = GetWidenedVector(InOp);
EVT InVT = InOp.getValueType();
EVT InEltVT = InVT.getVectorElementType();
unsigned Opcode = N->getOpcode();
SmallVector<SDValue, 16> Ops(NumElts);
for (unsigned i=0; i < NumElts; ++i)
Ops[i] = DAG.getNode(
Opcode, dl, EltVT,
DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))));
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
EVT VT = N->getValueType(0);
SDValue InOp = GetWidenedVector(N->getOperand(0));
EVT InWidenVT = InOp.getValueType();
SDLoc dl(N);
// Check if we can convert between two legal vector types and extract.
unsigned InWidenSize = InWidenVT.getSizeInBits();
unsigned Size = VT.getSizeInBits();
// x86mmx is not an acceptable vector element type, so don't try.
if (InWidenSize % Size == 0 && !VT.isVector() && VT != MVT::x86mmx) {
unsigned NewNumElts = InWidenSize / Size;
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
if (TLI.isTypeLegal(NewVT)) {
SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp);
return DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
}
return CreateStackStoreLoad(InOp, VT);
}
SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
// If the input vector is not legal, it is likely that we will not find a
// legal vector of the same size. Replace the concatenate vector with a
// nasty build vector.
EVT VT = N->getValueType(0);
EVT EltVT = VT.getVectorElementType();
SDLoc dl(N);
unsigned NumElts = VT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(NumElts);
EVT InVT = N->getOperand(0).getValueType();
unsigned NumInElts = InVT.getVectorNumElements();
unsigned Idx = 0;
unsigned NumOperands = N->getNumOperands();
for (unsigned i=0; i < NumOperands; ++i) {
SDValue InOp = N->getOperand(i);
if (getTypeAction(InOp.getValueType()) == TargetLowering::TypeWidenVector)
InOp = GetWidenedVector(InOp);
for (unsigned j=0; j < NumInElts; ++j)
Ops[Idx++] = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
DAG.getConstant(j, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
SDValue InOp = GetWidenedVector(N->getOperand(0));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N),
N->getValueType(0), InOp, N->getOperand(1));
}
SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDValue InOp = GetWidenedVector(N->getOperand(0));
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
N->getValueType(0), InOp, N->getOperand(1));
}
SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
// We have to widen the value but we want only to store the original
// vector type.
StoreSDNode *ST = cast<StoreSDNode>(N);
SmallVector<SDValue, 16> StChain;
if (ST->isTruncatingStore())
GenWidenVectorTruncStores(StChain, ST);
else
GenWidenVectorStores(StChain, ST);
if (StChain.size() == 1)
return StChain[0];
else
return DAG.getNode(ISD::TokenFactor, SDLoc(ST), MVT::Other, StChain);
}
SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) {
MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
SDValue Mask = MST->getMask();
EVT MaskVT = Mask.getValueType();
SDValue StVal = MST->getValue();
// Widen the value
SDValue WideVal = GetWidenedVector(StVal);
SDLoc dl(N);
if (OpNo == 2 || getTypeAction(MaskVT) == TargetLowering::TypeWidenVector)
Mask = GetWidenedVector(Mask);
else {
// The mask should be widened as well
EVT BoolVT = getSetCCResultType(WideVal.getValueType());
// We can't use ModifyToType() because we should fill the mask with
// zeroes
unsigned WidenNumElts = BoolVT.getVectorNumElements();
unsigned MaskNumElts = MaskVT.getVectorNumElements();
unsigned NumConcat = WidenNumElts / MaskNumElts;
SmallVector<SDValue, 16> Ops(NumConcat);
SDValue ZeroVal = DAG.getConstant(0, dl, MaskVT);
Ops[0] = Mask;
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = ZeroVal;
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, BoolVT, Ops);
}
assert(Mask.getValueType().getVectorNumElements() ==
WideVal.getValueType().getVectorNumElements() &&
"Mask and data vectors should have the same number of elements");
return DAG.getMaskedStore(MST->getChain(), dl, WideVal, MST->getBasePtr(),
Mask, MST->getMemoryVT(), MST->getMemOperand(),
false);
}
SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
SDValue InOp0 = GetWidenedVector(N->getOperand(0));
SDValue InOp1 = GetWidenedVector(N->getOperand(1));
SDLoc dl(N);
// WARNING: In this code we widen the compare instruction with garbage.
// This garbage may contain denormal floats which may be slow. Is this a real
// concern? Should we zero the unused lanes if this is a float compare?
// Get a new SETCC node to compare the newly widened operands.
// Only some of the compared elements are legal.
EVT SVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
InOp0.getValueType());
SDValue WideSETCC = DAG.getNode(ISD::SETCC, SDLoc(N),
SVT, InOp0, InOp1, N->getOperand(2));
// Extract the needed results from the result vector.
EVT ResVT = EVT::getVectorVT(*DAG.getContext(),
SVT.getVectorElementType(),
N->getValueType(0).getVectorNumElements());
SDValue CC = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, ResVT, WideSETCC,
DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
return PromoteTargetBoolean(CC, N->getValueType(0));
}
//===----------------------------------------------------------------------===//
// Vector Widening Utilities
//===----------------------------------------------------------------------===//
// Utility function to find the type to chop up a widen vector for load/store
// TLI: Target lowering used to determine legal types.
// Width: Width left needed to load/store.
// WidenVT: The widen vector type to load to/store from
// Align: If 0, don't allow use of a wider type
// WidenEx: If Align is not 0, the additional amount we can load/store from.
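// For example, assuming a target where i64 and v4i32 are legal: a request
// for Width = 96 with WidenVT = v4i32 would return i64 (the widest legal
// type that still fits in 96 bits), unless Align/WidenEx permit loading the
// full 128-bit v4i32.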
static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
unsigned Width, EVT WidenVT,
unsigned Align = 0, unsigned WidenEx = 0) {
EVT WidenEltVT = WidenVT.getVectorElementType();
unsigned WidenWidth = WidenVT.getSizeInBits();
unsigned WidenEltWidth = WidenEltVT.getSizeInBits();
unsigned AlignInBits = Align*8;
// If we have one element to load/store, return it.
EVT RetVT = WidenEltVT;
if (Width == WidenEltWidth)
return RetVT;
// See if there is larger legal integer than the element type to load/store
unsigned VT;
for (VT = (unsigned)MVT::LAST_INTEGER_VALUETYPE;
VT >= (unsigned)MVT::FIRST_INTEGER_VALUETYPE; --VT) {
EVT MemVT((MVT::SimpleValueType) VT);
unsigned MemVTWidth = MemVT.getSizeInBits();
if (MemVT.getSizeInBits() <= WidenEltWidth)
break;
auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT);
if ((Action == TargetLowering::TypeLegal ||
Action == TargetLowering::TypePromoteInteger) &&
(WidenWidth % MemVTWidth) == 0 &&
isPowerOf2_32(WidenWidth / MemVTWidth) &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
RetVT = MemVT;
break;
}
}
// See if there is a larger vector type to load/store that has the same vector
// element type and is evenly divisible with the WidenVT.
for (VT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
VT >= (unsigned)MVT::FIRST_VECTOR_VALUETYPE; --VT) {
EVT MemVT = (MVT::SimpleValueType) VT;
unsigned MemVTWidth = MemVT.getSizeInBits();
if (TLI.isTypeLegal(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
(WidenWidth % MemVTWidth) == 0 &&
isPowerOf2_32(WidenWidth / MemVTWidth) &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
if (RetVT.getSizeInBits() < MemVTWidth || MemVT == WidenVT)
return MemVT;
}
}
return RetVT;
}
// Builds a vector type from scalar loads
// VecTy: Resulting Vector type
// LdOps: Load operators to build a vector type
// [Start,End) the list of loads to use.
static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
SmallVectorImpl<SDValue> &LdOps,
unsigned Start, unsigned End) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDLoc dl(LdOps[Start]);
EVT LdTy = LdOps[Start].getValueType();
unsigned Width = VecTy.getSizeInBits();
unsigned NumElts = Width / LdTy.getSizeInBits();
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), LdTy, NumElts);
unsigned Idx = 1;
SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT,LdOps[Start]);
for (unsigned i = Start + 1; i != End; ++i) {
EVT NewLdTy = LdOps[i].getValueType();
if (NewLdTy != LdTy) {
NumElts = Width / NewLdTy.getSizeInBits();
NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewLdTy, NumElts);
VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, VecOp);
// Readjust the insert position based on the new load type.
Idx = Idx * LdTy.getSizeInBits() / NewLdTy.getSizeInBits();
LdTy = NewLdTy;
}
VecOp = DAG.getNode(
ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i],
DAG.getConstant(Idx++, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
}
SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD) {
// The strategy assumes that we can efficiently load power-of-two widths.
// The routine chops the vector into the largest vector loads with the same
// element type, or scalar loads, and then recombines the pieces into the
// widened vector type.
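// Sketch, assuming legal v4i32: a v6i32 load widening to v8i32 first loads a
// v4i32, then covers the remaining 64 bits with smaller loads found by
// FindMemType, and concatenates the pieces below.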
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),LD->getValueType(0));
unsigned WidenWidth = WidenVT.getSizeInBits();
EVT LdVT = LD->getMemoryVT();
SDLoc dl(LD);
assert(LdVT.isVector() && WidenVT.isVector());
assert(LdVT.getVectorElementType() == WidenVT.getVectorElementType());
// Load information
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
unsigned Align = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
bool isInvariant = LD->isInvariant();
AAMDNodes AAInfo = LD->getAAInfo();
int LdWidth = LdVT.getSizeInBits();
int WidthDiff = WidenWidth - LdWidth; // Difference
unsigned LdAlign = (isVolatile) ? 0 : Align; // Allow wider loads
// Find the vector type that we can load with.
EVT NewVT = FindMemType(DAG, TLI, LdWidth, WidenVT, LdAlign, WidthDiff);
int NewVTWidth = NewVT.getSizeInBits();
SDValue LdOp = DAG.getLoad(NewVT, dl, Chain, BasePtr, LD->getPointerInfo(),
isVolatile, isNonTemporal, isInvariant, Align,
AAInfo);
LdChain.push_back(LdOp.getValue(1));
// Check if we can load the element with one instruction
if (LdWidth <= NewVTWidth) {
if (!NewVT.isVector()) {
unsigned NumElts = WidenWidth / NewVTWidth;
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
}
if (NewVT == WidenVT)
return LdOp;
assert(WidenWidth % NewVTWidth == 0);
unsigned NumConcat = WidenWidth / NewVTWidth;
SmallVector<SDValue, 16> ConcatOps(NumConcat);
SDValue UndefVal = DAG.getUNDEF(NewVT);
ConcatOps[0] = LdOp;
for (unsigned i = 1; i != NumConcat; ++i)
ConcatOps[i] = UndefVal;
return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, ConcatOps);
}
// Load the vector using multiple loads, from the largest vector type down
// to scalar.
SmallVector<SDValue, 16> LdOps;
LdOps.push_back(LdOp);
LdWidth -= NewVTWidth;
unsigned Offset = 0;
while (LdWidth > 0) {
unsigned Increment = NewVTWidth / 8;
Offset += Increment;
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getConstant(Increment, dl, BasePtr.getValueType()));
SDValue L;
if (LdWidth < NewVTWidth) {
// The type we are currently using is too large; find a smaller one.
NewVT = FindMemType(DAG, TLI, LdWidth, WidenVT, LdAlign, WidthDiff);
NewVTWidth = NewVT.getSizeInBits();
L = DAG.getLoad(NewVT, dl, Chain, BasePtr,
LD->getPointerInfo().getWithOffset(Offset), isVolatile,
isNonTemporal, isInvariant, MinAlign(Align, Increment),
AAInfo);
LdChain.push_back(L.getValue(1));
if (L->getValueType(0).isVector()) {
SmallVector<SDValue, 16> Loads;
Loads.push_back(L);
unsigned size = L->getValueSizeInBits(0);
while (size < LdOp->getValueSizeInBits(0)) {
Loads.push_back(DAG.getUNDEF(L->getValueType(0)));
size += L->getValueSizeInBits(0);
}
L = DAG.getNode(ISD::CONCAT_VECTORS, dl, LdOp->getValueType(0), Loads);
}
} else {
L = DAG.getLoad(NewVT, dl, Chain, BasePtr,
LD->getPointerInfo().getWithOffset(Offset), isVolatile,
isNonTemporal, isInvariant, MinAlign(Align, Increment),
AAInfo);
LdChain.push_back(L.getValue(1));
}
LdOps.push_back(L);
LdWidth -= NewVTWidth;
}
// Build the vector from the load operations.
unsigned End = LdOps.size();
if (!LdOps[0].getValueType().isVector())
// All the loads are scalar loads.
return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End);
// If the loads contain vectors, build the vector using CONCAT_VECTORS.
// All of the vector loads are power-of-2 sized, and the scalar loads can be
// combined to make a power-of-2 vector.
SmallVector<SDValue, 16> ConcatOps(End);
int i = End - 1;
int Idx = End;
EVT LdTy = LdOps[i].getValueType();
// First combine the scalar loads to a vector
if (!LdTy.isVector()) {
for (--i; i >= 0; --i) {
LdTy = LdOps[i].getValueType();
if (LdTy.isVector())
break;
}
ConcatOps[--Idx] = BuildVectorFromScalar(DAG, LdTy, LdOps, i+1, End);
}
ConcatOps[--Idx] = LdOps[i];
for (--i; i >= 0; --i) {
EVT NewLdTy = LdOps[i].getValueType();
if (NewLdTy != LdTy) {
// Create a larger vector
ConcatOps[End-1] = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewLdTy,
makeArrayRef(&ConcatOps[Idx], End - Idx));
Idx = End - 1;
LdTy = NewLdTy;
}
ConcatOps[--Idx] = LdOps[i];
}
if (WidenWidth == LdTy.getSizeInBits()*(End - Idx))
return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
makeArrayRef(&ConcatOps[Idx], End - Idx));
// We need to fill the rest with undefs to build the vector
unsigned NumOps = WidenWidth / LdTy.getSizeInBits();
SmallVector<SDValue, 16> WidenOps(NumOps);
SDValue UndefVal = DAG.getUNDEF(LdTy);
{
unsigned i = 0;
for (; i != End-Idx; ++i)
WidenOps[i] = ConcatOps[Idx+i];
for (; i != NumOps; ++i)
WidenOps[i] = UndefVal;
}
return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, WidenOps);
}
SDValue
DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD,
ISD::LoadExtType ExtType) {
// For extension loads, it may not be more efficient to chop up the vector
// and then extend it. Instead, we unroll the load and build a new vector.
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),LD->getValueType(0));
EVT LdVT = LD->getMemoryVT();
SDLoc dl(LD);
assert(LdVT.isVector() && WidenVT.isVector());
// Load information
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
unsigned Align = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
bool isInvariant = LD->isInvariant();
AAMDNodes AAInfo = LD->getAAInfo();
EVT EltVT = WidenVT.getVectorElementType();
EVT LdEltVT = LdVT.getVectorElementType();
unsigned NumElts = LdVT.getVectorNumElements();
// Load each element and widen
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(WidenNumElts);
unsigned Increment = LdEltVT.getSizeInBits() / 8;
Ops[0] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
LdEltVT, isVolatile, isNonTemporal, isInvariant,
Align, AAInfo);
LdChain.push_back(Ops[0].getValue(1));
unsigned i = 0, Offset = Increment;
for (i=1; i < NumElts; ++i, Offset += Increment) {
SDValue NewBasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
BasePtr,
DAG.getConstant(Offset, dl,
BasePtr.getValueType()));
Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
LD->getPointerInfo().getWithOffset(Offset), LdEltVT,
isVolatile, isNonTemporal, isInvariant, Align,
AAInfo);
LdChain.push_back(Ops[i].getValue(1));
}
// Fill the rest with undefs
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i != WidenNumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, Ops);
}
void DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST) {
// The strategy assumes that we can efficiently store power-of-two widths.
// The routine chops the vector into the largest vector stores with the same
// element type, or scalar stores.
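// Sketch, assuming legal v4i32: storing a v6i32 value that was widened to
// v8i32 emits one v4i32 store, then FindMemType picks smaller types (e.g. a
// 64-bit piece) for the remaining two elements.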
SDValue Chain = ST->getChain();
SDValue BasePtr = ST->getBasePtr();
unsigned Align = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
AAMDNodes AAInfo = ST->getAAInfo();
SDValue ValOp = GetWidenedVector(ST->getValue());
SDLoc dl(ST);
EVT StVT = ST->getMemoryVT();
unsigned StWidth = StVT.getSizeInBits();
EVT ValVT = ValOp.getValueType();
unsigned ValWidth = ValVT.getSizeInBits();
EVT ValEltVT = ValVT.getVectorElementType();
unsigned ValEltWidth = ValEltVT.getSizeInBits();
assert(StVT.getVectorElementType() == ValEltVT);
int Idx = 0; // current index to store
unsigned Offset = 0; // offset from base to store
while (StWidth != 0) {
// Find the largest vector type we can store with
EVT NewVT = FindMemType(DAG, TLI, StWidth, ValVT);
unsigned NewVTWidth = NewVT.getSizeInBits();
unsigned Increment = NewVTWidth / 8;
if (NewVT.isVector()) {
unsigned NumVTElts = NewVT.getVectorNumElements();
do {
SDValue EOp = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, NewVT, ValOp,
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo().getWithOffset(Offset),
isVolatile, isNonTemporal,
MinAlign(Align, Offset), AAInfo));
StWidth -= NewVTWidth;
Offset += Increment;
Idx += NumVTElts;
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getConstant(Increment, dl,
BasePtr.getValueType()));
} while (StWidth != 0 && StWidth >= NewVTWidth);
} else {
// Cast the vector to the scalar type we can store
unsigned NumElts = ValWidth / NewVTWidth;
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
SDValue VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, ValOp);
// Readjust index position based on new vector type
Idx = Idx * ValEltWidth / NewVTWidth;
do {
SDValue EOp = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, NewVT, VecOp,
DAG.getConstant(Idx++, dl,
TLI.getVectorIdxTy(DAG.getDataLayout())));
StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo().getWithOffset(Offset),
isVolatile, isNonTemporal,
MinAlign(Align, Offset), AAInfo));
StWidth -= NewVTWidth;
Offset += Increment;
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getConstant(Increment, dl,
BasePtr.getValueType()));
} while (StWidth != 0 && StWidth >= NewVTWidth);
// Restore the index to be relative to the original widen element type.
Idx = Idx * NewVTWidth / ValEltWidth;
}
}
}
void
DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST) {
// For truncating stores, it may not be more efficient to truncate the vector
// and then store it. Instead, we extract each element and then store it.
SDValue Chain = ST->getChain();
SDValue BasePtr = ST->getBasePtr();
unsigned Align = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
AAMDNodes AAInfo = ST->getAAInfo();
SDValue ValOp = GetWidenedVector(ST->getValue());
SDLoc dl(ST);
EVT StVT = ST->getMemoryVT();
EVT ValVT = ValOp.getValueType();
// It must be true that the widen vector type is bigger than the type
// we need to store.
assert(StVT.isVector() && ValOp.getValueType().isVector());
assert(StVT.bitsLT(ValOp.getValueType()));
// For truncating stores, we cannot play the trick of chopping legal vector
// types and bitcasting to the right type. Instead, we unroll the store.
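// E.g. a v4i32 -> v4i8 truncating store is unrolled into four
// EXTRACT_VECTOR_ELT + scalar truncating stores, one per element.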
EVT StEltVT = StVT.getVectorElementType();
EVT ValEltVT = ValVT.getVectorElementType();
unsigned Increment = ValEltVT.getSizeInBits() / 8;
unsigned NumElts = StVT.getVectorNumElements();
SDValue EOp = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo(), StEltVT,
isVolatile, isNonTemporal, Align,
AAInfo));
unsigned Offset = Increment;
for (unsigned i=1; i < NumElts; ++i, Offset += Increment) {
SDValue NewBasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
BasePtr,
DAG.getConstant(Offset, dl,
BasePtr.getValueType()));
SDValue EOp = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, NewBasePtr,
ST->getPointerInfo().getWithOffset(Offset),
StEltVT, isVolatile, isNonTemporal,
MinAlign(Align, Offset), AAInfo));
}
}
/// Modifies a vector input (widens or narrows) to a vector of NVT. The
/// input vector must have the same element type as NVT.
SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT) {
// Note that InOp might have been widened, so it might already have
// the right width or it might need to be narrowed.
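// Two illustrative cases (hypothetical types): narrowing v8i32 to v4i32
// takes the EXTRACT_SUBVECTOR path below, while widening v2i32 to v8i32
// concatenates InOp with three undef v2i32 vectors.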
EVT InVT = InOp.getValueType();
assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
"input and widen element type must match");
SDLoc dl(InOp);
// Check if InOp already has the right width.
if (InVT == NVT)
return InOp;
unsigned InNumElts = InVT.getVectorNumElements();
unsigned WidenNumElts = NVT.getVectorNumElements();
if (WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0) {
unsigned NumConcat = WidenNumElts / InNumElts;
SmallVector<SDValue, 16> Ops(NumConcat);
SDValue UndefVal = DAG.getUNDEF(InVT);
Ops[0] = InOp;
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::CONCAT_VECTORS, dl, NVT, Ops);
}
if (WidenNumElts < InNumElts && InNumElts % WidenNumElts == 0)
return DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, NVT, InOp,
DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
// Fall back to extract and build.
SmallVector<SDValue, 16> Ops(WidenNumElts);
EVT EltVT = NVT.getVectorElementType();
unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
unsigned Idx;
for (Idx = 0; Idx < MinNumElts; ++Idx)
Ops[Idx] = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue UndefVal = DAG.getUNDEF(EltVT);
for ( ; Idx < WidenNumElts; ++Idx)
Ops[Idx] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Ops);
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp | //===-------- LegalizeTypesGeneric.cpp - Generic type legalization --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements generic type expansion and splitting for LegalizeTypes.
// The routines here perform legalization when the details of the type (such as
// whether it is an integer or a float) do not matter.
// Expansion is the act of changing a computation in an illegal type to be a
// computation in two identical registers of a smaller type. The Lo/Hi part
// is required to be stored first in memory on little/big-endian machines.
// Splitting is the act of changing a computation in an illegal type to be a
// computation in two not necessarily identical registers of a smaller type.
// There are no requirements on how the type is represented in memory.
//
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;
#define DEBUG_TYPE "legalize-types"
//===----------------------------------------------------------------------===//
// Generic Result Expansion.
//===----------------------------------------------------------------------===//
// These routines assume that the Lo/Hi part is stored first in memory on
// little/big-endian machines, followed by the Hi/Lo part. This means that
// they cannot be used as is on vectors, for which Lo is always stored first.
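// For example, an i64 expanded into two i32 parts places Lo at byte offset 0
// and Hi at byte offset 4 on a little-endian machine; on a big-endian machine
// the Hi part occupies offset 0 instead.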
void DAGTypeLegalizer::ExpandRes_MERGE_VALUES(SDNode *N, unsigned ResNo,
SDValue &Lo, SDValue &Hi) {
SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
GetExpandedOp(Op, Lo, Hi);
}
void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
SDValue InOp = N->getOperand(0);
EVT InVT = InOp.getValueType();
SDLoc dl(N);
// Handle some special cases efficiently.
switch (getTypeAction(InVT)) {
case TargetLowering::TypeLegal:
case TargetLowering::TypePromoteInteger:
break;
case TargetLowering::TypePromoteFloat:
llvm_unreachable("Bitcast of a promotion-needing float should never need"
"expansion");
case TargetLowering::TypeSoftenFloat:
// Convert the integer operand instead.
SplitInteger(GetSoftenedFloat(InOp), Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case TargetLowering::TypeExpandInteger:
case TargetLowering::TypeExpandFloat: {
auto &DL = DAG.getDataLayout();
// Convert the expanded pieces of the input.
GetExpandedOp(InOp, Lo, Hi);
if (TLI.hasBigEndianPartOrdering(InVT, DL) !=
TLI.hasBigEndianPartOrdering(OutVT, DL))
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
}
case TargetLowering::TypeSplitVector:
GetSplitVector(InOp, Lo, Hi);
if (TLI.hasBigEndianPartOrdering(OutVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case TargetLowering::TypeScalarizeVector:
// Convert the element instead.
SplitInteger(BitConvertToInteger(GetScalarizedVector(InOp)), Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case TargetLowering::TypeWidenVector: {
assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BITCAST");
InOp = GetWidenedVector(InOp);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(InVT);
std::tie(Lo, Hi) = DAG.SplitVector(InOp, dl, LoVT, HiVT);
if (TLI.hasBigEndianPartOrdering(OutVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
}
}
if (InVT.isVector() && OutVT.isInteger()) {
// Handle cases like i64 = BITCAST v1i64 on x86, where the operand
// is legal but the result is not.
unsigned NumElems = 2;
EVT ElemVT = NOutVT;
EVT NVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, NumElems);
// If <ElemVT * N> is not a legal type, try <ElemVT/2 * (N*2)>.
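// E.g. (hypothetical target): if v2i32 is not legal, the loop tries v4i16
// and then v8i8, stopping as soon as a legal type is found or the element
// width would drop below 8 bits.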
while (!isTypeLegal(NVT)) {
unsigned NewSizeInBits = ElemVT.getSizeInBits() / 2;
// If the element size is smaller than a byte, bail.
if (NewSizeInBits < 8)
break;
NumElems *= 2;
ElemVT = EVT::getIntegerVT(*DAG.getContext(), NewSizeInBits);
NVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, NumElems);
}
if (isTypeLegal(NVT)) {
SDValue CastInOp = DAG.getNode(ISD::BITCAST, dl, NVT, InOp);
SmallVector<SDValue, 8> Vals;
for (unsigned i = 0; i < NumElems; ++i)
Vals.push_back(DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, ElemVT, CastInOp,
DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))));
// Build Lo, Hi pair by pairing extracted elements if needed.
unsigned Slot = 0;
for (unsigned e = Vals.size(); e - Slot > 2; Slot += 2, e += 1) {
// Each iteration will BUILD_PAIR two nodes and append the result until
// there are only two nodes left, i.e. Lo and Hi.
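// Illustration (hypothetical): starting from four extracted i16 values,
// two iterations append two i32 BUILD_PAIR nodes; Slot then points at
// those two nodes, which become Lo and Hi.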
SDValue LHS = Vals[Slot];
SDValue RHS = Vals[Slot + 1];
if (DAG.getDataLayout().isBigEndian())
std::swap(LHS, RHS);
Vals.push_back(DAG.getNode(ISD::BUILD_PAIR, dl,
EVT::getIntegerVT(
*DAG.getContext(),
LHS.getValueType().getSizeInBits() << 1),
LHS, RHS));
}
Lo = Vals[Slot++];
Hi = Vals[Slot++];
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
return;
}
}
// Lower the bit-convert to a store/load from the stack.
assert(NOutVT.isByteSized() && "Expanded type not byte sized!");
// Create the stack frame object. Make sure it is aligned for both
// the source and expanded destination types.
unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(
NOutVT.getTypeForEVT(*DAG.getContext()));
SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
// Emit a store to the stack slot.
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, InOp, StackPtr, PtrInfo,
false, false, 0);
// Load the first half from the stack slot.
Lo = DAG.getLoad(NOutVT, dl, Store, StackPtr, PtrInfo,
false, false, false, 0);
// Increment the pointer to the other half.
unsigned IncrementSize = NOutVT.getSizeInBits() / 8;
StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
DAG.getConstant(IncrementSize, dl,
StackPtr.getValueType()));
// Load the second half from the stack slot.
Hi = DAG.getLoad(NOutVT, dl, Store, StackPtr,
PtrInfo.getWithOffset(IncrementSize), false,
false, false, MinAlign(Alignment, IncrementSize));
// Handle endianness of the load.
if (TLI.hasBigEndianPartOrdering(OutVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
}
void DAGTypeLegalizer::ExpandRes_BUILD_PAIR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
// Return the operands.
Lo = N->getOperand(0);
Hi = N->getOperand(1);
}
void DAGTypeLegalizer::ExpandRes_EXTRACT_ELEMENT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
GetExpandedOp(N->getOperand(0), Lo, Hi);
SDValue Part = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() ?
Hi : Lo;
assert(Part.getValueType() == N->getValueType(0) &&
"Type twice as big as expanded type not itself expanded!");
GetPairElements(Part, Lo, Hi);
}
void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue OldVec = N->getOperand(0);
unsigned OldElts = OldVec.getValueType().getVectorNumElements();
EVT OldEltVT = OldVec.getValueType().getVectorElementType();
SDLoc dl(N);
// Convert to a vector of the expanded element type, for example
// <3 x i64> -> <6 x i32>.
EVT OldVT = N->getValueType(0);
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
if (OldVT != OldEltVT) {
// The result of EXTRACT_VECTOR_ELT may be larger than the element type of
// the input vector. If so, extend the elements of the input vector to the
// same bitwidth as the result before expanding.
assert(OldEltVT.bitsLT(OldVT) && "Result type smaller than element type!");
EVT NVecVT = EVT::getVectorVT(*DAG.getContext(), OldVT, OldElts);
OldVec = DAG.getNode(ISD::ANY_EXTEND, dl, NVecVT, N->getOperand(0));
}
SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
EVT::getVectorVT(*DAG.getContext(),
NewVT, 2*OldElts),
OldVec);
// Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector.
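// E.g. extracting element 1 of a <3 x i64> viewed as <6 x i32> reads the
// i32 elements at indices 2 and 3.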
SDValue Idx = N->getOperand(1);
Idx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, Idx);
Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, Idx);
Idx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
DAG.getConstant(1, dl, Idx.getValueType()));
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, Idx);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
}
void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
SDValue &Hi) {
assert(ISD::isNormalLoad(N) && "This routine only for normal loads!");
SDLoc dl(N);
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT ValueVT = LD->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
unsigned Alignment = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
bool isInvariant = LD->isInvariant();
AAMDNodes AAInfo = LD->getAAInfo();
assert(NVT.isByteSized() && "Expanded type not byte sized!");
Lo = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getPointerInfo(),
isVolatile, isNonTemporal, isInvariant, Alignment,
AAInfo);
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits() / 8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
Hi = DAG.getLoad(NVT, dl, Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
isVolatile, isNonTemporal, isInvariant,
MinAlign(Alignment, IncrementSize), AAInfo);
// Build a factor node to remember that this load is independent of the
// other one.
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Handle endianness of the load.
if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
// Modified the chain - switch anything that used the old chain to use
// the new one.
ReplaceValueWith(SDValue(N, 1), Chain);
}
void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT OVT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
SDValue Chain = N->getOperand(0);
SDValue Ptr = N->getOperand(1);
SDLoc dl(N);
const unsigned Align = N->getConstantOperandVal(3);
Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2), Align);
Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, N->getOperand(2), 0);
// Handle endianness of the load.
if (TLI.hasBigEndianPartOrdering(OVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
// Modified the chain - switch anything that used the old chain to use
// the new one.
ReplaceValueWith(SDValue(N, 1), Hi.getValue(1));
}
//===--------------------------------------------------------------------===//
// Generic Operand Expansion.
//===--------------------------------------------------------------------===//
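// IntegerToVector recursively halves Op (swapping the halves on big-endian
// targets) until NumElements scalar pieces remain, then bitcasts each piece
// to EltVT. E.g. (hypothetical) an i64 with NumElements == 4 and
// EltVT == i16 splits into two i32s and then into four i16 pieces.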
void DAGTypeLegalizer::IntegerToVector(SDValue Op, unsigned NumElements,
SmallVectorImpl<SDValue> &Ops,
EVT EltVT) {
assert(Op.getValueType().isInteger());
SDLoc DL(Op);
SDValue Parts[2];
if (NumElements > 1) {
NumElements >>= 1;
SplitInteger(Op, Parts[0], Parts[1]);
if (DAG.getDataLayout().isBigEndian())
std::swap(Parts[0], Parts[1]);
IntegerToVector(Parts[0], NumElements, Ops, EltVT);
IntegerToVector(Parts[1], NumElements, Ops, EltVT);
} else {
Ops.push_back(DAG.getNode(ISD::BITCAST, DL, EltVT, Op));
}
}
SDValue DAGTypeLegalizer::ExpandOp_BITCAST(SDNode *N) {
SDLoc dl(N);
if (N->getValueType(0).isVector()) {
// An illegal expanding type is being converted to a legal vector type.
// Make a two element vector out of the expanded parts and convert that
// instead, but only if the new vector type is legal (otherwise there
// is no point, and it might create expansion loops). For example, on
// x86 this turns v1i64 = BITCAST i64 into v1i64 = BITCAST v2i32.
//
// FIXME: I'm not sure why we are first trying to split the input into
// a 2 element vector, so I'm leaving it here to maintain the current
// behavior.
unsigned NumElts = 2;
EVT OVT = N->getOperand(0).getValueType();
EVT NVT = EVT::getVectorVT(*DAG.getContext(),
TLI.getTypeToTransformTo(*DAG.getContext(), OVT),
NumElts);
if (!isTypeLegal(NVT)) {
// If we can't find a legal type by splitting the integer in half,
// then we can use the node's value type.
NumElts = N->getValueType(0).getVectorNumElements();
NVT = N->getValueType(0);
}
SmallVector<SDValue, 8> Ops;
IntegerToVector(N->getOperand(0), NumElts, Ops, NVT.getVectorElementType());
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT,
makeArrayRef(Ops.data(), NumElts));
return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), Vec);
}
// Otherwise, store to a temporary and load out again as the new type.
return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0));
}
SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
// The vector type is legal but the element type needs expansion.
EVT VecVT = N->getValueType(0);
unsigned NumElts = VecVT.getVectorNumElements();
EVT OldVT = N->getOperand(0).getValueType();
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
SDLoc dl(N);
assert(OldVT == VecVT.getVectorElementType() &&
"BUILD_VECTOR operand type doesn't match vector element type!");
// Build a vector of twice the length out of the expanded elements.
// For example <3 x i64> -> <6 x i32>.
std::vector<SDValue> NewElts;
NewElts.reserve(NumElts*2);
for (unsigned i = 0; i < NumElts; ++i) {
SDValue Lo, Hi;
GetExpandedOp(N->getOperand(i), Lo, Hi);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
NewElts.push_back(Lo);
NewElts.push_back(Hi);
}
SDValue NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
EVT::getVectorVT(*DAG.getContext(),
NewVT, NewElts.size()),
NewElts);
// Convert the new vector to the old vector type.
return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
}
SDValue DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) {
SDValue Lo, Hi;
GetExpandedOp(N->getOperand(0), Lo, Hi);
return cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() ? Hi : Lo;
}
SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
// The vector type is legal but the element type needs expansion.
EVT VecVT = N->getValueType(0);
unsigned NumElts = VecVT.getVectorNumElements();
SDLoc dl(N);
SDValue Val = N->getOperand(1);
EVT OldEVT = Val.getValueType();
EVT NewEVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldEVT);
assert(OldEVT == VecVT.getVectorElementType() &&
"Inserted element type doesn't match vector element type!");
// Bitconvert to a vector of twice the length with elements of the expanded
// type, insert the expanded vector elements, and then convert back.
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewEVT, NumElts*2);
SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
NewVecVT, N->getOperand(0));
SDValue Lo, Hi;
GetExpandedOp(Val, Lo, Hi);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
SDValue Idx = N->getOperand(2);
Idx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, Idx);
NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Lo, Idx);
Idx = DAG.getNode(ISD::ADD, dl,
Idx.getValueType(), Idx,
DAG.getConstant(1, dl, Idx.getValueType()));
NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Hi, Idx);
// Convert the new vector to the old vector type.
return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
}
SDValue DAGTypeLegalizer::ExpandOp_SCALAR_TO_VECTOR(SDNode *N) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
assert(VT.getVectorElementType() == N->getOperand(0).getValueType() &&
"SCALAR_TO_VECTOR operand type doesn't match vector element type!");
unsigned NumElts = VT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(NumElts);
Ops[0] = N->getOperand(0);
SDValue UndefVal = DAG.getUNDEF(Ops[0].getValueType());
for (unsigned i = 1; i < NumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
assert(ISD::isNormalStore(N) && "This routine only for normal stores!");
assert(OpNo == 1 && "Can only expand the stored value so far");
SDLoc dl(N);
StoreSDNode *St = cast<StoreSDNode>(N);
EVT ValueVT = St->getValue().getValueType();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
SDValue Chain = St->getChain();
SDValue Ptr = St->getBasePtr();
unsigned Alignment = St->getAlignment();
bool isVolatile = St->isVolatile();
bool isNonTemporal = St->isNonTemporal();
AAMDNodes AAInfo = St->getAAInfo();
assert(NVT.isByteSized() && "Expanded type not byte sized!");
unsigned IncrementSize = NVT.getSizeInBits() / 8;
SDValue Lo, Hi;
GetExpandedOp(St->getValue(), Lo, Hi);
if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Lo = DAG.getStore(Chain, dl, Lo, Ptr, St->getPointerInfo(),
isVolatile, isNonTemporal, Alignment, AAInfo);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
Hi = DAG.getStore(Chain, dl, Hi, Ptr,
St->getPointerInfo().getWithOffset(IncrementSize),
isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize), AAInfo);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
//===--------------------------------------------------------------------===//
// Generic Result Splitting.
//===--------------------------------------------------------------------===//
// Be careful to make no assumptions about which of Lo/Hi is stored first in
// memory (for vectors it is always Lo first followed by Hi in the following
// bytes; for integers and floats it is Lo first if and only if the machine is
// little-endian).
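// E.g. for the SELECT handled below, a v8i32 select splits into two v4i32
// selects, while an expanded i64 select becomes two i32 selects that share
// the original condition.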
void DAGTypeLegalizer::SplitRes_MERGE_VALUES(SDNode *N, unsigned ResNo,
SDValue &Lo, SDValue &Hi) {
SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
GetSplitOp(Op, Lo, Hi);
}
void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue LL, LH, RL, RH, CL, CH;
SDLoc dl(N);
GetSplitOp(N->getOperand(1), LL, LH);
GetSplitOp(N->getOperand(2), RL, RH);
SDValue Cond = N->getOperand(0);
CL = CH = Cond;
if (Cond.getValueType().isVector()) {
// Check if there are already split versions of the vector available and
// use those instead of splitting the mask operand again.
if (getTypeAction(Cond.getValueType()) == TargetLowering::TypeSplitVector)
GetSplitVector(Cond, CL, CH);
else
std::tie(CL, CH) = DAG.SplitVector(Cond, dl);
}
Lo = DAG.getNode(N->getOpcode(), dl, LL.getValueType(), CL, LL, RL);
Hi = DAG.getNode(N->getOpcode(), dl, LH.getValueType(), CH, LH, RH);
}
void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue LL, LH, RL, RH;
SDLoc dl(N);
GetSplitOp(N->getOperand(2), LL, LH);
GetSplitOp(N->getOperand(3), RL, RH);
Lo = DAG.getNode(ISD::SELECT_CC, dl, LL.getValueType(), N->getOperand(0),
N->getOperand(1), LL, RL, N->getOperand(4));
Hi = DAG.getNode(ISD::SELECT_CC, dl, LH.getValueType(), N->getOperand(0),
N->getOperand(1), LH, RH, N->getOperand(4));
}
void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getUNDEF(LoVT);
Hi = DAG.getUNDEF(HiVT);
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/LegalizeTypes.h | //===-- LegalizeTypes.h - DAG Type Legalizer class definition ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DAGTypeLegalizer class. This is a private interface
// shared by the code that implements the SelectionDAG::LegalizeTypes
// method.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_LEGALIZETYPES_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_LEGALIZETYPES_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
namespace llvm {
//===----------------------------------------------------------------------===//
/// DAGTypeLegalizer - This takes an arbitrary SelectionDAG as input and hacks
/// on it until only value types the target machine can handle are left. This
/// involves promoting small sizes to large sizes or splitting up large values
/// into small values.
///
class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
const TargetLowering &TLI;
SelectionDAG &DAG;
public:
// NodeIdFlags - This pass uses the NodeId on the SDNodes to hold information
// about the state of the node. The enum below lists all possible states;
// positive values denote the number of unprocessed operands.
enum NodeIdFlags {
/// ReadyToProcess - All operands have been processed, so this node is ready
/// to be handled.
ReadyToProcess = 0,
/// NewNode - This is a new node, not before seen, that was created in the
/// process of legalizing some other node.
NewNode = -1,
/// Unanalyzed - This node's ID needs to be set to the number of its
/// unprocessed operands.
Unanalyzed = -2,
/// Processed - This is a node that has already been processed.
Processed = -3
// 1+ - This is a node which has this many unprocessed operands.
};
private:
/// ValueTypeActions - This is a bitvector that contains two bits for each
/// simple value type, where the two bits correspond to the LegalizeAction
/// enum from TargetLowering. This can be queried with "getTypeAction(VT)".
TargetLowering::ValueTypeActionImpl ValueTypeActions;
/// getTypeAction - Return how we should legalize values of this type.
TargetLowering::LegalizeTypeAction getTypeAction(EVT VT) const {
return TLI.getTypeAction(*DAG.getContext(), VT);
}
/// isTypeLegal - Return true if this type is legal on this target.
bool isTypeLegal(EVT VT) const {
return TLI.getTypeAction(*DAG.getContext(), VT) == TargetLowering::TypeLegal;
}
EVT getSetCCResultType(EVT VT) const {
return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
}
/// IgnoreNodeResults - Pretend all of this node's results are legal.
bool IgnoreNodeResults(SDNode *N) const {
return N->getOpcode() == ISD::TargetConstant;
}
/// PromotedIntegers - For integer nodes that are below legal width, this map
/// indicates what promoted value to use.
SmallDenseMap<SDValue, SDValue, 8> PromotedIntegers;
/// ExpandedIntegers - For integer nodes that need to be expanded this map
/// indicates which operands are the expanded version of the input.
SmallDenseMap<SDValue, std::pair<SDValue, SDValue>, 8> ExpandedIntegers;
/// SoftenedFloats - For floating point nodes converted to integers of
/// the same size, this map indicates the converted value to use.
SmallDenseMap<SDValue, SDValue, 8> SoftenedFloats;
/// PromotedFloats - For floating point nodes that have a smaller precision
/// than the smallest supported precision, this map indicates what promoted
/// value to use.
SmallDenseMap<SDValue, SDValue, 8> PromotedFloats;
/// ExpandedFloats - For float nodes that need to be expanded this map
/// indicates which operands are the expanded version of the input.
SmallDenseMap<SDValue, std::pair<SDValue, SDValue>, 8> ExpandedFloats;
/// ScalarizedVectors - For nodes that are <1 x ty>, this map indicates the
/// scalar value of type 'ty' to use.
SmallDenseMap<SDValue, SDValue, 8> ScalarizedVectors;
/// SplitVectors - For nodes that need to be split this map indicates
/// which operands are the expanded version of the input.
SmallDenseMap<SDValue, std::pair<SDValue, SDValue>, 8> SplitVectors;
/// WidenedVectors - For vector nodes that need to be widened, indicates
/// the widened value to use.
SmallDenseMap<SDValue, SDValue, 8> WidenedVectors;
/// ReplacedValues - For values that have been replaced with another,
/// indicates the replacement value to use.
SmallDenseMap<SDValue, SDValue, 8> ReplacedValues;
/// Worklist - This defines a worklist of nodes to process. In order to be
/// pushed onto this worklist, all operands of a node must have already been
/// processed.
SmallVector<SDNode*, 128> Worklist;
public:
explicit DAGTypeLegalizer(SelectionDAG &dag)
: TLI(dag.getTargetLoweringInfo()), DAG(dag),
ValueTypeActions(TLI.getValueTypeActions()) {
static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
"Too many value types for ValueTypeActions to hold!");
}
/// run - This is the main entry point for the type legalizer. This does a
/// top-down traversal of the dag, legalizing types as it goes. Returns
/// "true" if it made any changes.
bool run();
void NoteDeletion(SDNode *Old, SDNode *New) {
ExpungeNode(Old);
ExpungeNode(New);
for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i)
ReplacedValues[SDValue(Old, i)] = SDValue(New, i);
}
SelectionDAG &getDAG() const { return DAG; }
private:
SDNode *AnalyzeNewNode(SDNode *N);
void AnalyzeNewValue(SDValue &Val);
void ExpungeNode(SDNode *N);
void PerformExpensiveChecks();
void RemapValue(SDValue &N);
// Common routines.
SDValue BitConvertToInteger(SDValue Op);
SDValue BitConvertVectorToIntegerVector(SDValue Op);
SDValue CreateStackStoreLoad(SDValue Op, EVT DestVT);
bool CustomLowerNode(SDNode *N, EVT VT, bool LegalizeResult);
bool CustomWidenLowerNode(SDNode *N, EVT VT);
/// DisintegrateMERGE_VALUES - Replace each result of the given MERGE_VALUES
/// node with the corresponding input operand, except for the result 'ResNo',
/// for which the corresponding input operand is returned.
SDValue DisintegrateMERGE_VALUES(SDNode *N, unsigned ResNo);
SDValue GetVectorElementPointer(SDValue VecPtr, EVT EltVT, SDValue Index);
SDValue JoinIntegers(SDValue Lo, SDValue Hi);
SDValue LibCallify(RTLIB::Libcall LC, SDNode *N, bool isSigned);
std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
SDNode *Node, bool isSigned);
std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
SDValue PromoteTargetBoolean(SDValue Bool, EVT ValVT);
void ReplaceValueWith(SDValue From, SDValue To);
void SplitInteger(SDValue Op, SDValue &Lo, SDValue &Hi);
void SplitInteger(SDValue Op, EVT LoVT, EVT HiVT,
SDValue &Lo, SDValue &Hi);
//===--------------------------------------------------------------------===//
// Integer Promotion Support: LegalizeIntegerTypes.cpp
//===--------------------------------------------------------------------===//
/// GetPromotedInteger - Given a processed operand Op which was promoted to a
/// larger integer type, this returns the promoted value. The low bits of the
/// promoted value corresponding to the original type are exactly equal to Op.
/// The extra bits contain rubbish, so the promoted value may need to be zero-
/// or sign-extended from the original type before it is usable (the helpers
/// SExtPromotedInteger and ZExtPromotedInteger can do this for you).
/// For example, if Op is an i16 and was promoted to an i32, then this method
/// returns an i32, the lower 16 bits of which coincide with Op, and the upper
/// 16 bits of which contain rubbish.
SDValue GetPromotedInteger(SDValue Op) {
SDValue &PromotedOp = PromotedIntegers[Op];
RemapValue(PromotedOp);
assert(PromotedOp.getNode() && "Operand wasn't promoted?");
return PromotedOp;
}
void SetPromotedInteger(SDValue Op, SDValue Result);
/// SExtPromotedInteger - Get a promoted operand and sign extend it to the
/// final size.
SDValue SExtPromotedInteger(SDValue Op) {
EVT OldVT = Op.getValueType();
SDLoc dl(Op);
Op = GetPromotedInteger(Op);
return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Op.getValueType(), Op,
DAG.getValueType(OldVT));
}
/// ZExtPromotedInteger - Get a promoted operand and zero extend it to the
/// final size.
SDValue ZExtPromotedInteger(SDValue Op) {
EVT OldVT = Op.getValueType();
SDLoc dl(Op);
Op = GetPromotedInteger(Op);
return DAG.getZeroExtendInReg(Op, dl, OldVT.getScalarType());
}
// Integer Result Promotion.
void PromoteIntegerResult(SDNode *N, unsigned ResNo);
SDValue PromoteIntRes_MERGE_VALUES(SDNode *N, unsigned ResNo);
SDValue PromoteIntRes_AssertSext(SDNode *N);
SDValue PromoteIntRes_AssertZext(SDNode *N);
SDValue PromoteIntRes_Atomic0(AtomicSDNode *N);
SDValue PromoteIntRes_Atomic1(AtomicSDNode *N);
SDValue PromoteIntRes_AtomicCmpSwap(AtomicSDNode *N, unsigned ResNo);
SDValue PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N);
SDValue PromoteIntRes_VECTOR_SHUFFLE(SDNode *N);
SDValue PromoteIntRes_BUILD_VECTOR(SDNode *N);
SDValue PromoteIntRes_SCALAR_TO_VECTOR(SDNode *N);
SDValue PromoteIntRes_INSERT_VECTOR_ELT(SDNode *N);
SDValue PromoteIntRes_CONCAT_VECTORS(SDNode *N);
SDValue PromoteIntRes_BITCAST(SDNode *N);
SDValue PromoteIntRes_BSWAP(SDNode *N);
SDValue PromoteIntRes_BUILD_PAIR(SDNode *N);
SDValue PromoteIntRes_Constant(SDNode *N);
SDValue PromoteIntRes_CONVERT_RNDSAT(SDNode *N);
SDValue PromoteIntRes_CTLZ(SDNode *N);
SDValue PromoteIntRes_CTPOP(SDNode *N);
SDValue PromoteIntRes_CTTZ(SDNode *N);
SDValue PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue PromoteIntRes_FP_TO_XINT(SDNode *N);
SDValue PromoteIntRes_FP_TO_FP16(SDNode *N);
SDValue PromoteIntRes_INT_EXTEND(SDNode *N);
SDValue PromoteIntRes_LOAD(LoadSDNode *N);
SDValue PromoteIntRes_MLOAD(MaskedLoadSDNode *N);
SDValue PromoteIntRes_Overflow(SDNode *N);
SDValue PromoteIntRes_SADDSUBO(SDNode *N, unsigned ResNo);
SDValue PromoteIntRes_SDIV(SDNode *N);
SDValue PromoteIntRes_SELECT(SDNode *N);
SDValue PromoteIntRes_VSELECT(SDNode *N);
SDValue PromoteIntRes_SELECT_CC(SDNode *N);
SDValue PromoteIntRes_SETCC(SDNode *N);
SDValue PromoteIntRes_SHL(SDNode *N);
SDValue PromoteIntRes_SimpleIntBinOp(SDNode *N);
SDValue PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N);
SDValue PromoteIntRes_SRA(SDNode *N);
SDValue PromoteIntRes_SRL(SDNode *N);
SDValue PromoteIntRes_TRUNCATE(SDNode *N);
SDValue PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo);
SDValue PromoteIntRes_UDIV(SDNode *N);
SDValue PromoteIntRes_UNDEF(SDNode *N);
SDValue PromoteIntRes_VAARG(SDNode *N);
SDValue PromoteIntRes_XMULO(SDNode *N, unsigned ResNo);
// Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OperandNo);
SDValue PromoteIntOp_ANY_EXTEND(SDNode *N);
SDValue PromoteIntOp_ATOMIC_STORE(AtomicSDNode *N);
SDValue PromoteIntOp_BITCAST(SDNode *N);
SDValue PromoteIntOp_BUILD_PAIR(SDNode *N);
SDValue PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_BUILD_VECTOR(SDNode *N);
SDValue PromoteIntOp_CONVERT_RNDSAT(SDNode *N);
SDValue PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_EXTRACT_ELEMENT(SDNode *N);
SDValue PromoteIntOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue PromoteIntOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue PromoteIntOp_CONCAT_VECTORS(SDNode *N);
SDValue PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N);
SDValue PromoteIntOp_SELECT(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_SETCC(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_VSETCC(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_Shift(SDNode *N);
SDValue PromoteIntOp_SIGN_EXTEND(SDNode *N);
SDValue PromoteIntOp_SINT_TO_FP(SDNode *N);
SDValue PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo);
SDValue PromoteIntOp_TRUNCATE(SDNode *N);
SDValue PromoteIntOp_UINT_TO_FP(SDNode *N);
SDValue PromoteIntOp_ZERO_EXTEND(SDNode *N);
SDValue PromoteIntOp_MSTORE(MaskedStoreSDNode *N, unsigned OpNo);
SDValue PromoteIntOp_MLOAD(MaskedLoadSDNode *N, unsigned OpNo);
void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
//===--------------------------------------------------------------------===//
// Integer Expansion Support: LegalizeIntegerTypes.cpp
//===--------------------------------------------------------------------===//
/// GetExpandedInteger - Given a processed operand Op which was expanded into
/// two integers of half the size, this returns the two halves. The low bits
/// of Op are exactly equal to the bits of Lo; the high bits exactly equal Hi.
/// For example, if Op is an i64 which was expanded into two i32's, then this
/// method returns the two i32's, with Lo being equal to the lower 32 bits of
/// Op, and Hi being equal to the upper 32 bits.
void GetExpandedInteger(SDValue Op, SDValue &Lo, SDValue &Hi);
void SetExpandedInteger(SDValue Op, SDValue Lo, SDValue Hi);
// Integer Result Expansion.
void ExpandIntegerResult(SDNode *N, unsigned ResNo);
void ExpandIntRes_MERGE_VALUES (SDNode *N, unsigned ResNo,
SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ANY_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_AssertSext (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_AssertZext (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_Constant (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_CTLZ (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_CTPOP (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_CTTZ (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_LOAD (LoadSDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SIGN_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SIGN_EXTEND_INREG (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_TRUNCATE (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ZERO_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_FP_TO_SINT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_FP_TO_UINT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_Logical (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ADDSUB (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ADDSUBC (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ADDSUBE (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_BSWAP (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_MUL (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_UDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_UREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_Shift (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_UADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_XMULO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ATOMIC_LOAD (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandShiftByConstant(SDNode *N, const APInt &Amt,
SDValue &Lo, SDValue &Hi);
bool ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);
bool ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);
// Integer Operand Expansion.
bool ExpandIntegerOperand(SDNode *N, unsigned OperandNo);
SDValue ExpandIntOp_BITCAST(SDNode *N);
SDValue ExpandIntOp_BR_CC(SDNode *N);
SDValue ExpandIntOp_BUILD_VECTOR(SDNode *N);
SDValue ExpandIntOp_EXTRACT_ELEMENT(SDNode *N);
SDValue ExpandIntOp_SELECT_CC(SDNode *N);
SDValue ExpandIntOp_SETCC(SDNode *N);
SDValue ExpandIntOp_Shift(SDNode *N);
SDValue ExpandIntOp_SINT_TO_FP(SDNode *N);
SDValue ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo);
SDValue ExpandIntOp_TRUNCATE(SDNode *N);
SDValue ExpandIntOp_UINT_TO_FP(SDNode *N);
SDValue ExpandIntOp_RETURNADDR(SDNode *N);
SDValue ExpandIntOp_ATOMIC_STORE(SDNode *N);
void IntegerExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode, SDLoc dl);
//===--------------------------------------------------------------------===//
// Float to Integer Conversion Support: LegalizeFloatTypes.cpp
//===--------------------------------------------------------------------===//
/// GetSoftenedFloat - Given a processed operand Op which was converted to an
/// integer of the same size, this returns the integer. The integer contains
/// exactly the same bits as Op - only the type changed. For example, if Op
/// is an f32 which was softened to an i32, then this method returns an i32,
/// the bits of which coincide with those of Op.
SDValue GetSoftenedFloat(SDValue Op) {
SDValue &SoftenedOp = SoftenedFloats[Op];
RemapValue(SoftenedOp);
assert(SoftenedOp.getNode() && "Operand wasn't converted to integer?");
return SoftenedOp;
}
void SetSoftenedFloat(SDValue Op, SDValue Result);
// Result Float to Integer Conversion.
void SoftenFloatResult(SDNode *N, unsigned OpNo);
SDValue SoftenFloatRes_MERGE_VALUES(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_BITCAST(SDNode *N);
SDValue SoftenFloatRes_BUILD_PAIR(SDNode *N);
SDValue SoftenFloatRes_ConstantFP(ConstantFPSDNode *N);
SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue SoftenFloatRes_FABS(SDNode *N);
SDValue SoftenFloatRes_FMINNUM(SDNode *N);
SDValue SoftenFloatRes_FMAXNUM(SDNode *N);
SDValue SoftenFloatRes_FADD(SDNode *N);
SDValue SoftenFloatRes_FCEIL(SDNode *N);
SDValue SoftenFloatRes_FCOPYSIGN(SDNode *N);
SDValue SoftenFloatRes_FCOS(SDNode *N);
SDValue SoftenFloatRes_FDIV(SDNode *N);
SDValue SoftenFloatRes_FEXP(SDNode *N);
SDValue SoftenFloatRes_FEXP2(SDNode *N);
SDValue SoftenFloatRes_FFLOOR(SDNode *N);
SDValue SoftenFloatRes_FLOG(SDNode *N);
SDValue SoftenFloatRes_FLOG2(SDNode *N);
SDValue SoftenFloatRes_FLOG10(SDNode *N);
SDValue SoftenFloatRes_FMA(SDNode *N);
SDValue SoftenFloatRes_FMUL(SDNode *N);
SDValue SoftenFloatRes_FNEARBYINT(SDNode *N);
SDValue SoftenFloatRes_FNEG(SDNode *N);
SDValue SoftenFloatRes_FP_EXTEND(SDNode *N);
SDValue SoftenFloatRes_FP16_TO_FP(SDNode *N);
SDValue SoftenFloatRes_FP_ROUND(SDNode *N);
SDValue SoftenFloatRes_FPOW(SDNode *N);
SDValue SoftenFloatRes_FPOWI(SDNode *N);
SDValue SoftenFloatRes_FREM(SDNode *N);
SDValue SoftenFloatRes_FRINT(SDNode *N);
SDValue SoftenFloatRes_FROUND(SDNode *N);
SDValue SoftenFloatRes_FSIN(SDNode *N);
SDValue SoftenFloatRes_FSQRT(SDNode *N);
SDValue SoftenFloatRes_FSUB(SDNode *N);
SDValue SoftenFloatRes_FTRUNC(SDNode *N);
SDValue SoftenFloatRes_LOAD(SDNode *N);
SDValue SoftenFloatRes_SELECT(SDNode *N);
SDValue SoftenFloatRes_SELECT_CC(SDNode *N);
SDValue SoftenFloatRes_UNDEF(SDNode *N);
SDValue SoftenFloatRes_VAARG(SDNode *N);
SDValue SoftenFloatRes_XINT_TO_FP(SDNode *N);
// Operand Float to Integer Conversion.
bool SoftenFloatOperand(SDNode *N, unsigned OpNo);
SDValue SoftenFloatOp_BITCAST(SDNode *N);
SDValue SoftenFloatOp_BR_CC(SDNode *N);
SDValue SoftenFloatOp_FP_EXTEND(SDNode *N);
SDValue SoftenFloatOp_FP_ROUND(SDNode *N);
SDValue SoftenFloatOp_FP_TO_SINT(SDNode *N);
SDValue SoftenFloatOp_FP_TO_UINT(SDNode *N);
SDValue SoftenFloatOp_SELECT_CC(SDNode *N);
SDValue SoftenFloatOp_SETCC(SDNode *N);
SDValue SoftenFloatOp_STORE(SDNode *N, unsigned OpNo);
//===--------------------------------------------------------------------===//
// Float Expansion Support: LegalizeFloatTypes.cpp
//===--------------------------------------------------------------------===//
/// GetExpandedFloat - Given a processed operand Op which was expanded into
/// two floating point values of half the size, this returns the two halves.
/// The low bits of Op are exactly equal to the bits of Lo; the high bits
/// exactly equal Hi. For example, if Op is a ppcf128 which was expanded
/// into two f64's, then this method returns the two f64's, with Lo being
/// equal to the lower 64 bits of Op, and Hi to the upper 64 bits.
void GetExpandedFloat(SDValue Op, SDValue &Lo, SDValue &Hi);
void SetExpandedFloat(SDValue Op, SDValue Lo, SDValue Hi);
// Float Result Expansion.
void ExpandFloatResult(SDNode *N, unsigned ResNo);
void ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FABS (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FMINNUM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FMAXNUM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FADD (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FCEIL (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FCOPYSIGN (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FCOS (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FEXP (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FEXP2 (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FFLOOR (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FLOG (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FLOG2 (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FLOG10 (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FMA (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FMUL (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FNEARBYINT(SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FNEG (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FP_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FPOW (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FPOWI (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FRINT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FROUND (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FSIN (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FSQRT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FSUB (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FTRUNC (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_LOAD (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo, SDValue &Hi);
// Float Operand Expansion.
bool ExpandFloatOperand(SDNode *N, unsigned OperandNo);
SDValue ExpandFloatOp_BR_CC(SDNode *N);
SDValue ExpandFloatOp_FCOPYSIGN(SDNode *N);
SDValue ExpandFloatOp_FP_ROUND(SDNode *N);
SDValue ExpandFloatOp_FP_TO_SINT(SDNode *N);
SDValue ExpandFloatOp_FP_TO_UINT(SDNode *N);
SDValue ExpandFloatOp_SELECT_CC(SDNode *N);
SDValue ExpandFloatOp_SETCC(SDNode *N);
SDValue ExpandFloatOp_STORE(SDNode *N, unsigned OpNo);
void FloatExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode, SDLoc dl);
//===--------------------------------------------------------------------===//
// Float promotion support: LegalizeFloatTypes.cpp
//===--------------------------------------------------------------------===//
SDValue GetPromotedFloat(SDValue Op) {
SDValue &PromotedOp = PromotedFloats[Op];
RemapValue(PromotedOp);
assert(PromotedOp.getNode() && "Operand wasn't promoted?");
return PromotedOp;
}
void SetPromotedFloat(SDValue Op, SDValue Result);
void PromoteFloatResult(SDNode *N, unsigned ResNo);
SDValue PromoteFloatRes_BITCAST(SDNode *N);
SDValue PromoteFloatRes_BinOp(SDNode *N);
SDValue PromoteFloatRes_ConstantFP(SDNode *N);
SDValue PromoteFloatRes_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue PromoteFloatRes_FCOPYSIGN(SDNode *N);
SDValue PromoteFloatRes_FMAD(SDNode *N);
SDValue PromoteFloatRes_FPOWI(SDNode *N);
SDValue PromoteFloatRes_FP_ROUND(SDNode *N);
SDValue PromoteFloatRes_LOAD(SDNode *N);
SDValue PromoteFloatRes_SELECT(SDNode *N);
SDValue PromoteFloatRes_SELECT_CC(SDNode *N);
SDValue PromoteFloatRes_UnaryOp(SDNode *N);
SDValue PromoteFloatRes_UNDEF(SDNode *N);
SDValue PromoteFloatRes_XINT_TO_FP(SDNode *N);
bool PromoteFloatOperand(SDNode *N, unsigned ResNo);
SDValue PromoteFloatOp_BITCAST(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FCOPYSIGN(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FP_EXTEND(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FP_TO_XINT(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_STORE(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_SELECT_CC(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_SETCC(SDNode *N, unsigned OpNo);
//===--------------------------------------------------------------------===//
// Scalarization Support: LegalizeVectorTypes.cpp
//===--------------------------------------------------------------------===//
/// GetScalarizedVector - Given a processed one-element vector Op which was
/// scalarized to its element type, this returns the element. For example,
/// if Op is a v1i32, Op = < i32 val >, this method returns val, an i32.
SDValue GetScalarizedVector(SDValue Op) {
SDValue &ScalarizedOp = ScalarizedVectors[Op];
RemapValue(ScalarizedOp);
assert(ScalarizedOp.getNode() && "Operand wasn't scalarized?");
return ScalarizedOp;
}
void SetScalarizedVector(SDValue Op, SDValue Result);
// Vector Result Scalarization: <1 x ty> -> ty.
void ScalarizeVectorResult(SDNode *N, unsigned OpNo);
SDValue ScalarizeVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo);
SDValue ScalarizeVecRes_BinOp(SDNode *N);
SDValue ScalarizeVecRes_TernaryOp(SDNode *N);
SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
SDValue ScalarizeVecRes_InregOp(SDNode *N);
SDValue ScalarizeVecRes_BITCAST(SDNode *N);
SDValue ScalarizeVecRes_BUILD_VECTOR(SDNode *N);
SDValue ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N);
SDValue ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N);
SDValue ScalarizeVecRes_FP_ROUND(SDNode *N);
SDValue ScalarizeVecRes_FPOWI(SDNode *N);
SDValue ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N);
SDValue ScalarizeVecRes_LOAD(LoadSDNode *N);
SDValue ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N);
SDValue ScalarizeVecRes_SIGN_EXTEND_INREG(SDNode *N);
SDValue ScalarizeVecRes_VSELECT(SDNode *N);
SDValue ScalarizeVecRes_SELECT(SDNode *N);
SDValue ScalarizeVecRes_SELECT_CC(SDNode *N);
SDValue ScalarizeVecRes_SETCC(SDNode *N);
SDValue ScalarizeVecRes_UNDEF(SDNode *N);
SDValue ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N);
SDValue ScalarizeVecRes_VSETCC(SDNode *N);
// Vector Operand Scalarization: <1 x ty> -> ty.
bool ScalarizeVectorOperand(SDNode *N, unsigned OpNo);
SDValue ScalarizeVecOp_BITCAST(SDNode *N);
SDValue ScalarizeVecOp_UnaryOp(SDNode *N);
SDValue ScalarizeVecOp_CONCAT_VECTORS(SDNode *N);
SDValue ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue ScalarizeVecOp_VSELECT(SDNode *N);
SDValue ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo);
SDValue ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo);
//===--------------------------------------------------------------------===//
// Vector Splitting Support: LegalizeVectorTypes.cpp
//===--------------------------------------------------------------------===//
/// GetSplitVector - Given a processed vector Op which was split into vectors
/// of half the size, this method returns the halves. The first elements of
/// Op coincide with the elements of Lo; the remaining elements of Op coincide
/// with the elements of Hi: Op is what you would get by concatenating Lo and
/// Hi. For example, if Op is a v8i32 that was split into two v4i32's, then
/// this method returns the two v4i32's, with Lo corresponding to the first 4
/// elements of Op, and Hi to the last 4 elements.
void GetSplitVector(SDValue Op, SDValue &Lo, SDValue &Hi);
void SetSplitVector(SDValue Op, SDValue Lo, SDValue Hi);
// Vector Result Splitting: <128 x ty> -> 2 x <64 x ty>.
void SplitVectorResult(SDNode *N, unsigned OpNo);
void SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_InregOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_BUILD_PAIR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_FPOWI(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_LOAD(LoadSDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_MLOAD(MaskedLoadSDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_MGATHER(MaskedGatherSDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_UNDEF(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N, SDValue &Lo,
SDValue &Hi);
// Vector Operand Splitting: <128 x ty> -> 2 x <64 x ty>.
bool SplitVectorOperand(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_VSELECT(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_UnaryOp(SDNode *N);
SDValue SplitVecOp_TruncateHelper(SDNode *N);
SDValue SplitVecOp_BITCAST(SDNode *N);
SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo);
SDValue SplitVecOp_MSTORE(MaskedStoreSDNode *N, unsigned OpNo);
SDValue SplitVecOp_MSCATTER(MaskedScatterSDNode *N, unsigned OpNo);
SDValue SplitVecOp_MGATHER(MaskedGatherSDNode *N, unsigned OpNo);
SDValue SplitVecOp_CONCAT_VECTORS(SDNode *N);
SDValue SplitVecOp_VSETCC(SDNode *N);
SDValue SplitVecOp_FP_ROUND(SDNode *N);
//===--------------------------------------------------------------------===//
// Vector Widening Support: LegalizeVectorTypes.cpp
//===--------------------------------------------------------------------===//
/// GetWidenedVector - Given a processed vector Op which was widened into a
/// larger vector, this method returns the larger vector. The elements of
/// the returned vector consist of the elements of Op followed by elements
/// containing rubbish. For example, if Op is a v2i32 that was widened to a
/// v4i32, then this method returns a v4i32 for which the first two elements
/// are the same as those of Op, while the last two elements contain rubbish.
SDValue GetWidenedVector(SDValue Op) {
SDValue &WidenedOp = WidenedVectors[Op];
RemapValue(WidenedOp);
assert(WidenedOp.getNode() && "Operand wasn't widened?");
return WidenedOp;
}
void SetWidenedVector(SDValue Op, SDValue Result);
// Widen Vector Result Promotion.
void WidenVectorResult(SDNode *N, unsigned ResNo);
SDValue WidenVecRes_MERGE_VALUES(SDNode* N, unsigned ResNo);
SDValue WidenVecRes_BITCAST(SDNode* N);
SDValue WidenVecRes_BUILD_VECTOR(SDNode* N);
SDValue WidenVecRes_CONCAT_VECTORS(SDNode* N);
SDValue WidenVecRes_CONVERT_RNDSAT(SDNode* N);
SDValue WidenVecRes_EXTRACT_SUBVECTOR(SDNode* N);
SDValue WidenVecRes_INSERT_VECTOR_ELT(SDNode* N);
SDValue WidenVecRes_LOAD(SDNode* N);
SDValue WidenVecRes_MLOAD(MaskedLoadSDNode* N);
SDValue WidenVecRes_SCALAR_TO_VECTOR(SDNode* N);
SDValue WidenVecRes_SIGN_EXTEND_INREG(SDNode* N);
SDValue WidenVecRes_SELECT(SDNode* N);
SDValue WidenVecRes_SELECT_CC(SDNode* N);
SDValue WidenVecRes_SETCC(SDNode* N);
SDValue WidenVecRes_UNDEF(SDNode *N);
SDValue WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N);
SDValue WidenVecRes_VSETCC(SDNode* N);
SDValue WidenVecRes_Ternary(SDNode *N);
SDValue WidenVecRes_Binary(SDNode *N);
SDValue WidenVecRes_BinaryCanTrap(SDNode *N);
SDValue WidenVecRes_Convert(SDNode *N);
SDValue WidenVecRes_POWI(SDNode *N);
SDValue WidenVecRes_Shift(SDNode *N);
SDValue WidenVecRes_Unary(SDNode *N);
SDValue WidenVecRes_InregOp(SDNode *N);
// Widen Vector Operand.
bool WidenVectorOperand(SDNode *N, unsigned OpNo);
SDValue WidenVecOp_BITCAST(SDNode *N);
SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N);
SDValue WidenVecOp_EXTEND(SDNode *N);
SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue WidenVecOp_STORE(SDNode* N);
SDValue WidenVecOp_MSTORE(SDNode* N, unsigned OpNo);
SDValue WidenVecOp_SETCC(SDNode* N);
SDValue WidenVecOp_Convert(SDNode *N);
//===--------------------------------------------------------------------===//
// Vector Widening Utilities Support: LegalizeVectorTypes.cpp
//===--------------------------------------------------------------------===//
/// GenWidenVectorLoads - Helper function to generate a set of
/// loads to load a vector with a resulting wider type. It takes
/// LdChain: list of chains for the load to be generated.
/// Ld: load to widen
SDValue GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD);
/// GenWidenVectorExtLoads - Helper function to generate a set of extension
/// loads to load a vector with a resulting wider type. It takes
/// LdChain: list of chains for the load to be generated.
/// Ld: load to widen
/// ExtType: extension element type
SDValue GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD, ISD::LoadExtType ExtType);
/// GenWidenVectorStores - Helper function to generate a set of
/// stores to store a widened vector into non-widened memory.
/// StChain: list of chains for the stores we have generated
/// ST: store of a widen value
void GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain, StoreSDNode *ST);
/// GenWidenVectorTruncStores - Helper function to generate a set of
/// truncating stores to store a widened vector into non-widened memory.
/// StChain: list of chains for the stores we have generated
/// ST: store of a widen value
void GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST);
/// Modifies a vector input (widens or narrows) to a vector of NVT. The
/// input vector must have the same element type as NVT.
SDValue ModifyToType(SDValue InOp, EVT NVT);
//===--------------------------------------------------------------------===//
// Generic Splitting: LegalizeTypesGeneric.cpp
//===--------------------------------------------------------------------===//
// Legalization methods which only use that the illegal type is split into two
// not necessarily identical types. As such they can be used for splitting
// vectors and expanding integers and floats.
void GetSplitOp(SDValue Op, SDValue &Lo, SDValue &Hi) {
if (Op.getValueType().isVector())
GetSplitVector(Op, Lo, Hi);
else if (Op.getValueType().isInteger())
GetExpandedInteger(Op, Lo, Hi);
else
GetExpandedFloat(Op, Lo, Hi);
}
/// GetPairElements - Use ISD::EXTRACT_ELEMENT nodes to extract the low and
/// high parts of the given value.
void GetPairElements(SDValue Pair, SDValue &Lo, SDValue &Hi);
// Generic Result Splitting.
void SplitRes_MERGE_VALUES(SDNode *N, unsigned ResNo,
SDValue &Lo, SDValue &Hi);
void SplitRes_SELECT (SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitRes_SELECT_CC (SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitRes_UNDEF (SDNode *N, SDValue &Lo, SDValue &Hi);
//===--------------------------------------------------------------------===//
// Generic Expansion: LegalizeTypesGeneric.cpp
//===--------------------------------------------------------------------===//
// Legalization methods which rely only on the fact that the illegal type is
// split into two identical types of half the size, and that the Lo/Hi part
// is stored first in memory on little/big-endian machines, followed by the
// Hi/Lo part. As such they can be used for expanding integers and floats.
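// For example, an i64 expanded into two i32 parts on a little-endian target
// stores Lo at the base address and Hi at base+4, while a big-endian target
// stores Hi first and Lo at base+4.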
void GetExpandedOp(SDValue Op, SDValue &Lo, SDValue &Hi) {
if (Op.getValueType().isInteger())
GetExpandedInteger(Op, Lo, Hi);
else
GetExpandedFloat(Op, Lo, Hi);
}
/// This function will split the integer \p Op into \p NumElements
/// operations of type \p EltVT and store them in \p Ops.
void IntegerToVector(SDValue Op, unsigned NumElements,
SmallVectorImpl<SDValue> &Ops, EVT EltVT);
// Generic Result Expansion.
void ExpandRes_MERGE_VALUES (SDNode *N, unsigned ResNo,
SDValue &Lo, SDValue &Hi);
void ExpandRes_BITCAST (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_BUILD_PAIR (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_EXTRACT_ELEMENT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_NormalLoad (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_VAARG (SDNode *N, SDValue &Lo, SDValue &Hi);
// Generic Operand Expansion.
SDValue ExpandOp_BITCAST (SDNode *N);
SDValue ExpandOp_BUILD_VECTOR (SDNode *N);
SDValue ExpandOp_EXTRACT_ELEMENT (SDNode *N);
SDValue ExpandOp_INSERT_VECTOR_ELT(SDNode *N);
SDValue ExpandOp_SCALAR_TO_VECTOR (SDNode *N);
SDValue ExpandOp_NormalStore (SDNode *N, unsigned OpNo);
};
} // end namespace llvm.
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | //===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <utility>
using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
SDVTList Res = {VTs, NumVTs};
return Res;
}
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//
/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
return getValueAPF().bitwiseIsEqual(V);
}
bool ConstantFPSDNode::isValueValidForType(EVT VT,
const APFloat& Val) {
assert(VT.isFloatingPoint() && "Can only convert between FP types");
// convert modifies in place, so make a copy.
APFloat Val2 = APFloat(Val);
bool losesInfo;
(void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
APFloat::rmNearestTiesToEven,
&losesInfo);
return !losesInfo;
}
//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//
/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
// Look through a bit convert.
while (N->getOpcode() == ISD::BITCAST)
N = N->getOperand(0).getNode();
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
unsigned i = 0, e = N->getNumOperands();
// Skip over all of the undef values.
while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
++i;
// Do not accept an all-undef vector.
if (i == e) return false;
// Do not accept build_vectors that aren't all constants or which have non-~0
// elements. We have to be a bit careful here, as the type of the constant
// may not be the same as the type of the vector elements due to type
// legalization (the elements are promoted to a legal type for the target and
// a vector of a type may be legal when the base element type is not).
// We only want to check enough bits to cover the vector elements, because
// we care if the resultant vector is all ones, not whether the individual
// constants are.
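// For example, a v8i8 all-ones vector may survive legalization as a
// BUILD_VECTOR of i32 operands holding 0xFF; countTrailingOnes() >= 8 is
// then the right test even though the i32 value itself is not all ones.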
SDValue NotZero = N->getOperand(i);
unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
if (CN->getAPIntValue().countTrailingOnes() < EltSize)
return false;
} else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
return false;
} else
return false;
// Okay, we have at least one ~0 value, check to see if the rest match or are
// undefs. Even with the above element type twiddling, this should be OK, as
// the same type legalization should have applied to all the elements.
for (++i; i != e; ++i)
if (N->getOperand(i) != NotZero &&
N->getOperand(i).getOpcode() != ISD::UNDEF)
return false;
return true;
}
/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
// Look through a bit convert.
while (N->getOpcode() == ISD::BITCAST)
N = N->getOperand(0).getNode();
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
bool IsAllUndef = true;
for (const SDValue &Op : N->op_values()) {
if (Op.getOpcode() == ISD::UNDEF)
continue;
IsAllUndef = false;
// Do not accept build_vectors that aren't all constants or which have non-0
// elements. We have to be a bit careful here, as the type of the constant
// may not be the same as the type of the vector elements due to type
// legalization (the elements are promoted to a legal type for the target
// and a vector of a type may be legal when the base element type is not).
// We only want to check enough bits to cover the vector elements, because
// we care if the resultant vector is all zeros, not whether the individual
// constants are.
unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
if (CN->getAPIntValue().countTrailingZeros() < EltSize)
return false;
} else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
return false;
} else
return false;
}
// Do not accept an all-undef vector.
if (IsAllUndef)
return false;
return true;
}
/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
if (N->getOpcode() != ISD::BUILD_VECTOR)
return false;
for (const SDValue &Op : N->op_values()) {
if (Op.getOpcode() == ISD::UNDEF)
continue;
if (!isa<ConstantSDNode>(Op))
return false;
}
return true;
}
/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantFPSDNode or undef.
bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
if (N->getOpcode() != ISD::BUILD_VECTOR)
return false;
for (const SDValue &Op : N->op_values()) {
if (Op.getOpcode() == ISD::UNDEF)
continue;
if (!isa<ConstantFPSDNode>(Op))
return false;
}
return true;
}
/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
return true;
if (N->getOpcode() != ISD::BUILD_VECTOR)
return false;
if (N->getOperand(0).getOpcode() == ISD::UNDEF)
return false;
unsigned NumElems = N->getNumOperands();
if (NumElems == 1)
return false;
for (unsigned i = 1; i < NumElems; ++i) {
SDValue V = N->getOperand(i);
if (V.getOpcode() != ISD::UNDEF)
return false;
}
return true;
}
/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
// Return false if the node has no operands.
// This is "logically inconsistent" with the definition of "all" but
// is probably the desired behavior.
if (N->getNumOperands() == 0)
return false;
for (const SDValue &Op : N->op_values())
if (Op.getOpcode() != ISD::UNDEF)
return false;
return true;
}
ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
switch (ExtType) {
case ISD::EXTLOAD:
return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
case ISD::SEXTLOAD:
return ISD::SIGN_EXTEND;
case ISD::ZEXTLOAD:
return ISD::ZERO_EXTEND;
default:
break;
}
llvm_unreachable("Invalid LoadExtType");
}
/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
// To perform this operation, we just need to swap the L and G bits of the
// operation.
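// Condition codes are bit-encoded (see ISD::CondCode): bit 0 = E, bit 1 = G,
// bit 2 = L, bit 3 = U, bit 4 = N. For example, swapping the operands of
// SETLT (N|L) yields SETGT (N|G), since (X < Y) == (Y > X).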
unsigned OldL = (Operation >> 2) & 1;
unsigned OldG = (Operation >> 1) & 1;
return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
(OldL << 1) | // New G bit
(OldG << 2)); // New L bit.
}
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
unsigned Operation = Op;
if (isInteger)
Operation ^= 7; // Flip L, G, E bits, but not U.
else
Operation ^= 15; // Flip all of the condition bits.
if (Operation > ISD::SETTRUE2)
Operation &= ~8; // Don't let N and U bits get set.
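// For example, inverting the integer compare SETLT (N|L) flips the L, G and
// E bits to give SETGE (N|G|E): !(X < Y) == (X >= Y).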
return ISD::CondCode(Operation);
}
/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
switch (Opcode) {
default: llvm_unreachable("Illegal integer setcc operation!");
case ISD::SETEQ:
case ISD::SETNE: return 0;
case ISD::SETLT:
case ISD::SETLE:
case ISD::SETGT:
case ISD::SETGE: return 1;
case ISD::SETULT:
case ISD::SETULE:
case ISD::SETUGT:
case ISD::SETUGE: return 2;
}
}
/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
bool isInteger) {
if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
// Cannot fold a signed integer setcc with an unsigned integer setcc.
return ISD::SETCC_INVALID;
unsigned Op = Op1 | Op2; // Combine all of the condition bits.
// If the N and U bits get set then the resultant comparison DOES suddenly
// care about orderedness, and is true when ordered.
if (Op > ISD::SETTRUE2)
Op &= ~16; // Clear the U bit if the N bit is set.
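// For example, SETLT (N|L) | SETGT (N|G) combines to N|L|G, which is SETNE:
// (X < Y) | (X > Y) is exactly (X != Y) for integers.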
// Canonicalize illegal integer setcc's.
if (isInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
Op = ISD::SETNE;
return ISD::CondCode(Op);
}
/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
bool isInteger) {
if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
// Cannot fold a signed setcc with an unsigned setcc.
return ISD::SETCC_INVALID;
// Combine all of the condition bits.
ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
// Canonicalize illegal integer setcc's.
if (isInteger) {
switch (Result) {
default: break;
case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
case ISD::SETOEQ: // SETEQ & SETU[LG]E
case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
}
}
return Result;
}
//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//
/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
ID.AddInteger(OpC);
}
/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
ID.AddPointer(VTList.VTs);
}
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
ArrayRef<SDValue> Ops) {
for (auto& Op : Ops) {
ID.AddPointer(Op.getNode());
ID.AddInteger(Op.getResNo());
}
}
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
ArrayRef<SDUse> Ops) {
for (auto& Op : Ops) {
ID.AddPointer(Op.getNode());
ID.AddInteger(Op.getResNo());
}
}
/// Add logical or fast math flag values to FoldingSetNodeID value.
static void AddNodeIDFlags(FoldingSetNodeID &ID, unsigned Opcode,
const SDNodeFlags *Flags) {
if (!Flags || !isBinOpWithFlags(Opcode))
return;
unsigned RawFlags = Flags->getRawFlags();
// If no flags are set, do not alter the ID. We must match the ID of nodes
// that were created without explicitly specifying flags. This also saves time
// and allows a gradual increase in API usage of the optional optimization
// flags.
if (RawFlags != 0)
ID.AddInteger(RawFlags);
}
static void AddNodeIDFlags(FoldingSetNodeID &ID, const SDNode *N) {
if (auto *Node = dyn_cast<BinaryWithFlagsSDNode>(N))
AddNodeIDFlags(ID, Node->getOpcode(), &Node->Flags);
}
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
SDVTList VTList, ArrayRef<SDValue> OpList) {
AddNodeIDOpcode(ID, OpC);
AddNodeIDValueTypes(ID, VTList);
AddNodeIDOperands(ID, OpList);
}
/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
switch (N->getOpcode()) {
case ISD::TargetExternalSymbol:
case ISD::ExternalSymbol:
case ISD::MCSymbol:
llvm_unreachable("Should only be used on nodes with operands");
default: break; // Normal nodes don't need extra info.
case ISD::TargetConstant:
case ISD::Constant: {
const ConstantSDNode *C = cast<ConstantSDNode>(N);
ID.AddPointer(C->getConstantIntValue());
ID.AddBoolean(C->isOpaque());
break;
}
case ISD::TargetConstantFP:
case ISD::ConstantFP: {
ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
break;
}
case ISD::TargetGlobalAddress:
case ISD::GlobalAddress:
case ISD::TargetGlobalTLSAddress:
case ISD::GlobalTLSAddress: {
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
ID.AddPointer(GA->getGlobal());
ID.AddInteger(GA->getOffset());
ID.AddInteger(GA->getTargetFlags());
ID.AddInteger(GA->getAddressSpace());
break;
}
case ISD::BasicBlock:
ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
break;
case ISD::Register:
ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
break;
case ISD::RegisterMask:
ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
break;
case ISD::SRCVALUE:
ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
break;
case ISD::FrameIndex:
case ISD::TargetFrameIndex:
ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
break;
case ISD::JumpTable:
case ISD::TargetJumpTable:
ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
break;
case ISD::ConstantPool:
case ISD::TargetConstantPool: {
const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
ID.AddInteger(CP->getAlignment());
ID.AddInteger(CP->getOffset());
if (CP->isMachineConstantPoolEntry())
CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
else
ID.AddPointer(CP->getConstVal());
ID.AddInteger(CP->getTargetFlags());
break;
}
case ISD::TargetIndex: {
const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
ID.AddInteger(TI->getIndex());
ID.AddInteger(TI->getOffset());
ID.AddInteger(TI->getTargetFlags());
break;
}
case ISD::LOAD: {
const LoadSDNode *LD = cast<LoadSDNode>(N);
ID.AddInteger(LD->getMemoryVT().getRawBits());
ID.AddInteger(LD->getRawSubclassData());
ID.AddInteger(LD->getPointerInfo().getAddrSpace());
break;
}
case ISD::STORE: {
const StoreSDNode *ST = cast<StoreSDNode>(N);
ID.AddInteger(ST->getMemoryVT().getRawBits());
ID.AddInteger(ST->getRawSubclassData());
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
break;
}
case ISD::ATOMIC_CMP_SWAP:
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE: {
const AtomicSDNode *AT = cast<AtomicSDNode>(N);
ID.AddInteger(AT->getMemoryVT().getRawBits());
ID.AddInteger(AT->getRawSubclassData());
ID.AddInteger(AT->getPointerInfo().getAddrSpace());
break;
}
case ISD::PREFETCH: {
const MemSDNode *PF = cast<MemSDNode>(N);
ID.AddInteger(PF->getPointerInfo().getAddrSpace());
break;
}
case ISD::VECTOR_SHUFFLE: {
const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
i != e; ++i)
ID.AddInteger(SVN->getMaskElt(i));
break;
}
case ISD::TargetBlockAddress:
case ISD::BlockAddress: {
const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
ID.AddPointer(BA->getBlockAddress());
ID.AddInteger(BA->getOffset());
ID.AddInteger(BA->getTargetFlags());
break;
}
} // end switch (N->getOpcode())
AddNodeIDFlags(ID, N);
// Target specific memory nodes could also have address spaces to check.
if (N->isTargetMemoryOpcode())
ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
AddNodeIDOpcode(ID, N->getOpcode());
// Add the return value info.
AddNodeIDValueTypes(ID, N->getVTList());
// Add the operand info.
AddNodeIDOperands(ID, N->ops());
// Handle SDNode leaves with special info.
AddNodeIDCustom(ID, N);
}
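// The routines above feed the DAG's folding set. A typical CSE lookup when
// creating a node, repeated throughout this file, looks like this (sketch):
//
//   FoldingSetNodeID ID;
//   AddNodeIDNode(ID, Opc, getVTList(VT), Ops); // hash opcode, VTs, operands
//   void *IP = nullptr;
//   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
//     return SDValue(E, 0);          // reuse a structurally identical node
//   SDNode *N = ...;                 // otherwise allocate a new node
//   CSEMap.InsertNode(N, IP);
//   InsertNode(N);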
/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
bool isNonTemporal, bool isInvariant) {
assert((ConvType & 3) == ConvType &&
"ConvType may not require more than 2 bits!");
assert((AM & 7) == AM &&
"AM may not require more than 3 bits!");
return ConvType |
(AM << 2) |
(isVolatile << 5) |
(isNonTemporal << 6) |
(isInvariant << 7);
}
//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//
/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
if (N->getValueType(0) == MVT::Glue)
return true; // Never CSE anything that produces a flag.
switch (N->getOpcode()) {
default: break;
case ISD::HANDLENODE:
case ISD::EH_LABEL:
return true; // Never CSE these nodes.
}
// Check that remaining values produced are not flags.
for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
if (N->getValueType(i) == MVT::Glue)
return true; // Never CSE anything that produces a flag.
return false;
}
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
// Create a dummy node (which is not added to allnodes) that adds a reference
// to the root node, preventing it from being deleted.
HandleSDNode Dummy(getRoot());
SmallVector<SDNode*, 128> DeadNodes;
// Add all obviously-dead nodes to the DeadNodes worklist.
for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
if (I->use_empty())
DeadNodes.push_back(I);
RemoveDeadNodes(DeadNodes);
// If the root changed (e.g. it was a dead load), update the root.
setRoot(Dummy.getValue());
}
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
// Process the worklist, deleting the nodes and adding their uses to the
// worklist.
while (!DeadNodes.empty()) {
SDNode *N = DeadNodes.pop_back_val();
for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
DUL->NodeDeleted(N, nullptr);
// Take the node out of the appropriate CSE map.
RemoveNodeFromCSEMaps(N);
// Next, brutally remove the operand list. This is safe to do, as there are
// no cycles in the graph.
for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
SDUse &Use = *I++;
SDNode *Operand = Use.getNode();
Use.set(SDValue());
// Now that we removed this operand, see if there are no uses of it left.
if (Operand->use_empty())
DeadNodes.push_back(Operand);
}
DeallocateNode(N);
}
}
void SelectionDAG::RemoveDeadNode(SDNode *N){
SmallVector<SDNode*, 16> DeadNodes(1, N);
// Create a dummy node that adds a reference to the root node, preventing
// it from being deleted. (This matters if the root is an operand of the
// dead node.)
HandleSDNode Dummy(getRoot());
RemoveDeadNodes(DeadNodes);
}
void SelectionDAG::DeleteNode(SDNode *N) {
// First take this out of the appropriate CSE map.
RemoveNodeFromCSEMaps(N);
// Finally, remove uses due to operands of this node, remove from the
// AllNodes list, and delete the node.
DeleteNodeNotInCSEMaps(N);
}
void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
assert(N != AllNodes.begin() && "Cannot delete the entry node!");
assert(N->use_empty() && "Cannot delete a node that is not dead!");
// Drop all of the operands and decrement used node's use counts.
N->DropOperands();
DeallocateNode(N);
}
void SDDbgInfo::erase(const SDNode *Node) {
DbgValMapType::iterator I = DbgValMap.find(Node);
if (I == DbgValMap.end())
return;
for (auto &Val: I->second)
Val->setIsInvalidated();
DbgValMap.erase(I);
}
void SelectionDAG::DeallocateNode(SDNode *N) {
if (N->OperandsNeedDelete)
delete[] N->OperandList;
// Set the opcode to DELETED_NODE to help catch bugs when node
// memory is reallocated.
N->NodeType = ISD::DELETED_NODE;
NodeAllocator.Deallocate(AllNodes.remove(N));
// If any of the SDDbgValue nodes refer to this SDNode, invalidate
// them and forget about that node.
DbgInfo->erase(N);
}
#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
switch (N->getOpcode()) {
default:
break;
case ISD::BUILD_PAIR: {
EVT VT = N->getValueType(0);
assert(N->getNumValues() == 1 && "Too many results!");
assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
"Wrong return type!");
assert(N->getNumOperands() == 2 && "Wrong number of operands!");
assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
"Mismatched operand types!");
assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
"Wrong operand type!");
assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
"Wrong return type size");
break;
}
case ISD::BUILD_VECTOR: {
assert(N->getNumValues() == 1 && "Too many results!");
assert(N->getValueType(0).isVector() && "Wrong return type!");
assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
"Wrong number of operands!");
EVT EltVT = N->getValueType(0).getVectorElementType();
for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
assert((I->getValueType() == EltVT ||
(EltVT.isInteger() && I->getValueType().isInteger() &&
EltVT.bitsLE(I->getValueType()))) &&
"Wrong operand type!");
assert(I->getValueType() == N->getOperand(0).getValueType() &&
"Operands must all have the same type");
}
break;
}
}
}
#endif // NDEBUG
/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
AllNodes.push_back(N);
#ifndef NDEBUG
VerifySDNode(N);
#endif
}
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
bool Erased = false;
switch (N->getOpcode()) {
case ISD::HANDLENODE: return false; // noop.
case ISD::CONDCODE:
assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
"Cond code doesn't exist!");
Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
break;
case ISD::ExternalSymbol:
Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
break;
case ISD::TargetExternalSymbol: {
ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
Erased = TargetExternalSymbols.erase(
std::pair<std::string,unsigned char>(ESN->getSymbol(),
ESN->getTargetFlags()));
break;
}
case ISD::MCSymbol: {
auto *MCSN = cast<MCSymbolSDNode>(N);
Erased = MCSymbols.erase(MCSN->getMCSymbol());
break;
}
case ISD::VALUETYPE: {
EVT VT = cast<VTSDNode>(N)->getVT();
if (VT.isExtended()) {
Erased = ExtendedValueTypeNodes.erase(VT);
} else {
Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
}
break;
}
default:
// Remove it from the CSE Map.
assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
Erased = CSEMap.RemoveNode(N);
break;
}
#ifndef NDEBUG
// Verify that the node was actually in one of the CSE maps, unless it has a
// flag result (which cannot be CSE'd) or is one of the special cases that are
// not subject to CSE.
if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
!N->isMachineOpcode() && !doNotCSE(N)) {
N->dump(this);
dbgs() << "\n";
llvm_unreachable("Node is not in map!");
}
#endif
return Erased;
}
/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
// For node types that aren't CSE'd, just act as if no identical node
// already exists.
if (!doNotCSE(N)) {
SDNode *Existing = CSEMap.GetOrInsertNode(N);
if (Existing != N) {
// If there was already an existing matching node, use ReplaceAllUsesWith
// to replace the dead one with the existing one. This can cause
// recursive merging of other unrelated nodes down the line.
ReplaceAllUsesWith(N, Existing);
// N is now dead. Inform the listeners and delete it.
for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
DUL->NodeDeleted(N, Existing);
DeleteNodeNotInCSEMaps(N);
return;
}
}
// If the node doesn't already exist, we updated it. Inform listeners.
for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
DUL->NodeUpdated(N);
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
void *&InsertPos) {
if (doNotCSE(N))
return nullptr;
SDValue Ops[] = { Op };
FoldingSetNodeID ID;
AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
AddNodeIDCustom(ID, N);
SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
return Node;
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
SDValue Op1, SDValue Op2,
void *&InsertPos) {
if (doNotCSE(N))
return nullptr;
SDValue Ops[] = { Op1, Op2 };
FoldingSetNodeID ID;
AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
AddNodeIDCustom(ID, N);
SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
return Node;
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
void *&InsertPos) {
if (doNotCSE(N))
return nullptr;
FoldingSetNodeID ID;
AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
AddNodeIDCustom(ID, N);
SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
return Node;
}
/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
Type *Ty = VT == MVT::iPTR ?
PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext());
return getDataLayout().getABITypeAlignment(Ty);
}
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
: TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
UpdateListeners(nullptr) {
AllNodes.push_back(&EntryNode);
DbgInfo = new SDDbgInfo();
}
void SelectionDAG::init(MachineFunction &mf) {
MF = &mf;
TLI = getSubtarget().getTargetLowering();
TSI = getSubtarget().getSelectionDAGInfo();
Context = &mf.getFunction()->getContext();
}
SelectionDAG::~SelectionDAG() {
assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
allnodes_clear();
delete DbgInfo;
}
void SelectionDAG::allnodes_clear() {
assert(&*AllNodes.begin() == &EntryNode);
AllNodes.remove(AllNodes.begin());
while (!AllNodes.empty())
DeallocateNode(AllNodes.begin());
}
BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
SDVTList VTs, SDValue N1,
SDValue N2,
const SDNodeFlags *Flags) {
if (isBinOpWithFlags(Opcode)) {
// If no flags were passed in, use a default flags object.
SDNodeFlags F;
if (Flags == nullptr)
Flags = &F;
BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, *Flags);
return FN;
}
BinarySDNode *N = new (NodeAllocator)
BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
return N;
}
SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
void *&InsertPos) {
SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
if (N) {
switch (N->getOpcode()) {
default: break;
case ISD::Constant:
case ISD::ConstantFP:
llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
"debug location. Use another overload.");
}
}
return N;
}
SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
DebugLoc DL, void *&InsertPos) {
SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
if (N) {
switch (N->getOpcode()) {
default: break; // Process only regular (non-target) constant nodes.
case ISD::Constant:
case ISD::ConstantFP:
// Erase the debug location from the node if the node is used at several
// different places. Do not propagate one location to all uses as it leads
// to incorrect debug info.
if (N->getDebugLoc() != DL)
N->setDebugLoc(DebugLoc());
break;
}
}
return N;
}
void SelectionDAG::clear() {
allnodes_clear();
OperandAllocator.Reset();
CSEMap.clear();
ExtendedValueTypeNodes.clear();
ExternalSymbols.clear();
TargetExternalSymbols.clear();
MCSymbols.clear();
std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
static_cast<CondCodeSDNode*>(nullptr));
std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
static_cast<SDNode*>(nullptr));
EntryNode.UseList = nullptr;
AllNodes.push_back(&EntryNode);
Root = getEntryNode();
DbgInfo->clear();
}
SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
return VT.bitsGT(Op.getValueType()) ?
getNode(ISD::ANY_EXTEND, DL, VT, Op) :
getNode(ISD::TRUNCATE, DL, VT, Op);
}
SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
return VT.bitsGT(Op.getValueType()) ?
getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
getNode(ISD::TRUNCATE, DL, VT, Op);
}
SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
return VT.bitsGT(Op.getValueType()) ?
getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
getNode(ISD::TRUNCATE, DL, VT, Op);
}
SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
EVT OpVT) {
if (VT.bitsLE(Op.getValueType()))
return getNode(ISD::TRUNCATE, SL, VT, Op);
TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
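// getExtendForContent picks ANY_EXTEND for UndefinedBooleanContent,
// ZERO_EXTEND for ZeroOrOneBooleanContent, and SIGN_EXTEND for
// ZeroOrNegativeOneBooleanContent.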
return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}
SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
assert(!VT.isVector() &&
"getZeroExtendInReg should use the vector element type instead of "
"the vector type!");
if (Op.getValueType() == VT) return Op;
unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
APInt Imm = APInt::getLowBitsSet(BitWidth,
VT.getSizeInBits());
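// For example, zero-extending an i8 value held in an i32 register ANDs it
// with the mask 0x000000FF.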
return getNode(ISD::AND, DL, Op.getValueType(), Op,
getConstant(Imm, DL, Op.getValueType()));
}
SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
assert(VT.isVector() && "This DAG node is restricted to vector types.");
assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
"The sizes of the input and result must match in order to perform the "
"extend in-register.");
assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
"The destination vector type must have fewer lanes than the input.");
return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}
SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
assert(VT.isVector() && "This DAG node is restricted to vector types.");
assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
"The sizes of the input and result must match in order to perform the "
"extend in-register.");
assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
"The destination vector type must have fewer lanes than the input.");
return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}
SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
assert(VT.isVector() && "This DAG node is restricted to vector types.");
assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
"The sizes of the input and result must match in order to perform the "
"extend in-register.");
assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
"The destination vector type must have fewer lanes than the input.");
return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
EVT EltVT = VT.getScalarType();
SDValue NegOne =
getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
EVT EltVT = VT.getScalarType();
SDValue TrueValue;
switch (TLI->getBooleanContents(VT)) {
case TargetLowering::ZeroOrOneBooleanContent:
case TargetLowering::UndefinedBooleanContent:
TrueValue = getConstant(1, DL, VT);
break;
case TargetLowering::ZeroOrNegativeOneBooleanContent:
TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
VT);
break;
}
return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}
SDValue SelectionDAG::getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isT,
bool isO) {
EVT EltVT = VT.getScalarType();
assert((EltVT.getSizeInBits() >= 64 ||
(uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
"getConstant with a uint64_t value that doesn't fit in the type!");
return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}
SDValue SelectionDAG::getConstant(const APInt &Val, SDLoc DL, EVT VT, bool isT,
bool isO)
{
return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}
SDValue SelectionDAG::getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
bool isT, bool isO) {
assert(VT.isInteger() && "Cannot create FP integer constant!");
EVT EltVT = VT.getScalarType();
const ConstantInt *Elt = &Val;
// In some cases the vector type is legal but the element type is illegal and
// needs to be promoted, for example v8i8 on ARM. In this case, promote the
// inserted value (the type does not need to match the vector element type).
// Any extra bits introduced will be truncated away.
if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
TargetLowering::TypePromoteInteger) {
EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
Elt = ConstantInt::get(*getContext(), NewVal);
}
// In other cases the element type is illegal and needs to be expanded, for
// example v2i64 on MIPS32. In this case, find the nearest legal type, split
// the value into n parts and use a vector type with n-times the elements.
// Then bitcast to the type requested.
// Legalizing constants too early makes the DAGCombiner's job harder so we
// only legalize if the DAG tells us we must produce legal types.
else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
TLI->getTypeAction(*getContext(), EltVT) ==
TargetLowering::TypeExpandInteger) {
APInt NewVal = Elt->getValue();
EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
// Check the temporary vector is the correct size. If this fails then
// getTypeToTransformTo() probably returned a type whose size (in bits)
// isn't a power-of-2 factor of the requested type size.
assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
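// For example, a v2i64 splat of C on a 32-bit target is built as a v4i32
// BUILD_VECTOR holding the low and high 32-bit halves of C for each i64
// element, then bitcast back to v2i64.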
SmallVector<SDValue, 2> EltParts;
for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
.trunc(ViaEltSizeInBits), DL,
ViaEltVT, isT, isO));
}
// EltParts is currently in little endian order. If we actually want
// big-endian order then reverse it now.
if (getDataLayout().isBigEndian())
std::reverse(EltParts.begin(), EltParts.end());
// The elements must be reversed when the element order is different
// to the endianness of the elements (because the BITCAST is itself a
// vector shuffle in this situation). However, we do not need any code to
// perform this reversal because getConstant() is producing a vector
// splat.
// This situation occurs in MIPS MSA.
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
Ops));
return Result;
}
assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
"APInt size does not match type size!");
unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
ID.AddPointer(Elt);
ID.AddBoolean(isO);
void *IP = nullptr;
SDNode *N = nullptr;
if ((N = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)))
if (!VT.isVector())
return SDValue(N, 0);
if (!N) {
N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, DL.getDebugLoc(),
EltVT);
CSEMap.InsertNode(N, IP);
InsertNode(N);
}
SDValue Result(N, 0);
if (VT.isVector()) {
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
}
return Result;
}
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget) {
return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}
SDValue SelectionDAG::getConstantFP(const APFloat& V, SDLoc DL, EVT VT,
bool isTarget) {
return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}
SDValue SelectionDAG::getConstantFP(const ConstantFP& V, SDLoc DL, EVT VT,
bool isTarget){
assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
EVT EltVT = VT.getScalarType();
// Do the map lookup using the actual bit pattern for the floating point
// value, so that we don't have problems with 0.0 comparing equal to -0.0, and
// we don't have issues with SNANs.
unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
ID.AddPointer(&V);
void *IP = nullptr;
SDNode *N = nullptr;
if ((N = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)))
if (!VT.isVector())
return SDValue(N, 0);
if (!N) {
N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, DL.getDebugLoc(),
EltVT);
CSEMap.InsertNode(N, IP);
InsertNode(N);
}
SDValue Result(N, 0);
if (VT.isVector()) {
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
}
return Result;
}
SDValue SelectionDAG::getConstantFP(double Val, SDLoc DL, EVT VT,
bool isTarget) {
EVT EltVT = VT.getScalarType();
if (EltVT==MVT::f32)
return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
else if (EltVT==MVT::f64)
return getConstantFP(APFloat(Val), DL, VT, isTarget);
else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
EltVT==MVT::f16) {
bool ignored;
APFloat apf = APFloat(Val);
apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
&ignored);
return getConstantFP(apf, DL, VT, isTarget);
} else
llvm_unreachable("Unsupported type in getConstantFP");
}
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
EVT VT, int64_t Offset,
bool isTargetGA,
unsigned char TargetFlags) {
assert((TargetFlags == 0 || isTargetGA) &&
"Cannot set target flags on target-independent globals");
// Truncate (with sign-extension) the offset value to the pointer size.
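// For example, with 32-bit pointers an offset of 0xFFFFFFFF is sign-extended
// to -1 rather than kept as 4294967295.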
unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
if (BitWidth < 64)
Offset = SignExtend64(Offset, BitWidth);
unsigned Opc;
if (GV->isThreadLocal())
Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
else
Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddPointer(GV);
ID.AddInteger(Offset);
ID.AddInteger(TargetFlags);
ID.AddInteger(GV->getType()->getAddressSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
DL.getDebugLoc(), GV, VT,
Offset, TargetFlags);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddInteger(FI);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
unsigned char TargetFlags) {
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent jump tables");
unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddInteger(JTI);
ID.AddInteger(TargetFlags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
TargetFlags);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
unsigned Alignment, int Offset,
bool isTarget,
unsigned char TargetFlags) {
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddInteger(Alignment);
ID.AddInteger(Offset);
ID.AddPointer(C);
ID.AddInteger(TargetFlags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
Alignment, TargetFlags);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
unsigned Alignment, int Offset,
bool isTarget,
unsigned char TargetFlags) {
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddInteger(Alignment);
ID.AddInteger(Offset);
C->addSelectionDAGCSEId(ID);
ID.AddInteger(TargetFlags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
Alignment, TargetFlags);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
unsigned char TargetFlags) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
ID.AddInteger(Index);
ID.AddInteger(Offset);
ID.AddInteger(TargetFlags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
TargetFlags);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
ID.AddPointer(MBB);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getValueType(EVT VT) {
if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
ValueTypeNodes.size())
ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
SDNode *&N = VT.isExtended() ?
ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
if (N) return SDValue(N, 0);
N = new (NodeAllocator) VTSDNode(VT);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
SDNode *&N = ExternalSymbols[Sym];
if (N) return SDValue(N, 0);
N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
SDNode *&N = MCSymbols[Sym];
if (N)
return SDValue(N, 0);
N = new (NodeAllocator) MCSymbolSDNode(Sym, VT);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
unsigned char TargetFlags) {
SDNode *&N =
TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
TargetFlags)];
if (N) return SDValue(N, 0);
N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
if ((unsigned)Cond >= CondCodeNodes.size())
CondCodeNodes.resize(Cond+1);
if (!CondCodeNodes[Cond]) {
CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
CondCodeNodes[Cond] = N;
InsertNode(N);
}
return SDValue(CondCodeNodes[Cond], 0);
}
// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
std::swap(N1, N2);
ShuffleVectorSDNode::commuteMask(M);
}
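// For example, with 4-element vectors, shuffle(A, B, <4,1,6,3>) commutes to
// shuffle(B, A, <0,5,2,7>).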
SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
SDValue N2, const int *Mask) {
assert(VT == N1.getValueType() && VT == N2.getValueType() &&
"Invalid VECTOR_SHUFFLE");
// Canonicalize shuffle undef, undef -> undef
if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
return getUNDEF(VT);
// Validate that all indices in Mask are within the range of the elements
// input to the shuffle.
unsigned NElts = VT.getVectorNumElements();
SmallVector<int, 8> MaskVec;
for (unsigned i = 0; i != NElts; ++i) {
assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
MaskVec.push_back(Mask[i]);
}
// Canonicalize shuffle v, v -> v, undef
if (N1 == N2) {
N2 = getUNDEF(VT);
for (unsigned i = 0; i != NElts; ++i)
if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
}
// Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
if (N1.getOpcode() == ISD::UNDEF)
commuteShuffle(N1, N2, MaskVec);
// If shuffling a splat, try to blend the splat instead. We do this here so
// that even when this arises during lowering we don't have to re-handle it.
auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
BitVector UndefElements;
SDValue Splat = BV->getSplatValue(&UndefElements);
if (!Splat)
return;
for (int i = 0; i < (int)NElts; ++i) {
if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + (int)NElts))
continue;
// If this input comes from undef, mark it as such.
if (UndefElements[MaskVec[i] - Offset]) {
MaskVec[i] = -1;
continue;
}
// If we can blend a non-undef lane, use that instead.
if (!UndefElements[i])
MaskVec[i] = i + Offset;
}
};
if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
BlendSplat(N1BV, 0);
if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
BlendSplat(N2BV, NElts);
// Canonicalize: if all indices select from the LHS, -> shuffle LHS, undef;
// if all indices select from the RHS, -> shuffle RHS, undef.
bool AllLHS = true, AllRHS = true;
bool N2Undef = N2.getOpcode() == ISD::UNDEF;
for (unsigned i = 0; i != NElts; ++i) {
if (MaskVec[i] >= (int)NElts) {
if (N2Undef)
MaskVec[i] = -1;
else
AllLHS = false;
} else if (MaskVec[i] >= 0) {
AllRHS = false;
}
}
if (AllLHS && AllRHS)
return getUNDEF(VT);
if (AllLHS && !N2Undef)
N2 = getUNDEF(VT);
if (AllRHS) {
N1 = getUNDEF(VT);
commuteShuffle(N1, N2, MaskVec);
}
// Reset our undef status after accounting for the mask.
N2Undef = N2.getOpcode() == ISD::UNDEF;
// Re-check whether both sides ended up undef.
if (N1.getOpcode() == ISD::UNDEF && N2Undef)
return getUNDEF(VT);
// If Identity shuffle return that node.
bool Identity = true, AllSame = true;
for (unsigned i = 0; i != NElts; ++i) {
if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
if (MaskVec[i] != MaskVec[0]) AllSame = false;
}
if (Identity && NElts)
return N1;
// Shuffling a constant splat doesn't change the result.
if (N2Undef) {
SDValue V = N1;
// Look through any bitcasts. We check that these don't change the number
// (and size) of elements and just change their types.
while (V.getOpcode() == ISD::BITCAST)
V = V->getOperand(0);
// A splat should always show up as a build vector node.
if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
BitVector UndefElements;
SDValue Splat = BV->getSplatValue(&UndefElements);
// If this is a splat of an undef, shuffling it is also undef.
if (Splat && Splat.getOpcode() == ISD::UNDEF)
return getUNDEF(VT);
bool SameNumElts =
V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
// We only have a splat which can skip shuffles if there is a splatted
// value and no undef lanes rearranged by the shuffle.
if (Splat && UndefElements.none()) {
// Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
// number of elements match or the value splatted is a zero constant.
if (SameNumElts)
return N1;
if (auto *C = dyn_cast<ConstantSDNode>(Splat))
if (C->isNullValue())
return N1;
}
// If the shuffle itself creates a splat, build the vector directly.
if (AllSame && SameNumElts) {
const SDValue &Splatted = BV->getOperand(MaskVec[0]);
SmallVector<SDValue, 8> Ops(NElts, Splatted);
EVT BuildVT = BV->getValueType(0);
SDValue NewBV = getNode(ISD::BUILD_VECTOR, dl, BuildVT, Ops);
// We may have jumped through bitcasts, so the type of the
// BUILD_VECTOR may not match the type of the shuffle.
if (BuildVT != VT)
NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
return NewBV;
}
}
}
FoldingSetNodeID ID;
SDValue Ops[2] = { N1, N2 };
AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
for (unsigned i = 0; i != NElts; ++i)
ID.AddInteger(MaskVec[i]);
void* IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
return SDValue(E, 0);
// Allocate the mask array for the node out of the BumpPtrAllocator, since
// SDNode doesn't have access to it. This memory will be "leaked" when
// the node is deallocated, but recovered when the OperandAllocator is released.
int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
ShuffleVectorSDNode *N =
new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
dl.getDebugLoc(), N1, N2,
MaskAlloc);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
MVT VT = SV.getSimpleValueType(0);
SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
ShuffleVectorSDNode::commuteMask(MaskVec);
SDValue Op0 = SV.getOperand(0);
SDValue Op1 = SV.getOperand(1);
return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, &MaskVec[0]);
}
SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
SDValue Val, SDValue DTy,
SDValue STy, SDValue Rnd, SDValue Sat,
ISD::CvtCode Code) {
// If the src and dest types are the same and the conversion is between
// integer types of the same sign or two floats, no conversion is necessary.
if (DTy == STy &&
(Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
return Val;
FoldingSetNodeID ID;
SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops);
void* IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
return SDValue(E, 0);
CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
dl.getDebugLoc(),
Ops, Code);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
ID.AddInteger(RegNo);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
ID.AddPointer(RegMask);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
FoldingSetNodeID ID;
SDValue Ops[] = { Root };
AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
ID.AddPointer(Label);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
dl.getDebugLoc(), Root, Label);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
int64_t Offset,
bool isTarget,
unsigned char TargetFlags) {
unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddPointer(BA);
ID.AddInteger(Offset);
ID.AddInteger(TargetFlags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
TargetFlags);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getSrcValue(const Value *V) {
assert((!V || V->getType()->isPointerTy()) &&
"SrcValue is not a pointer?");
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
ID.AddPointer(V);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
ID.AddPointer(MD);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
if (VT == V.getValueType())
return V;
return getNode(ISD::BITCAST, SDLoc(V), VT, V);
}
/// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
unsigned SrcAS, unsigned DestAS) {
SDValue Ops[] = {Ptr};
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
ID.AddInteger(SrcAS);
ID.AddInteger(DestAS);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
dl.getDebugLoc(),
VT, Ptr, SrcAS, DestAS);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
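/// For example, if the target's shift-amount type for this LHS is i8 and Op
/// is an i32 value, Op is truncated to i8; an operand narrower than i8 would
/// be zero-extended instead. Vector shift amounts are returned unchanged.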
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
EVT OpTy = Op.getValueType();
EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
if (OpTy == ShTy || OpTy.isVector()) return Op;
ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
return getNode(Opcode, SDLoc(Op), ShTy, Op);
}
/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
unsigned ByteSize = VT.getStoreSize();
Type *Ty = VT.getTypeForEVT(*getContext());
unsigned StackAlign =
std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
}
/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
VT2.getStoreSizeInBits())/8;
Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext());
const DataLayout &DL = getDataLayout();
unsigned Align =
std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
}
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
SDValue N2, ISD::CondCode Cond, SDLoc dl) {
// These setcc operations always fold.
switch (Cond) {
default: break;
case ISD::SETFALSE:
case ISD::SETFALSE2: return getConstant(0, dl, VT);
case ISD::SETTRUE:
case ISD::SETTRUE2: {
TargetLowering::BooleanContent Cnt =
TLI->getBooleanContents(N1->getValueType(0));
return getConstant(
Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
VT);
}
case ISD::SETOEQ:
case ISD::SETOGT:
case ISD::SETOGE:
case ISD::SETOLT:
case ISD::SETOLE:
case ISD::SETONE:
case ISD::SETO:
case ISD::SETUO:
case ISD::SETUEQ:
case ISD::SETUNE:
assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
break;
}
if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
const APInt &C2 = N2C->getAPIntValue();
if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
const APInt &C1 = N1C->getAPIntValue();
switch (Cond) {
default: llvm_unreachable("Unknown integer setcc!");
case ISD::SETEQ: return getConstant(C1 == C2, dl, VT);
case ISD::SETNE: return getConstant(C1 != C2, dl, VT);
case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT);
case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT);
case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT);
case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT);
case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT);
case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT);
case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT);
case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT);
}
}
}
if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
switch (Cond) {
default: break;
case ISD::SETEQ: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
// fall through
case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT);
case ISD::SETNE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
// fall through
case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
R==APFloat::cmpLessThan, dl, VT);
case ISD::SETLT: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
// fall through
case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT);
case ISD::SETGT: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
// fall through
case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT);
case ISD::SETLE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
// fall through
case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
R==APFloat::cmpEqual, dl, VT);
case ISD::SETGE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
// fall through
case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
R==APFloat::cmpEqual, dl, VT);
case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT);
case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT);
case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
R==APFloat::cmpEqual, dl, VT);
case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT);
case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
R==APFloat::cmpLessThan, dl, VT);
case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
R==APFloat::cmpUnordered, dl, VT);
case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT);
case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT);
}
} else {
// Ensure that the constant occurs on the RHS.
ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
MVT CompVT = N1.getValueType().getSimpleVT();
if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
return SDValue();
return getSetCC(dl, VT, N2, N1, SwappedCond);
}
}
// Could not fold it.
return SDValue();
}
/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
// This predicate is not safe for vector operations.
if (Op.getValueType().isVector())
return false;
unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
unsigned Depth) const {
APInt KnownZero, KnownOne;
computeKnownBits(Op, KnownZero, KnownOne, Depth);
return (KnownZero & Mask) == Mask;
}
/// Determine which bits of Op are known to be either zero or one and return
/// them in the KnownZero/KnownOne bitsets.
void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
APInt &KnownOne, unsigned Depth) const {
unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
if (Depth == 6)
return; // Limit search depth.
APInt KnownZero2, KnownOne2;
switch (Op.getOpcode()) {
case ISD::Constant:
// We know all of the bits for a constant!
KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
KnownZero = ~KnownOne;
break;
case ISD::AND:
// If either the LHS or the RHS are Zero, the result is zero.
computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// Output known-1 bits are only known if set in both the LHS & RHS.
KnownOne &= KnownOne2;
// Output known-0 are known to be clear if zero in either the LHS | RHS.
KnownZero |= KnownZero2;
break;
case ISD::OR:
computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// Output known-0 bits are only known if clear in both the LHS & RHS.
KnownZero &= KnownZero2;
// Output known-1 are known to be set if set in either the LHS | RHS.
KnownOne |= KnownOne2;
break;
case ISD::XOR: {
computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// Output known-0 bits are known if clear or set in both the LHS & RHS.
APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
KnownZero = KnownZeroOut;
break;
}
case ISD::MUL: {
computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// If low bits are zero in either operand, output low known-0 bits.
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
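// Illustrative example: if one operand is known to be a multiple of 4 (two
// trailing zero bits) and the other a multiple of 8 (three trailing zero
// bits), the product is a multiple of 32, so at least five trailing bits of
// the result are known zero.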
KnownOne.clearAllBits();
unsigned TrailZ = KnownZero.countTrailingOnes() +
KnownZero2.countTrailingOnes();
unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
KnownZero2.countLeadingOnes(),
BitWidth) - BitWidth;
TrailZ = std::min(TrailZ, BitWidth);
LeadZ = std::min(LeadZ, BitWidth);
KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
APInt::getHighBitsSet(BitWidth, LeadZ);
break;
}
case ISD::UDIV: {
// For the purposes of computing leading zeros we can conservatively
// treat a udiv as a logical right shift by the power of 2 known to
// be less than the denominator.
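// Illustrative example: for an i32 udiv where the numerator has 8 leading
// zero bits (so it is < 2^24) and the divisor is known to have bit 4 set
// (so it is >= 16), the quotient is < 2^20 and therefore has at least 12
// leading zero bits.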
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
unsigned LeadZ = KnownZero2.countLeadingOnes();
KnownOne2.clearAllBits();
KnownZero2.clearAllBits();
computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
LeadZ = std::min(BitWidth,
LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
break;
}
case ISD::SELECT:
computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
break;
case ISD::SELECT_CC:
computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
break;
case ISD::SADDO:
case ISD::UADDO:
case ISD::SSUBO:
case ISD::USUBO:
case ISD::SMULO:
case ISD::UMULO:
if (Op.getResNo() != 1)
break;
// The boolean result conforms to getBooleanContents.
// If we know the result of a setcc has the top bits zero, use this info.
// We know that we have an integer-based boolean since these operations
// are only available for integers.
if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
TargetLowering::ZeroOrOneBooleanContent &&
BitWidth > 1)
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
case ISD::SETCC:
// If we know the result of a setcc has the top bits zero, use this info.
if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
TargetLowering::ZeroOrOneBooleanContent &&
BitWidth > 1)
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
case ISD::SHL:
// (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
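// Illustrative example: for i8, if X has its low 4 bits known zero, then
// X << 2 has its low 6 bits known zero, and every other known bit of X
// simply moves up by 2.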
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
unsigned ShAmt = SA->getZExtValue();
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
break;
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero <<= ShAmt;
KnownOne <<= ShAmt;
// low bits known zero.
KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
}
break;
case ISD::SRL:
// (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
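// Illustrative example: for i8, X >>u 3 always has its top 3 bits known
// zero, and any bits known about X shift down by 3 along with it.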
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
unsigned ShAmt = SA->getZExtValue();
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
break;
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
KnownZero |= HighBits; // High bits known zero.
}
break;
case ISD::SRA:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
unsigned ShAmt = SA->getZExtValue();
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
break;
// If any of the demanded bits are produced by the sign extension, we also
// demand the input sign bit.
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
// Handle the sign bits.
APInt SignBit = APInt::getSignBit(BitWidth);
SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
if (KnownZero.intersects(SignBit)) {
KnownZero |= HighBits; // New bits are known zero.
} else if (KnownOne.intersects(SignBit)) {
KnownOne |= HighBits; // New bits are known one.
}
}
break;
case ISD::SIGN_EXTEND_INREG: {
EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
unsigned EBits = EVT.getScalarType().getSizeInBits();
// Sign extension. Compute the demanded bits in the result that are not
// present in the input.
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
APInt InSignBit = APInt::getSignBit(EBits);
APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
// If the sign extended bits are demanded, we know that the sign
// bit is demanded.
InSignBit = InSignBit.zext(BitWidth);
if (NewBits.getBoolValue())
InputDemandedBits |= InSignBit;
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownOne &= InputDemandedBits;
KnownZero &= InputDemandedBits;
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
KnownZero |= NewBits;
KnownOne &= ~NewBits;
} else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
KnownOne |= NewBits;
KnownZero &= ~NewBits;
} else { // Input sign bit unknown
KnownZero &= ~NewBits;
KnownOne &= ~NewBits;
}
break;
}
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTPOP: {
unsigned LowBits = Log2_32(BitWidth)+1;
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
KnownOne.clearAllBits();
break;
}
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(Op);
// If this is a ZEXTLoad and we are looking at the loaded value.
if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
EVT VT = LD->getMemoryVT();
unsigned MemBits = VT.getScalarType().getSizeInBits();
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
} else if (const MDNode *Ranges = LD->getRanges()) {
computeKnownBitsFromRangeMetadata(*Ranges, KnownZero);
}
break;
}
case ISD::ZERO_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
KnownZero |= NewBits;
break;
}
case ISD::SIGN_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
// Note if the sign bit is known to be zero or one.
bool SignBitKnownZero = KnownZero.isNegative();
bool SignBitKnownOne = KnownOne.isNegative();
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
// If the sign bit is known zero or one, the top bits match.
if (SignBitKnownZero)
KnownZero |= NewBits;
else if (SignBitKnownOne)
KnownOne |= NewBits;
break;
}
case ISD::ANY_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
KnownZero = KnownZero.zext(InBits);
KnownOne = KnownOne.zext(InBits);
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.trunc(BitWidth);
KnownOne = KnownOne.trunc(BitWidth);
break;
}
case ISD::AssertZext: {
EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero |= (~InMask);
KnownOne &= (~KnownZero);
break;
}
case ISD::FGETSIGN:
// All bits are zero except the low bit.
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
case ISD::SUB: {
if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
// We know that the top bits of C-X are clear if X contains fewer bits
// than C (i.e. no wrap-around can happen). For example, 20-X is
// positive if we can prove that X is >= 0 and < 16.
if (CLHS->getAPIntValue().isNonNegative()) {
unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
// NLZ can't be BitWidth with no sign bit
APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
// If all of the MaskV bits are known to be zero, then we know the
// output top bits are zero, because we now know that the output is
// from [0-C].
if ((KnownZero2 & MaskV) == MaskV) {
unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
// Top bits known zero.
KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
}
}
}
}
// fall through
case ISD::ADD:
case ISD::ADDE: {
// The low bits of the output are known zero in the trailing zero bits
// common to both the LHS and RHS. For example, 8+(X<<3) is known to have
// the low 3 bits clear.
// Output known-0 bits are also known if the top bits of each input are
// known to be clear. For example, if one input has the top 10 bits clear
// and the other has the top 8 bits clear, we know the top 7 bits of the
// output must be clear.
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
unsigned KnownZeroHigh = KnownZero2.countLeadingOnes();
unsigned KnownZeroLow = KnownZero2.countTrailingOnes();
computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
KnownZeroHigh = std::min(KnownZeroHigh,
KnownZero2.countLeadingOnes());
KnownZeroLow = std::min(KnownZeroLow,
KnownZero2.countTrailingOnes());
if (Op.getOpcode() == ISD::ADD) {
KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
if (KnownZeroHigh > 1)
KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
break;
}
// With ADDE, a carry bit may be added in, so we can only use this
// information if we know (at least) that the low two bits are clear. We
// then report that the low bit is unknown but that the other low bits
// are known zero.
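// Illustrative example: if both addends have their low two bits clear, the
// raw sum is a multiple of 4; adding a carry-in of 0 or 1 can only touch
// bit 0, so bit 1 is still known zero while bit 0 is unknown.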
if (KnownZeroLow >= 2) // ADDE
KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroLow);
break;
}
case ISD::SREM:
if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
const APInt &RA = Rem->getAPIntValue().abs();
if (RA.isPowerOf2()) {
APInt LowBits = RA - 1;
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// The low bits of the first operand are unchanged by the srem.
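// Illustrative example: X srem 8 subtracts a multiple of 8 from X, so the
// low 3 bits of the result equal the low 3 bits of X.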
KnownZero = KnownZero2 & LowBits;
KnownOne = KnownOne2 & LowBits;
// If the first operand is non-negative or has all low bits zero, then
// the upper bits are all zero.
if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
KnownZero |= ~LowBits;
// If the first operand is negative and not all low bits are zero, then
// the upper bits are all one.
if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
KnownOne |= ~LowBits;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
}
break;
case ISD::UREM: {
if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
const APInt &RA = Rem->getAPIntValue();
if (RA.isPowerOf2()) {
APInt LowBits = (RA - 1);
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1);
// The upper bits are all zero, the lower ones are unchanged.
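// Illustrative example: X urem 16 yields a value below 16, so all bits
// above bit 3 are known zero and the low 4 bits equal X's.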
KnownZero = KnownZero2 | ~LowBits;
KnownOne = KnownOne2 & LowBits;
break;
}
}
// Since the result is less than or equal to either operand, any leading
// zero bits in either operand must also exist in the result.
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
KnownOne.clearAllBits();
KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
break;
}
case ISD::EXTRACT_ELEMENT: {
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
const unsigned Index =
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
const unsigned BitWidth = Op.getValueType().getSizeInBits();
// Remove low part of known bits mask
KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth);
// Remove high part of known bit mask
KnownZero = KnownZero.trunc(BitWidth);
KnownOne = KnownOne.trunc(BitWidth);
break;
}
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX: {
APInt Op0Zero, Op0One;
APInt Op1Zero, Op1One;
computeKnownBits(Op.getOperand(0), Op0Zero, Op0One, Depth+1);
computeKnownBits(Op.getOperand(1), Op1Zero, Op1One, Depth+1);
KnownZero = Op0Zero & Op1Zero;
KnownOne = Op0One & Op1One;
break;
}
case ISD::FrameIndex:
case ISD::TargetFrameIndex:
if (unsigned Align = InferPtrAlignment(Op)) {
// The low bits are known zero if the pointer is aligned.
KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
break;
}
break;
default:
if (Op.getOpcode() < ISD::BUILTIN_OP_END)
break;
// Fallthrough
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
// Allow the target to implement this method for its nodes.
TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
break;
}
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
EVT VT = Op.getValueType();
assert(VT.isInteger() && "Invalid VT!");
unsigned VTBits = VT.getScalarType().getSizeInBits();
unsigned Tmp, Tmp2;
unsigned FirstAnswer = 1;
if (Depth == 6)
return 1; // Limit search depth.
switch (Op.getOpcode()) {
default: break;
case ISD::AssertSext:
Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
return VTBits-Tmp+1;
case ISD::AssertZext:
Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
return VTBits-Tmp;
case ISD::Constant: {
const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
return Val.getNumSignBits();
}
case ISD::SIGN_EXTEND:
Tmp =
VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
case ISD::SIGN_EXTEND_INREG:
// Max of the input and what this extends.
Tmp =
cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
Tmp = VTBits-Tmp+1;
Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
return std::max(Tmp, Tmp2);
case ISD::SRA:
Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
// SRA X, C -> adds C sign bits.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
Tmp += C->getZExtValue();
if (Tmp > VTBits) Tmp = VTBits;
}
return Tmp;
case ISD::SHL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
// shl destroys sign bits.
Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
if (C->getZExtValue() >= VTBits || // Bad shift.
C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
return Tmp - C->getZExtValue();
}
break;
case ISD::AND:
case ISD::OR:
case ISD::XOR: // NOT is handled here.
// Logical binary ops preserve the number of sign bits at the worst.
Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
if (Tmp != 1) {
Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
FirstAnswer = std::min(Tmp, Tmp2);
// We computed what we know about the sign bits as our first
// answer. Now proceed to the generic code that uses
// computeKnownBits, and pick whichever answer is better.
}
break;
case ISD::SELECT:
Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
if (Tmp == 1) return 1; // Early out.
Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
return std::min(Tmp, Tmp2);
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX:
Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
if (Tmp == 1)
return 1; // Early out.
Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
return std::min(Tmp, Tmp2);
case ISD::SADDO:
case ISD::UADDO:
case ISD::SSUBO:
case ISD::USUBO:
case ISD::SMULO:
case ISD::UMULO:
if (Op.getResNo() != 1)
break;
// The boolean result conforms to getBooleanContents, just as in the
// SETCC case below: if the boolean is 0/-1, all bits are sign bits. We
// know we have an integer-based boolean since these operations are only
// available for integers.
if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
TargetLowering::ZeroOrNegativeOneBooleanContent)
return VTBits;
break;
case ISD::SETCC:
// If setcc returns 0/-1, all bits are sign bits.
if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
TargetLowering::ZeroOrNegativeOneBooleanContent)
return VTBits;
break;
case ISD::ROTL:
case ISD::ROTR:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
unsigned RotAmt = C->getZExtValue() & (VTBits-1);
// Handle rotate right by N like a rotate left by 32-N.
if (Op.getOpcode() == ISD::ROTR)
RotAmt = (VTBits-RotAmt) & (VTBits-1);
// If we aren't rotating out all of the known-in sign bits, return the
// number that are left. This handles rotl(sext(x), 1) for example.
Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
if (Tmp > RotAmt+1) return Tmp-RotAmt;
}
break;
case ISD::ADD:
// Add can have at most one carry bit. Thus we know that the output
// is, at worst, one more bit than the inputs.
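// Illustrative example: two i32 operands that each have 10 sign bits fit
// in 23 signed bits; their sum fits in 24 bits, leaving at least 9 sign
// bits in the result.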
Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
if (Tmp == 1) return 1; // Early out.
// Special case decrementing a value (ADD X, -1):
if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
if (CRHS->isAllOnesValue()) {
APInt KnownZero, KnownOne;
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
return VTBits;
// If we are subtracting one from a positive number, there is no carry
// out of the result.
if (KnownZero.isNegative())
return Tmp;
}
Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
if (Tmp2 == 1) return 1;
return std::min(Tmp, Tmp2)-1;
case ISD::SUB:
Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
if (Tmp2 == 1) return 1;
// Handle NEG.
if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
if (CLHS->isNullValue()) {
APInt KnownZero, KnownOne;
computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
return VTBits;
// If the input is known to be positive (the sign bit is known clear),
// the output of the NEG has the same number of sign bits as the input.
if (KnownZero.isNegative())
return Tmp2;
// Otherwise, we treat this like a SUB.
}
// Sub can have at most one carry bit. Thus we know that the output
// is, at worst, one more bit than the inputs.
Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
if (Tmp == 1) return 1; // Early out.
return std::min(Tmp, Tmp2)-1;
case ISD::TRUNCATE:
// FIXME: it's tricky to do anything useful for this, but it is an important
// case for targets like X86.
break;
case ISD::EXTRACT_ELEMENT: {
const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
const int BitWidth = Op.getValueType().getSizeInBits();
const int Items =
Op.getOperand(0).getValueType().getSizeInBits() / BitWidth;
// Get reverse index (starting from 1), Op1 value indexes elements from
// little end. Sign starts at big end.
const int rIndex = Items - 1 -
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
// If the sign portion ends in our element, the subtraction gives the
// correct result. Otherwise it gives either a negative or an over-bitwidth
// result, which is clamped below.
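// Illustrative example: extracting the i64 halves of an i128 value with
// 70 sign bits gives 64 sign bits for the high half (index 1) and
// 70 - 64 = 6 sign bits for the low half (index 0).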
return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
}
}
// If we are looking at the loaded value of the SDNode.
if (Op.getResNo() == 0) {
// Handle LOADX separately here. EXTLOAD case will fallthrough.
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
unsigned ExtType = LD->getExtensionType();
switch (ExtType) {
default: break;
case ISD::SEXTLOAD: // '17' bits known
Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
return VTBits-Tmp+1;
case ISD::ZEXTLOAD: // '16' bits known
Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
return VTBits-Tmp;
}
}
}
// Allow the target to implement this method for its nodes.
if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_VOID) {
unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
}
// Finally, if we can prove that the top bits of the result are 0's or 1's,
// use this information.
APInt KnownZero, KnownOne;
computeKnownBits(Op, KnownZero, KnownOne, Depth);
APInt Mask;
if (KnownZero.isNegative()) { // sign bit is 0
Mask = KnownZero;
} else if (KnownOne.isNegative()) { // sign bit is 1;
Mask = KnownOne;
} else {
// Nothing known.
return FirstAnswer;
}
// Okay, we know that the sign bit in Mask is set. Use CLZ to determine
// the number of identical bits in the top of the input value.
Mask = ~Mask;
Mask <<= Mask.getBitWidth()-VTBits;
// Return # leading zeros. We use 'min' here in case Val was zero before
// shifting. We don't want to return '64' as for an i32 "0".
return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}
/// isBaseWithConstantOffset - Return true if the specified operand is an
/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
/// semantics as an ADD. This handles the equivalence:
/// X|Cst == X+Cst iff X&Cst == 0.
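/// For example, (X | 7) computes X + 7 whenever X is known to be 8-byte
/// aligned, since the OR then writes into bits that are already zero.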
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
!isa<ConstantSDNode>(Op.getOperand(1)))
return false;
if (Op.getOpcode() == ISD::OR &&
!MaskedValueIsZero(Op.getOperand(0),
cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
return false;
return true;
}
bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
// If we're told that NaNs won't happen, assume they won't.
if (getTarget().Options.NoNaNsFPMath)
return true;
// If the value is a constant, we can obviously see if it is a NaN or not.
if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
return !C->getValueAPF().isNaN();
// TODO: Recognize more cases here.
return false;
}
bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
// If the value is a constant, we can obviously see if it is a zero or not.
if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
return !C->isZero();
// TODO: Recognize more cases here.
switch (Op.getOpcode()) {
default: break;
case ISD::OR:
if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
return !C->isNullValue();
break;
}
return false;
}
bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
// Check the obvious case.
if (A == B) return true;
// Check for negative and positive zero.
if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
if (CA->isZero() && CB->isZero()) return true;
// Otherwise they may not be equal.
return false;
}
/// getNode - Gets or creates the specified node.
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, getVTList(VT), None);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), getVTList(VT));
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
EVT VT, SDValue Operand) {
// Constant fold unary operations with an integer constant operand. Even
// opaque constants will be folded, because the folding of unary operations
// doesn't create new constants with different values. Nevertheless, the
// opaque flag is preserved during folding to prevent future folding with
// other constants.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
const APInt &Val = C->getAPIntValue();
switch (Opcode) {
default: break;
case ISD::SIGN_EXTEND:
return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
C->isTargetOpcode(), C->isOpaque());
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::TRUNCATE:
return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
C->isTargetOpcode(), C->isOpaque());
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: {
APFloat apf(EVTToAPFloatSemantics(VT),
APInt::getNullValue(VT.getSizeInBits()));
(void)apf.convertFromAPInt(Val,
Opcode==ISD::SINT_TO_FP,
APFloat::rmNearestTiesToEven);
return getConstantFP(apf, DL, VT);
}
case ISD::BITCAST:
if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
return getConstantFP(APFloat(APFloat::IEEEhalf, Val), DL, VT);
if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
return getConstantFP(APFloat(APFloat::IEEEsingle, Val), DL, VT);
else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
return getConstantFP(APFloat(APFloat::IEEEdouble, Val), DL, VT);
break;
case ISD::BSWAP:
return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
C->isOpaque());
case ISD::CTPOP:
return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
C->isOpaque());
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
C->isOpaque());
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
C->isOpaque());
}
}
// Constant fold unary operations with a floating point constant operand.
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
APFloat V = C->getValueAPF(); // make copy
switch (Opcode) {
case ISD::FNEG:
V.changeSign();
return getConstantFP(V, DL, VT);
case ISD::FABS:
V.clearSign();
return getConstantFP(V, DL, VT);
case ISD::FCEIL: {
APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
if (fs == APFloat::opOK || fs == APFloat::opInexact)
return getConstantFP(V, DL, VT);
break;
}
case ISD::FTRUNC: {
APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
if (fs == APFloat::opOK || fs == APFloat::opInexact)
return getConstantFP(V, DL, VT);
break;
}
case ISD::FFLOOR: {
APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
if (fs == APFloat::opOK || fs == APFloat::opInexact)
return getConstantFP(V, DL, VT);
break;
}
case ISD::FP_EXTEND: {
bool ignored;
// This can return overflow, underflow, or inexact; we don't care.
// FIXME need to be more flexible about rounding mode.
(void)V.convert(EVTToAPFloatSemantics(VT),
APFloat::rmNearestTiesToEven, &ignored);
return getConstantFP(V, DL, VT);
}
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: {
integerPart x[2];
bool ignored;
static_assert(integerPartWidth >= 64, "APFloat parts too small!");
// FIXME need to be more flexible about rounding mode.
APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
Opcode==ISD::FP_TO_SINT,
APFloat::rmTowardZero, &ignored);
if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
break;
APInt api(VT.getSizeInBits(), x);
return getConstant(api, DL, VT);
}
case ISD::BITCAST:
if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
break;
}
}
// Constant fold unary operations with a vector integer or float operand.
if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
if (BV->isConstant()) {
switch (Opcode) {
default:
// FIXME: Entirely reasonable to perform folding of other unary
// operations here as the need arises.
break;
case ISD::FNEG:
case ISD::FABS:
case ISD::FCEIL:
case ISD::FTRUNC:
case ISD::FFLOOR:
case ISD::FP_EXTEND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::TRUNCATE:
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP:
case ISD::BSWAP:
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTPOP: {
EVT SVT = VT.getScalarType();
EVT InVT = BV->getValueType(0);
EVT InSVT = InVT.getScalarType();
// Find legal integer scalar type for constant promotion and
// ensure that its scalar size is at least as large as source.
EVT LegalSVT = SVT;
if (SVT.isInteger()) {
LegalSVT = TLI->getTypeToTransformTo(*getContext(), SVT);
if (LegalSVT.bitsLT(SVT)) break;
}
// Let the above scalar folding handle the folding of each element.
SmallVector<SDValue, 8> Ops;
for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
SDValue OpN = BV->getOperand(i);
EVT OpVT = OpN.getValueType();
// Build vector (integer) scalar operands may need implicit
// truncation - do this before constant folding.
if (OpVT.isInteger() && OpVT.bitsGT(InSVT))
OpN = getNode(ISD::TRUNCATE, DL, InSVT, OpN);
OpN = getNode(Opcode, DL, SVT, OpN);
// Legalize the (integer) scalar constant if necessary.
if (LegalSVT != SVT)
OpN = getNode(ISD::ANY_EXTEND, DL, LegalSVT, OpN);
if (OpN.getOpcode() != ISD::UNDEF &&
OpN.getOpcode() != ISD::Constant &&
OpN.getOpcode() != ISD::ConstantFP)
break;
Ops.push_back(OpN);
}
if (Ops.size() == VT.getVectorNumElements())
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
break;
}
}
}
}
unsigned OpOpcode = Operand.getNode()->getOpcode();
switch (Opcode) {
case ISD::TokenFactor:
case ISD::MERGE_VALUES:
case ISD::CONCAT_VECTORS:
return Operand; // Factor, merge or concat of one node? No need.
case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
case ISD::FP_EXTEND:
assert(VT.isFloatingPoint() &&
Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
if (Operand.getValueType() == VT) return Operand; // noop conversion.
assert((!VT.isVector() ||
VT.getVectorNumElements() ==
Operand.getValueType().getVectorNumElements()) &&
"Vector element count mismatch!");
if (Operand.getOpcode() == ISD::UNDEF)
return getUNDEF(VT);
break;
case ISD::SIGN_EXTEND:
assert(VT.isInteger() && Operand.getValueType().isInteger() &&
"Invalid SIGN_EXTEND!");
if (Operand.getValueType() == VT) return Operand; // noop extension
assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
"Invalid sext node, dst < src!");
assert((!VT.isVector() ||
VT.getVectorNumElements() ==
Operand.getValueType().getVectorNumElements()) &&
"Vector element count mismatch!");
if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
else if (OpOpcode == ISD::UNDEF)
// sext(undef) = 0, because the top bits will all be the same.
return getConstant(0, DL, VT);
break;
case ISD::ZERO_EXTEND:
assert(VT.isInteger() && Operand.getValueType().isInteger() &&
"Invalid ZERO_EXTEND!");
if (Operand.getValueType() == VT) return Operand; // noop extension
assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
"Invalid zext node, dst < src!");
assert((!VT.isVector() ||
VT.getVectorNumElements() ==
Operand.getValueType().getVectorNumElements()) &&
"Vector element count mismatch!");
if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
return getNode(ISD::ZERO_EXTEND, DL, VT,
Operand.getNode()->getOperand(0));
else if (OpOpcode == ISD::UNDEF)
// zext(undef) = 0, because the top bits will be zero.
return getConstant(0, DL, VT);
break;
case ISD::ANY_EXTEND:
assert(VT.isInteger() && Operand.getValueType().isInteger() &&
"Invalid ANY_EXTEND!");
if (Operand.getValueType() == VT) return Operand; // noop extension
assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
"Invalid anyext node, dst < src!");
assert((!VT.isVector() ||
VT.getVectorNumElements() ==
Operand.getValueType().getVectorNumElements()) &&
"Vector element count mismatch!");
if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
OpOpcode == ISD::ANY_EXTEND)
// (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
else if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
// (ext (trunc x)) -> x
if (OpOpcode == ISD::TRUNCATE) {
SDValue OpOp = Operand.getNode()->getOperand(0);
if (OpOp.getValueType() == VT)
return OpOp;
}
break;
case ISD::TRUNCATE:
assert(VT.isInteger() && Operand.getValueType().isInteger() &&
"Invalid TRUNCATE!");
if (Operand.getValueType() == VT) return Operand; // noop truncate
assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
"Invalid truncate node, src < dst!");
assert((!VT.isVector() ||
VT.getVectorNumElements() ==
Operand.getValueType().getVectorNumElements()) &&
"Vector element count mismatch!");
if (OpOpcode == ISD::TRUNCATE)
return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
OpOpcode == ISD::ANY_EXTEND) {
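// Illustrative examples: trunc(zext i8 X to i64) to i32 becomes
// zext i8 X to i32; trunc(zext i32 X to i64) to i16 becomes
// trunc i32 X to i16; and trunc(zext i32 X to i64) to i32 is X itself.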
// If the source is smaller than the dest, we still need an extend.
if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
.bitsLT(VT.getScalarType()))
return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
return Operand.getNode()->getOperand(0);
}
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
break;
case ISD::BSWAP:
assert(VT.isInteger() && VT == Operand.getValueType() &&
"Invalid BSWAP!");
assert((VT.getScalarSizeInBits() % 16 == 0) &&
"BSWAP types must be a multiple of 16 bits!");
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
break;
case ISD::BITCAST:
// Basic sanity checking.
assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
&& "Cannot BITCAST between types of different sizes!");
if (VT == Operand.getValueType()) return Operand; // noop conversion.
if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
break;
case ISD::SCALAR_TO_VECTOR:
assert(VT.isVector() && !Operand.getValueType().isVector() &&
(VT.getVectorElementType() == Operand.getValueType() ||
(VT.getVectorElementType().isInteger() &&
Operand.getValueType().isInteger() &&
VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
"Illegal SCALAR_TO_VECTOR node!");
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
// scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
isa<ConstantSDNode>(Operand.getOperand(1)) &&
Operand.getConstantOperandVal(1) == 0 &&
Operand.getOperand(0).getValueType() == VT)
return Operand.getOperand(0);
break;
case ISD::FNEG:
// -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
Operand.getNode()->getOperand(0));
if (OpOpcode == ISD::FNEG) // --X -> X
return Operand.getNode()->getOperand(0);
break;
case ISD::FABS:
if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
break;
}
SDNode *N;
SDVTList VTs = getVTList(VT);
if (VT != MVT::Glue) { // Don't CSE flag producing nodes
FoldingSetNodeID ID;
SDValue Ops[1] = { Operand };
AddNodeIDNode(ID, Opcode, VTs, Ops);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTs, Operand);
CSEMap.InsertNode(N, IP);
} else {
N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTs, Operand);
}
InsertNode(N);
return SDValue(N, 0);
}
static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
const APInt &C2) {
switch (Opcode) {
case ISD::ADD: return std::make_pair(C1 + C2, true);
case ISD::SUB: return std::make_pair(C1 - C2, true);
case ISD::MUL: return std::make_pair(C1 * C2, true);
case ISD::AND: return std::make_pair(C1 & C2, true);
case ISD::OR: return std::make_pair(C1 | C2, true);
case ISD::XOR: return std::make_pair(C1 ^ C2, true);
case ISD::SHL: return std::make_pair(C1 << C2, true);
case ISD::SRL: return std::make_pair(C1.lshr(C2), true);
case ISD::SRA: return std::make_pair(C1.ashr(C2), true);
case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
case ISD::UDIV:
if (!C2.getBoolValue())
break;
return std::make_pair(C1.udiv(C2), true);
case ISD::UREM:
if (!C2.getBoolValue())
break;
return std::make_pair(C1.urem(C2), true);
case ISD::SDIV:
if (!C2.getBoolValue())
break;
return std::make_pair(C1.sdiv(C2), true);
case ISD::SREM:
if (!C2.getBoolValue())
break;
return std::make_pair(C1.srem(C2), true);
}
return std::make_pair(APInt(1, 0), false);
}
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
const ConstantSDNode *Cst1,
const ConstantSDNode *Cst2) {
if (Cst1->isOpaque() || Cst2->isOpaque())
return SDValue();
std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
Cst2->getAPIntValue());
if (!Folded.second)
return SDValue();
return getConstant(Folded.first, DL, VT);
}
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
SDNode *Cst1, SDNode *Cst2) {
// If the opcode is a target-specific ISD node, there's nothing we can
// do here and the operand rules may not line up with the below, so
// bail early.
if (Opcode >= ISD::BUILTIN_OP_END)
return SDValue();
// Handle the case of two scalars.
if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
if (SDValue Folded =
FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2)) {
if (!VT.isVector())
return Folded;
// We may have a vector type but a scalar result. Create a splat of the
// folded scalar value.
SmallVector<SDValue, 4> Outputs(VT.getVectorNumElements(), Folded);
// Build a big vector out of the scalar elements we generated.
return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
} else {
return SDValue();
}
}
}
// For vectors extract each constant element into Inputs so we can constant
// fold them individually.
BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
if (!BV1 || !BV2)
return SDValue();
assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
EVT SVT = VT.getScalarType();
SmallVector<SDValue, 4> Outputs;
for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
if (!V1 || !V2) // Not a constant, bail.
return SDValue();
if (V1->isOpaque() || V2->isOpaque())
return SDValue();
// Avoid BUILD_VECTOR nodes that perform implicit truncation.
// FIXME: This is valid and could be handled by truncating the APInts.
if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
return SDValue();
// Fold one vector element.
std::pair<APInt, bool> Folded = FoldValue(Opcode, V1->getAPIntValue(),
V2->getAPIntValue());
if (!Folded.second)
return SDValue();
Outputs.push_back(getConstant(Folded.first, DL, SVT));
}
assert(VT.getVectorNumElements() == Outputs.size() &&
"Vector size mismatch!");
// We may have a vector type but a scalar result. Create a splat.
Outputs.resize(VT.getVectorNumElements(), Outputs.back());
// Build a big vector out of the scalar elements we generated.
return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
SDValue N2, const SDNodeFlags *Flags) {
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
switch (Opcode) {
default: break;
case ISD::TokenFactor:
assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
N2.getValueType() == MVT::Other && "Invalid token factor!");
// Fold trivial token factors.
if (N1.getOpcode() == ISD::EntryToken) return N2;
if (N2.getOpcode() == ISD::EntryToken) return N1;
if (N1 == N2) return N1;
break;
case ISD::CONCAT_VECTORS:
// Concat of UNDEFs is UNDEF.
if (N1.getOpcode() == ISD::UNDEF &&
N2.getOpcode() == ISD::UNDEF)
return getUNDEF(VT);
// A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be simplified
// to one big BUILD_VECTOR.
if (N1.getOpcode() == ISD::BUILD_VECTOR &&
N2.getOpcode() == ISD::BUILD_VECTOR) {
SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
N1.getNode()->op_end());
Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
// BUILD_VECTOR requires all inputs to be of the same type, find the
// maximum type and extend them all.
EVT SVT = VT.getScalarType();
for (SDValue Op : Elts)
SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
if (SVT.bitsGT(VT.getScalarType()))
for (SDValue &Op : Elts)
Op = TLI->isZExtFree(Op.getValueType(), SVT)
? getZExtOrTrunc(Op, DL, SVT)
: getSExtOrTrunc(Op, DL, SVT);
return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
}
break;
case ISD::AND:
assert(VT.isInteger() && "This operator does not apply to FP types!");
assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
// (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
// worth handling here.
if (N2C && N2C->isNullValue())
return N2;
if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
return N1;
break;
case ISD::OR:
case ISD::XOR:
case ISD::ADD:
case ISD::SUB:
assert(VT.isInteger() && "This operator does not apply to FP types!");
assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
// (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
// it's worth handling here.
if (N2C && N2C->isNullValue())
return N1;
break;
case ISD::UDIV:
case ISD::UREM:
case ISD::MULHU:
case ISD::MULHS:
case ISD::MUL:
case ISD::SDIV:
case ISD::SREM:
assert(VT.isInteger() && "This operator does not apply to FP types!");
assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
break;
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FDIV:
case ISD::FREM:
if (getTarget().Options.UnsafeFPMath) {
if (Opcode == ISD::FADD) {
// 0+x --> x
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
if (CFP->getValueAPF().isZero())
return N2;
// x+0 --> x
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
if (CFP->getValueAPF().isZero())
return N1;
} else if (Opcode == ISD::FSUB) {
// x-0 --> x
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
if (CFP->getValueAPF().isZero())
return N1;
} else if (Opcode == ISD::FMUL) {
ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
SDValue V = N2;
// If the first operand isn't the constant, try the second
if (!CFP) {
CFP = dyn_cast<ConstantFPSDNode>(N2);
V = N1;
}
if (CFP) {
// 0*x --> 0
if (CFP->isZero())
return SDValue(CFP,0);
// 1*x --> x
if (CFP->isExactlyValue(1.0))
return V;
}
}
}
assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
break;
case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
assert(N1.getValueType() == VT &&
N1.getValueType().isFloatingPoint() &&
N2.getValueType().isFloatingPoint() &&
"Invalid FCOPYSIGN!");
break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR:
assert(VT == N1.getValueType() &&
"Shift operators return type must be the same as their first arg");
assert(VT.isInteger() && N2.getValueType().isInteger() &&
"Shifts only work on integers");
assert((!VT.isVector() || VT == N2.getValueType()) &&
"Vector shift amounts must be in the same as their first arg");
// Verify that the shift amount VT is big enough to hold valid shift
// amounts. This catches things like trying to shift an i1024 value by an
// i8, which is easy to fall into in generic code that uses
// TLI.getShiftAmount().
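// For example, shifting an i1024 value requires Log2_32_Ceil(1024) == 10
// bits of shift amount, so an i8 amount type is too small to hold every
// valid shift count.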
assert(N2.getValueType().getSizeInBits() >=
Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
"Invalid use of small shift amount with oversized value!");
// Always fold shifts of i1 values so the code generator doesn't need to
// handle them. Since we know the size of the shift has to be less than the
// size of the value, the shift/rotate count is guaranteed to be zero.
if (VT == MVT::i1)
return N1;
if (N2C && N2C->isNullValue())
return N1;
break;
case ISD::FP_ROUND_INREG: {
EVT EVT = cast<VTSDNode>(N2)->getVT();
assert(VT == N1.getValueType() && "Not an inreg round!");
assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
"Cannot FP_ROUND_INREG integer types");
assert(EVT.isVector() == VT.isVector() &&
"FP_ROUND_INREG type should be vector iff the operand "
"type is vector!");
assert((!EVT.isVector() ||
EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
"Vector element counts must match in FP_ROUND_INREG");
assert(EVT.bitsLE(VT) && "Not rounding down!");
(void)EVT;
if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
break;
}
case ISD::FP_ROUND:
assert(VT.isFloatingPoint() &&
N1.getValueType().isFloatingPoint() &&
VT.bitsLE(N1.getValueType()) &&
isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
if (N1.getValueType() == VT) return N1; // noop conversion.
break;
case ISD::AssertSext:
case ISD::AssertZext: {
EVT EVT = cast<VTSDNode>(N2)->getVT();
assert(VT == N1.getValueType() && "Not an inreg extend!");
assert(VT.isInteger() && EVT.isInteger() &&
"Cannot *_EXTEND_INREG FP types");
assert(!EVT.isVector() &&
"AssertSExt/AssertZExt type should be the vector element type "
"rather than the vector type!");
assert(EVT.bitsLE(VT) && "Not extending!");
if (VT == EVT) return N1; // noop assertion.
break;
}
case ISD::SIGN_EXTEND_INREG: {
EVT EVT = cast<VTSDNode>(N2)->getVT();
assert(VT == N1.getValueType() && "Not an inreg extend!");
assert(VT.isInteger() && EVT.isInteger() &&
"Cannot *_EXTEND_INREG FP types");
assert(EVT.isVector() == VT.isVector() &&
"SIGN_EXTEND_INREG type should be vector iff the operand "
"type is vector!");
assert((!EVT.isVector() ||
EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
"Vector element counts must match in SIGN_EXTEND_INREG");
assert(EVT.bitsLE(VT) && "Not extending!");
if (EVT == VT) return N1; // Not actually extending
auto SignExtendInReg = [&](APInt Val) {
unsigned FromBits = EVT.getScalarType().getSizeInBits();
Val <<= Val.getBitWidth() - FromBits;
Val = Val.ashr(Val.getBitWidth() - FromBits);
return getConstant(Val, DL, VT.getScalarType());
};
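// For example, sign-extending the low 8 bits of the i32 value 0x000000ff
// yields 0xffffffff (-1), while 0x0000007f is unchanged.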
if (N1C) {
APInt Val = N1C->getAPIntValue();
return SignExtendInReg(Val);
}
if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
SmallVector<SDValue, 8> Ops;
for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
SDValue Op = N1.getOperand(i);
if (Op.getValueType() != VT.getScalarType()) break;
if (Op.getOpcode() == ISD::UNDEF) {
Ops.push_back(Op);
continue;
}
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
APInt Val = C->getAPIntValue();
Ops.push_back(SignExtendInReg(Val));
continue;
}
break;
}
if (Ops.size() == VT.getVectorNumElements())
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
break;
}
case ISD::EXTRACT_VECTOR_ELT:
// EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
if (N1.getOpcode() == ISD::UNDEF)
return getUNDEF(VT);
// EXTRACT_VECTOR_ELT of an out-of-bounds element is an UNDEF.
if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements())
return getUNDEF(VT);
// EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
// expanding copies of large vectors from registers.
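// For example, with two v4i32 operands, element 5 of the concatenation is
// element 5 % 4 == 1 of operand 5 / 4 == 1.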
if (N2C &&
N1.getOpcode() == ISD::CONCAT_VECTORS &&
N1.getNumOperands() > 0) {
unsigned Factor =
N1.getOperand(0).getValueType().getVectorNumElements();
return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
N1.getOperand(N2C->getZExtValue() / Factor),
getConstant(N2C->getZExtValue() % Factor, DL,
N2.getValueType()));
}
// EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
// expanding large vector constants.
if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
SDValue Elt = N1.getOperand(N2C->getZExtValue());
if (VT != Elt.getValueType())
// If the vector element type is not legal, the BUILD_VECTOR operands
// are promoted and implicitly truncated, and the result implicitly
// extended. Make that explicit here.
Elt = getAnyExtOrTrunc(Elt, DL, VT);
return Elt;
}
// EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
// operations are lowered to scalars.
if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
// If the indices are the same, return the inserted element; if the
// indices are known to differ, extract the element from the original
// vector instead.
SDValue N1Op2 = N1.getOperand(2);
ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
if (N1Op2C && N2C) {
if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
if (VT == N1.getOperand(1).getValueType())
return N1.getOperand(1);
else
return getSExtOrTrunc(N1.getOperand(1), DL, VT);
}
return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
}
}
break;
case ISD::EXTRACT_ELEMENT:
assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
assert(!N1.getValueType().isVector() && !VT.isVector() &&
(N1.getValueType().isInteger() == VT.isInteger()) &&
N1.getValueType() != VT &&
"Wrong types for EXTRACT_ELEMENT!");
// EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
// 64-bit integers into 32-bit parts. Instead of building the extract of
// the BUILD_PAIR, only to have legalize rip it apart, just do it now.
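// For example, (extract_element (build_pair lo, hi), 1) is simply hi.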
if (N1.getOpcode() == ISD::BUILD_PAIR)
return N1.getOperand(N2C->getZExtValue());
// EXTRACT_ELEMENT of a constant int is also very common.
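// For example, i32 element 1 of the i64 constant 0x0000000100000002 is
// 0x00000001: shift right by 1 * 32 bits, then truncate to 32 bits.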
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
unsigned ElementSize = VT.getSizeInBits();
unsigned Shift = ElementSize * N2C->getZExtValue();
APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
}
break;
case ISD::EXTRACT_SUBVECTOR: {
SDValue Index = N2;
if (VT.isSimple() && N1.getValueType().isSimple()) {
assert(VT.isVector() && N1.getValueType().isVector() &&
"Extract subvector VTs must be a vectors!");
assert(VT.getVectorElementType() ==
N1.getValueType().getVectorElementType() &&
"Extract subvector VTs must have the same element type!");
assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
"Extract subvector must be from larger vector to smaller vector!");
if (isa<ConstantSDNode>(Index)) {
assert((VT.getVectorNumElements() +
cast<ConstantSDNode>(Index)->getZExtValue()
<= N1.getValueType().getVectorNumElements())
&& "Extract subvector overflow!");
}
// Trivial extraction.
if (VT.getSimpleVT() == N1.getSimpleValueType())
return N1;
}
break;
}
}
// Perform trivial constant folding.
if (SDValue SV =
FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
return SV;
// Canonicalize constant to RHS if commutative.
if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
std::swap(N1C, N2C);
std::swap(N1, N2);
}
// Constant fold FP operations.
bool HasFPExceptions = TLI->hasFloatingPointExceptions();
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
if (N1CFP) {
if (!N2CFP && isCommutativeBinOp(Opcode)) {
// Canonicalize constant to RHS if commutative.
std::swap(N1CFP, N2CFP);
std::swap(N1, N2);
} else if (N2CFP) {
APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
APFloat::opStatus s;
switch (Opcode) {
case ISD::FADD:
s = V1.add(V2, APFloat::rmNearestTiesToEven);
if (!HasFPExceptions || s != APFloat::opInvalidOp)
return getConstantFP(V1, DL, VT);
break;
case ISD::FSUB:
s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
if (!HasFPExceptions || s!=APFloat::opInvalidOp)
return getConstantFP(V1, DL, VT);
break;
case ISD::FMUL:
s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
if (!HasFPExceptions || s!=APFloat::opInvalidOp)
return getConstantFP(V1, DL, VT);
break;
case ISD::FDIV:
s = V1.divide(V2, APFloat::rmNearestTiesToEven);
if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
s!=APFloat::opDivByZero)) {
return getConstantFP(V1, DL, VT);
}
break;
case ISD::FREM :
s = V1.mod(V2, APFloat::rmNearestTiesToEven);
if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
s!=APFloat::opDivByZero)) {
return getConstantFP(V1, DL, VT);
}
break;
case ISD::FCOPYSIGN:
V1.copySign(V2);
return getConstantFP(V1, DL, VT);
default: break;
}
}
if (Opcode == ISD::FP_ROUND) {
APFloat V = N1CFP->getValueAPF(); // make copy
bool ignored;
// This can return overflow, underflow, or inexact; we don't care.
// FIXME need to be more flexible about rounding mode.
(void)V.convert(EVTToAPFloatSemantics(VT),
APFloat::rmNearestTiesToEven, &ignored);
return getConstantFP(V, DL, VT);
}
}
// Canonicalize an UNDEF to the RHS, even over a constant.
if (N1.getOpcode() == ISD::UNDEF) {
if (isCommutativeBinOp(Opcode)) {
std::swap(N1, N2);
} else {
switch (Opcode) {
case ISD::FP_ROUND_INREG:
case ISD::SIGN_EXTEND_INREG:
case ISD::SUB:
case ISD::FSUB:
case ISD::FDIV:
case ISD::FREM:
case ISD::SRA:
return N1; // fold op(undef, arg2) -> undef
case ISD::UDIV:
case ISD::SDIV:
case ISD::UREM:
case ISD::SREM:
case ISD::SRL:
case ISD::SHL:
if (!VT.isVector())
return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
// For vectors, we can't easily build an all zero vector, just return
// the RHS.
return N2;
}
}
}
// Fold a bunch of operators when the RHS is undef.
if (N2.getOpcode() == ISD::UNDEF) {
switch (Opcode) {
case ISD::XOR:
if (N1.getOpcode() == ISD::UNDEF)
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return getConstant(0, DL, VT);
// fallthrough
case ISD::ADD:
case ISD::ADDC:
case ISD::ADDE:
case ISD::SUB:
case ISD::UDIV:
case ISD::SDIV:
case ISD::UREM:
case ISD::SREM:
return N2; // fold op(arg1, undef) -> undef
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FDIV:
case ISD::FREM:
if (getTarget().Options.UnsafeFPMath)
return N2;
break;
case ISD::MUL:
case ISD::AND:
case ISD::SRL:
case ISD::SHL:
if (!VT.isVector())
return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
// For vectors, we can't easily build an all zero vector, just return
// the LHS.
return N1;
case ISD::OR:
if (!VT.isVector())
return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
// For vectors, we can't easily build an all one vector, just return
// the LHS.
return N1;
case ISD::SRA:
return N1;
}
}
// Memoize this node if possible.
BinarySDNode *N;
SDVTList VTs = getVTList(VT);
if (VT != MVT::Glue) {
SDValue Ops[] = {N1, N2};
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops);
AddNodeIDFlags(ID, Opcode, Flags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
CSEMap.InsertNode(N, IP);
} else {
N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
}
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3) {
// Perform various simplifications.
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
switch (Opcode) {
case ISD::FMA: {
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
if (N1CFP && N2CFP && N3CFP) {
APFloat V1 = N1CFP->getValueAPF();
const APFloat &V2 = N2CFP->getValueAPF();
const APFloat &V3 = N3CFP->getValueAPF();
APFloat::opStatus s =
V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
return getConstantFP(V1, DL, VT);
}
break;
}
case ISD::CONCAT_VECTORS:
// A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be simplified
// to one big BUILD_VECTOR.
if (N1.getOpcode() == ISD::BUILD_VECTOR &&
N2.getOpcode() == ISD::BUILD_VECTOR &&
N3.getOpcode() == ISD::BUILD_VECTOR) {
SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
N1.getNode()->op_end());
Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
}
break;
case ISD::SETCC: {
// Use FoldSetCC to simplify SETCC's.
SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
if (Simp.getNode()) return Simp;
break;
}
case ISD::SELECT:
if (N1C) {
if (N1C->getZExtValue())
return N2; // select true, X, Y -> X
return N3; // select false, X, Y -> Y
}
if (N2 == N3) return N2; // select C, X, X -> X
break;
case ISD::VECTOR_SHUFFLE:
llvm_unreachable("should use getVectorShuffle constructor!");
case ISD::INSERT_SUBVECTOR: {
SDValue Index = N3;
if (VT.isSimple() && N1.getValueType().isSimple()
&& N2.getValueType().isSimple()) {
assert(VT.isVector() && N1.getValueType().isVector() &&
N2.getValueType().isVector() &&
"Insert subvector VTs must be a vectors");
assert(VT == N1.getValueType() &&
"Dest and insert subvector source types must match!");
assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
"Insert subvector must be from smaller vector to larger vector!");
if (isa<ConstantSDNode>(Index)) {
assert((N2.getValueType().getVectorNumElements() +
cast<ConstantSDNode>(Index)->getZExtValue()
<= VT.getVectorNumElements())
&& "Insert subvector overflow!");
}
// Trivial insertion.
if (VT.getSimpleVT() == N2.getSimpleValueType())
return N2;
}
break;
}
case ISD::BITCAST:
// Fold bit_convert nodes from a type to themselves.
if (N1.getValueType() == VT)
return N1;
break;
}
// Memoize node if it doesn't produce a flag.
SDNode *N;
SDVTList VTs = getVTList(VT);
if (VT != MVT::Glue) {
SDValue Ops[] = { N1, N2, N3 };
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTs, N1, N2, N3);
CSEMap.InsertNode(N, IP);
} else {
N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTs, N1, N2, N3);
}
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4) {
SDValue Ops[] = { N1, N2, N3, N4 };
return getNode(Opcode, DL, VT, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4, SDValue N5) {
SDValue Ops[] = { N1, N2, N3, N4, N5 };
return getNode(Opcode, DL, VT, Ops);
}
/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
SmallVector<SDValue, 8> ArgChains;
// Include the original chain at the beginning of the list. When this is
// used by target LowerCall hooks, this helps legalize find the
// CALLSEQ_BEGIN node.
ArgChains.push_back(Chain);
// Add a chain value for each stack argument.
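// (Incoming stack arguments live in fixed stack objects, which have
// negative frame indexes; hence the FI->getIndex() < 0 check below.)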
for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
if (FI->getIndex() < 0)
ArgChains.push_back(SDValue(L, 1));
// Build a tokenfactor for all the chains.
return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}
/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
SDLoc dl) {
assert(Value.getOpcode() != ISD::UNDEF);
unsigned NumBits = VT.getScalarType().getSizeInBits();
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
assert(C->getAPIntValue().getBitWidth() == 8);
APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
if (VT.isInteger())
return DAG.getConstant(Val, dl, VT);
return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
VT);
}
assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
EVT IntVT = VT.getScalarType();
if (!IntVT.isInteger())
IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
if (NumBits > 8) {
// Use a multiplication with 0x010101... to extend the input to the
// required length.
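// For example, with NumBits == 32 and a fill byte of 0xab, the
// zero-extended value 0x000000ab * 0x01010101 == 0xabababab.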
APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
DAG.getConstant(Magic, dl, IntVT));
}
if (VT != Value.getValueType() && !VT.isInteger())
Value = DAG.getNode(ISD::BITCAST, dl, VT.getScalarType(), Value);
if (VT != Value.getValueType()) {
assert(VT.getVectorElementType() == Value.getValueType() &&
"value type should be one vector element here");
SmallVector<SDValue, 8> BVOps(VT.getVectorNumElements(), Value);
Value = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, BVOps);
}
return Value;
}
/// getMemsetStringVal - Similar to getMemsetValue. Except this is only
/// used when a memcpy is turned into a memset when the source is a constant
/// string ptr.
static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
const TargetLowering &TLI, StringRef Str) {
// Handle vector with all elements zero.
if (Str.empty()) {
if (VT.isInteger())
return DAG.getConstant(0, dl, VT);
else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
return DAG.getConstantFP(0.0, dl, VT);
else if (VT.isVector()) {
unsigned NumElts = VT.getVectorNumElements();
MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getConstant(0, dl,
EVT::getVectorVT(*DAG.getContext(),
EltVT, NumElts)));
} else
llvm_unreachable("Expected type!");
}
assert(!VT.isVector() && "Can't handle vector type here!");
unsigned NumVTBits = VT.getSizeInBits();
unsigned NumVTBytes = NumVTBits / 8;
unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
APInt Val(NumVTBits, 0);
if (DAG.getDataLayout().isLittleEndian()) {
for (unsigned i = 0; i != NumBytes; ++i)
Val |= (uint64_t)(unsigned char)Str[i] << i*8;
} else {
for (unsigned i = 0; i != NumBytes; ++i)
Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
}
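// For example, on a little-endian target the 4-byte string "abcd" packs
// into the i32 immediate 0x64636261, with 'a' (0x61) in the lowest byte.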
// If the "cost" of materializing the integer immediate is less than the cost
// of a load, then it is cost effective to turn the load into the immediate.
Type *Ty = VT.getTypeForEVT(*DAG.getContext());
if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
return DAG.getConstant(Val, dl, VT);
return SDValue(nullptr, 0);
}
/// getMemBasePlusOffset - Returns the base pointer plus the given constant
/// offset, expressed as a single ADD node.
static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
SelectionDAG &DAG) {
EVT VT = Base.getValueType();
return DAG.getNode(ISD::ADD, dl,
VT, Base, DAG.getConstant(Offset, dl, VT));
}
/// isMemSrcFromString - Returns true if memcpy source is a string constant.
///
static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
unsigned SrcDelta = 0;
GlobalAddressSDNode *G = nullptr;
if (Src.getOpcode() == ISD::GlobalAddress)
G = cast<GlobalAddressSDNode>(Src);
else if (Src.getOpcode() == ISD::ADD &&
Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
Src.getOperand(1).getOpcode() == ISD::Constant) {
G = cast<GlobalAddressSDNode>(Src.getOperand(0));
SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
}
if (!G)
return false;
return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
}
/// Determines the optimal series of memory ops to replace the memset / memcpy.
/// Return true if the number of memory ops is below the threshold (Limit).
/// It returns the types of the sequence of memory ops to perform
/// memset / memcpy by reference.
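/// For example, a 15-byte copy on a target where i64 is legal typically
/// lowers to an i64, an i32, an i16 and an i8 op, or to two overlapping
/// i64 ops when misaligned accesses are fast and AllowOverlap is set.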
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
unsigned Limit, uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
bool IsMemset,
bool ZeroMemset,
bool MemcpyStrSrc,
bool AllowOverlap,
SelectionDAG &DAG,
const TargetLowering &TLI) {
assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
"Expecting memcpy / memset source to meet alignment requirement!");
// If 'SrcAlign' is zero, that means the memory operation does not need to
// load the value, i.e. memset or memcpy from constant string. Otherwise,
// it's the inferred alignment of the source. 'DstAlign', on the other hand,
// is the specified alignment of the memory operation. If it is zero, that
// means it's possible to change the alignment of the destination.
// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
// not need to be loaded.
EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
IsMemset, ZeroMemset, MemcpyStrSrc,
DAG.getMachineFunction());
if (VT == MVT::Other) {
unsigned AS = 0;
if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(AS) ||
TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
VT = TLI.getPointerTy(DAG.getDataLayout());
} else {
switch (DstAlign & 7) {
case 0: VT = MVT::i64; break;
case 4: VT = MVT::i32; break;
case 2: VT = MVT::i16; break;
default: VT = MVT::i8; break;
}
}
MVT LVT = MVT::i64;
while (!TLI.isTypeLegal(LVT))
LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
assert(LVT.isInteger());
if (VT.bitsGT(LVT))
VT = LVT;
}
unsigned NumMemOps = 0;
while (Size != 0) {
unsigned VTSize = VT.getSizeInBits() / 8;
while (VTSize > Size) {
// For now, only use non-vector loads / stores for the left-over pieces.
EVT NewVT = VT;
unsigned NewVTSize;
bool Found = false;
if (VT.isVector() || VT.isFloatingPoint()) {
NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
TLI.isSafeMemOpType(NewVT.getSimpleVT()))
Found = true;
else if (NewVT == MVT::i64 &&
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
TLI.isSafeMemOpType(MVT::f64)) {
// i64 is usually not legal on 32-bit targets, but f64 may be.
NewVT = MVT::f64;
Found = true;
}
}
if (!Found) {
do {
NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
if (NewVT == MVT::i8)
break;
} while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
}
NewVTSize = NewVT.getSizeInBits() / 8;
// If the new VT cannot cover all of the remaining bits, then consider
// issuing one (or a pair of) unaligned and overlapping load / store ops.
// FIXME: Only do this for 64-bit or wider ops, since we don't have a
// proper cost model for unaligned load / store.
bool Fast;
unsigned AS = 0;
if (NumMemOps && AllowOverlap &&
VTSize >= 8 && NewVTSize < Size &&
TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign, &Fast) && Fast)
VTSize = Size;
else {
VT = NewVT;
VTSize = NewVTSize;
}
}
if (++NumMemOps > Limit)
return false;
MemOps.push_back(VT);
Size -= VTSize;
}
return true;
}
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
unsigned Align, bool isVol,
bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) {
// Turn a memcpy of undef to nop.
if (Src.getOpcode() == ISD::UNDEF)
return Chain;
// Expand memcpy to a series of load and store ops if the size operand falls
// below a certain threshold.
// TODO: In the AlwaysInline case, if the size is big then generate a loop
// rather than a potentially humongous number of loads and stores.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
unsigned SrcAlign = DAG.InferPtrAlignment(Src);
if (Align > SrcAlign)
SrcAlign = Align;
StringRef Str;
bool CopyFromStr = isMemSrcFromString(Src, Str);
bool isZeroStr = CopyFromStr && Str.empty();
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
(isZeroStr ? 0 : SrcAlign),
false, false, CopyFromStr, true, DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
// Don't promote to an alignment that would require dynamic stack
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
while (NewAlign > Align &&
DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign))
NewAlign /= 2;
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
MFI->setObjectAlignment(FI->getIndex(), NewAlign);
Align = NewAlign;
}
}
SmallVector<SDValue, 8> OutChains;
unsigned NumMemOps = MemOps.size();
uint64_t SrcOff = 0, DstOff = 0;
for (unsigned i = 0; i != NumMemOps; ++i) {
EVT VT = MemOps[i];
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Value, Store;
if (VTSize > Size) {
// Issuing an unaligned load / store pair that overlaps with the previous
// pair. Adjust the offset accordingly.
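// For example, if 5 bytes remain but VT is i64, the offsets step back by
// 3 so the final 8-byte pair covers the tail, overlapping the previous op.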
assert(i == NumMemOps-1 && i != 0);
SrcOff -= VTSize - Size;
DstOff -= VTSize - Size;
}
if (CopyFromStr &&
(isZeroStr || (VT.isInteger() && !VT.isVector()))) {
// It's unlikely a store of a vector immediate can be done in a single
// instruction. It would require a load from a constant pool first.
// We only handle zero vectors here.
// FIXME: Handle other cases where store of vector immediate is done in
// a single instruction.
Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
if (Value.getNode())
Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, dl, DAG),
DstPtrInfo.getWithOffset(DstOff), isVol,
false, Align);
}
if (!Store.getNode()) {
// The type might not be legal for the target. This should only happen
// if the type is smaller than a legal type, as on PPC, so the right
// thing to do is generate a LoadExt/StoreTrunc pair. These simplify
// to Load/Store if NVT==VT.
// FIXME: does the case above also need this?
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
assert(NVT.bitsGE(VT));
Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
getMemBasePlusOffset(Src, SrcOff, dl, DAG),
SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
false, MinAlign(SrcAlign, SrcOff));
Store = DAG.getTruncStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, dl, DAG),
DstPtrInfo.getWithOffset(DstOff), VT, isVol,
false, Align);
}
OutChains.push_back(Store);
SrcOff += VTSize;
DstOff += VTSize;
Size -= VTSize;
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
unsigned Align, bool isVol,
bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) {
// Turn a memmove of undef to nop.
if (Src.getOpcode() == ISD::UNDEF)
return Chain;
// Expand memmove to a series of load and store ops if the size operand falls
// below a certain threshold.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
unsigned SrcAlign = DAG.InferPtrAlignment(Src);
if (Align > SrcAlign)
SrcAlign = Align;
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align), SrcAlign,
false, false, false, false, DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
MFI->setObjectAlignment(FI->getIndex(), NewAlign);
Align = NewAlign;
}
}
uint64_t SrcOff = 0, DstOff = 0;
SmallVector<SDValue, 8> LoadValues;
SmallVector<SDValue, 8> LoadChains;
SmallVector<SDValue, 8> OutChains;
unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i < NumMemOps; i++) {
EVT VT = MemOps[i];
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Value;
Value = DAG.getLoad(VT, dl, Chain,
getMemBasePlusOffset(Src, SrcOff, dl, DAG),
SrcPtrInfo.getWithOffset(SrcOff), isVol,
false, false, SrcAlign);
LoadValues.push_back(Value);
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
}
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
OutChains.clear();
for (unsigned i = 0; i < NumMemOps; i++) {
EVT VT = MemOps[i];
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Store;
Store = DAG.getStore(Chain, dl, LoadValues[i],
getMemBasePlusOffset(Dst, DstOff, dl, DAG),
DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
OutChains.push_back(Store);
DstOff += VTSize;
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
/// \brief Lower the call to the 'memset' intrinsic function into a series
/// of store operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Align Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns New head in the control flow, if lowering was successful, empty
/// SDValue otherwise.
///
/// The function tries to replace the 'llvm.memset' intrinsic with several
/// store operations and value calculation code. This is usually profitable
/// for small memory sizes.
static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
unsigned Align, bool isVol,
MachinePointerInfo DstPtrInfo) {
// Turn a memset of undef to nop.
if (Src.getOpcode() == ISD::UNDEF)
return Chain;
// Expand memset to a series of load/store ops if the size operand
// falls below a certain threshold.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
bool IsZeroVal =
isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
Size, (DstAlignCanChange ? 0 : Align), 0,
true, IsZeroVal, false, true, DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
MFI->setObjectAlignment(FI->getIndex(), NewAlign);
Align = NewAlign;
}
}
SmallVector<SDValue, 8> OutChains;
uint64_t DstOff = 0;
unsigned NumMemOps = MemOps.size();
// Find the largest store and generate the bit pattern for it.
EVT LargestVT = MemOps[0];
for (unsigned i = 1; i < NumMemOps; i++)
if (MemOps[i].bitsGT(LargestVT))
LargestVT = MemOps[i];
SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
for (unsigned i = 0; i < NumMemOps; i++) {
EVT VT = MemOps[i];
unsigned VTSize = VT.getSizeInBits() / 8;
if (VTSize > Size) {
// Issuing an unaligned load / store pair that overlaps with the previous
// pair. Adjust the offset accordingly.
assert(i == NumMemOps-1 && i != 0);
DstOff -= VTSize - Size;
}
// If this store is smaller than the largest store see whether we can get
// the smaller value for free with a truncate.
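// For example, the i32 pattern 0xabababab is just the truncation of the
// i64 pattern 0xabababababababab.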
SDValue Value = MemSetValue;
if (VT.bitsLT(LargestVT)) {
if (!LargestVT.isVector() && !VT.isVector() &&
TLI.isTruncateFree(LargestVT, VT))
Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
else
Value = getMemsetValue(Src, VT, DAG, dl);
}
assert(Value.getValueType() == VT && "Value with wrong type.");
SDValue Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, dl, DAG),
DstPtrInfo.getWithOffset(DstOff),
isVol, false, Align);
OutChains.push_back(Store);
DstOff += VT.getSizeInBits() / 8;
Size -= VTSize;
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVol, bool AlwaysInline,
bool isTailCall, MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) {
assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
// Check to see if we should lower the memcpy to loads and stores first.
// For cases within the target-specified limits, this is the best choice.
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
if (ConstantSize) {
// Memcpy with size zero? Just return the original chain.
if (ConstantSize->isNullValue())
return Chain;
SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
ConstantSize->getZExtValue(),Align,
isVol, false, DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
return Result;
}
// Then check to see if we should lower the memcpy with target-specific
// code. If the target chooses to do this, this is the next best.
if (TSI) {
SDValue Result = TSI->EmitTargetCodeForMemcpy(
*this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
return Result;
}
// If we really need inline code and the target declined to provide it,
// use a (potentially long) sequence of loads and stores.
if (AlwaysInline) {
assert(ConstantSize && "AlwaysInline requires a constant size!");
return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
ConstantSize->getZExtValue(), Align, isVol,
true, DstPtrInfo, SrcPtrInfo);
}
// FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
// memcpy is not guaranteed to be safe. libc memcpys aren't required to
// respect volatile, so they may do things like read or write memory
// beyond the given memory regions. But fixing this isn't easy, and most
// people don't care.
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = getDataLayout().getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in SDLoc
TargetLowering::CallLoweringInfo CLI(*this);
CLI.setDebugLoc(dl)
.setChain(Chain)
.setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
Type::getVoidTy(*getContext()),
getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
TLI->getPointerTy(getDataLayout())),
std::move(Args), 0)
.setDiscardResult()
.setTailCall(isTailCall);
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) {
assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
// Check to see if we should lower the memmove to loads and stores first.
// For cases within the target-specified limits, this is the best choice.
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
if (ConstantSize) {
// Memmove with size zero? Just return the original chain.
if (ConstantSize->isNullValue())
return Chain;
SDValue Result =
getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
ConstantSize->getZExtValue(), Align, isVol,
false, DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
return Result;
}
// Then check to see if we should lower the memmove with target-specific
// code. If the target chooses to do this, this is the next best.
if (TSI) {
SDValue Result = TSI->EmitTargetCodeForMemmove(
*this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
return Result;
}
// FIXME: If the memmove is volatile, lowering it to plain libc memmove may
// not be safe. See memcpy above for more details.
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = getDataLayout().getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in SDLoc
TargetLowering::CallLoweringInfo CLI(*this);
CLI.setDebugLoc(dl)
.setChain(Chain)
.setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
Type::getVoidTy(*getContext()),
getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
TLI->getPointerTy(getDataLayout())),
std::move(Args), 0)
.setDiscardResult()
.setTailCall(isTailCall);
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo) {
assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
// Check to see if we should lower the memset to stores first.
// For cases within the target-specified limits, this is the best choice.
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
if (ConstantSize) {
// Memset with size zero? Just return the original chain.
if (ConstantSize->isNullValue())
return Chain;
SDValue Result =
getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
Align, isVol, DstPtrInfo);
if (Result.getNode())
return Result;
}
// Then check to see if we should lower the memset with target-specific
// code. If the target chooses to do this, this is the next best.
if (TSI) {
SDValue Result = TSI->EmitTargetCodeForMemset(
*this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
if (Result.getNode())
return Result;
}
// Emit a library call.
Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
Args.push_back(Entry);
Entry.Node = Src;
Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
Args.push_back(Entry);
Entry.Node = Size;
Entry.Ty = IntPtrTy;
Args.push_back(Entry);
// FIXME: pass in SDLoc
TargetLowering::CallLoweringInfo CLI(*this);
CLI.setDebugLoc(dl)
.setChain(Chain)
.setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
Type::getVoidTy(*getContext()),
getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
TLI->getPointerTy(getDataLayout())),
std::move(Args), 0)
.setDiscardResult()
.setTailCall(isTailCall);
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDVTList VTList, ArrayRef<SDValue> Ops,
MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
AddNodeIDNode(ID, Opcode, VTList, Ops);
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void* IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
// Allocate the operands array for the node out of the BumpPtrAllocator, since
// SDNode doesn't have access to it. This memory will be "leaked" when
// the node is deallocated, but recovered when the allocator is released.
// If the number of operands is less than 5 we use AtomicSDNode's internal
// storage.
unsigned NumOps = Ops.size();
SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps)
: nullptr;
SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
dl.getDebugLoc(), VTList, MemVT,
Ops.data(), DynOps, NumOps, MMO,
SuccessOrdering, FailureOrdering,
SynchScope);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDVTList VTList, ArrayRef<SDValue> Ops,
MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope) {
return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering,
Ordering, SynchScope);
}
SDValue SelectionDAG::getAtomicCmpSwap(
unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain,
SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment, AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
MachineFunction &MF = getMachineFunction();
// FIXME: Volatile isn't really correct; we should keep track of atomic
// orderings in the memoperand.
unsigned Flags = MachineMemOperand::MOVolatile;
Flags |= MachineMemOperand::MOLoad;
Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO,
SuccessOrdering, FailureOrdering, SynchScope);
}
SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT,
SDVTList VTs, SDValue Chain, SDValue Ptr,
SDValue Cmp, SDValue Swp,
MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO,
SuccessOrdering, FailureOrdering, SynchScope);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDValue Chain,
SDValue Ptr, SDValue Val,
const Value* PtrVal,
unsigned Alignment,
AtomicOrdering Ordering,
SynchronizationScope SynchScope) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
MachineFunction &MF = getMachineFunction();
// An atomic store does not load. An atomic load does not store.
// (An atomicrmw obviously both loads and stores.)
// For now, atomics are always considered volatile, and they are chained
// as such.
// FIXME: Volatile isn't really correct; we should keep track of atomic
// orderings in the memoperand.
unsigned Flags = MachineMemOperand::MOVolatile;
if (Opcode != ISD::ATOMIC_STORE)
Flags |= MachineMemOperand::MOLoad;
if (Opcode != ISD::ATOMIC_LOAD)
Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
MemVT.getStoreSize(), Alignment);
return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
Ordering, SynchScope);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDValue Chain,
SDValue Ptr, SDValue Val,
MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope) {
assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
Opcode == ISD::ATOMIC_LOAD_SUB ||
Opcode == ISD::ATOMIC_LOAD_AND ||
Opcode == ISD::ATOMIC_LOAD_OR ||
Opcode == ISD::ATOMIC_LOAD_XOR ||
Opcode == ISD::ATOMIC_LOAD_NAND ||
Opcode == ISD::ATOMIC_LOAD_MIN ||
Opcode == ISD::ATOMIC_LOAD_MAX ||
Opcode == ISD::ATOMIC_LOAD_UMIN ||
Opcode == ISD::ATOMIC_LOAD_UMAX ||
Opcode == ISD::ATOMIC_SWAP ||
Opcode == ISD::ATOMIC_STORE) &&
"Invalid Atomic Op");
EVT VT = Val.getValueType();
SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
getVTList(VT, MVT::Other);
SDValue Ops[] = {Chain, Ptr, Val};
return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
EVT VT, SDValue Chain,
SDValue Ptr,
MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope) {
assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = {Chain, Ptr};
return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
}
/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
if (Ops.size() == 1)
return Ops[0];
SmallVector<EVT, 4> VTs;
VTs.reserve(Ops.size());
for (unsigned i = 0; i < Ops.size(); ++i)
VTs.push_back(Ops[i].getValueType());
return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}
SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align, bool Vol,
bool ReadMem, bool WriteMem, unsigned Size) {
if (Align == 0) // Ensure that codegen never sees alignment 0
Align = getEVTAlignment(MemVT);
MachineFunction &MF = getMachineFunction();
unsigned Flags = 0;
if (WriteMem)
Flags |= MachineMemOperand::MOStore;
if (ReadMem)
Flags |= MachineMemOperand::MOLoad;
if (Vol)
Flags |= MachineMemOperand::MOVolatile;
if (!Size)
Size = MemVT.getStoreSize();
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}
SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
ArrayRef<SDValue> Ops, EVT MemVT,
MachineMemOperand *MMO) {
assert((Opcode == ISD::INTRINSIC_VOID ||
Opcode == ISD::INTRINSIC_W_CHAIN ||
Opcode == ISD::PREFETCH ||
Opcode == ISD::LIFETIME_START ||
Opcode == ISD::LIFETIME_END ||
(Opcode <= INT_MAX &&
(int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
"Opcode is not a memory-accessing opcode!");
// Memoize the node unless it returns a flag.
MemIntrinsicSDNode *N;
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops);
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
dl.getDebugLoc(), VTList, Ops,
MemVT, MMO);
CSEMap.InsertNode(N, IP);
} else {
N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
dl.getDebugLoc(), VTList, Ops,
MemVT, MMO);
}
InsertNode(N);
return SDValue(N, 0);
}
/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
// If this is FI+Offset, we can model it.
if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
// If this is (FI+Offset1)+Offset2, we can model it.
if (Ptr.getOpcode() != ISD::ADD ||
!isa<ConstantSDNode>(Ptr.getOperand(1)) ||
!isa<FrameIndexSDNode>(Ptr.getOperand(0)))
return MachinePointerInfo();
int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
return MachinePointerInfo::getFixedStack(FI, Offset+
cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}
/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
// If the 'Offset' value isn't a constant, we can't handle this.
if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
if (OffsetOp.getOpcode() == ISD::UNDEF)
return InferPointerInfo(Ptr);
return MachinePointerInfo();
}
SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, SDLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT,
bool isVolatile, bool isNonTemporal, bool isInvariant,
unsigned Alignment, const AAMDNodes &AAInfo,
const MDNode *Ranges) {
assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(VT);
unsigned Flags = MachineMemOperand::MOLoad;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
if (isInvariant)
Flags |= MachineMemOperand::MOInvariant;
// If we don't have a PtrInfo, infer the trivial frame index case to simplify
// clients.
if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(Ptr, Offset);
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
AAInfo, Ranges);
return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}
SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, SDLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset, EVT MemVT,
MachineMemOperand *MMO) {
if (VT == MemVT) {
ExtType = ISD::NON_EXTLOAD;
} else if (ExtType == ISD::NON_EXTLOAD) {
assert(VT == MemVT && "Non-extending load from different memory type!");
} else {
// Extending load.
assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
"Should only be an extending load, not truncating!");
assert(VT.isInteger() == MemVT.isInteger() &&
"Cannot convert from FP to Int or Int -> FP!");
assert(VT.isVector() == MemVT.isVector() &&
"Cannot use an ext load to convert to or from a vector!");
assert((!VT.isVector() ||
VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
"Cannot use an ext load to change the number of vector elements!");
}
bool Indexed = AM != ISD::UNINDEXED;
assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
"Unindexed load with an offset!");
SDVTList VTs = Indexed ?
getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, Ptr, Offset };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
ID.AddInteger(MemVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
MMO->isNonTemporal(),
MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<LoadSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
dl.getDebugLoc(), VTs, AM, ExtType,
MemVT, MMO);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo,
bool isVolatile, bool isNonTemporal,
bool isInvariant, unsigned Alignment,
const AAMDNodes &AAInfo,
const MDNode *Ranges) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
AAInfo, Ranges);
}
SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
SDValue Chain, SDValue Ptr,
MachineMemOperand *MMO) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
VT, MMO);
}
SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo, EVT MemVT,
bool isVolatile, bool isNonTemporal,
bool isInvariant, unsigned Alignment,
const AAMDNodes &AAInfo) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
PtrInfo, MemVT, isVolatile, isNonTemporal, isInvariant,
Alignment, AAInfo);
}
SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
SDValue Chain, SDValue Ptr, EVT MemVT,
MachineMemOperand *MMO) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
MemVT, MMO);
}
SDValue
SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM) {
LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
"Load is already a indexed load!");
return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
LD->getChain(), Base, Offset, LD->getPointerInfo(),
LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
false, LD->getAlignment());
}
SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue Ptr, MachinePointerInfo PtrInfo,
bool isVolatile, bool isNonTemporal,
unsigned Alignment, const AAMDNodes &AAInfo) {
assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(Val.getValueType());
unsigned Flags = MachineMemOperand::MOStore;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(Ptr);
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags,
Val.getValueType().getStoreSize(), Alignment,
AAInfo);
return getStore(Chain, dl, Val, Ptr, MMO);
}
SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue Ptr, MachineMemOperand *MMO) {
assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
EVT VT = Val.getValueType();
SDVTList VTs = getVTList(MVT::Other);
SDValue Undef = getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
dl.getDebugLoc(), VTs,
ISD::UNINDEXED, false, VT, MMO);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue Ptr, MachinePointerInfo PtrInfo,
EVT SVT,bool isVolatile, bool isNonTemporal,
unsigned Alignment,
const AAMDNodes &AAInfo) {
assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(SVT);
unsigned Flags = MachineMemOperand::MOStore;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(Ptr);
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
AAInfo);
return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}
SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue Ptr, EVT SVT,
MachineMemOperand *MMO) {
EVT VT = Val.getValueType();
assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (VT == SVT)
return getStore(Chain, dl, Val, Ptr, MMO);
assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
"Should only be a truncating store, not extending!");
assert(VT.isInteger() == SVT.isInteger() &&
"Can't do FP-INT conversion!");
assert(VT.isVector() == SVT.isVector() &&
"Cannot use trunc store to convert to or from a vector!");
assert((!VT.isVector() ||
VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
"Cannot use trunc store to change the number of vector elements!");
SDVTList VTs = getVTList(MVT::Other);
SDValue Undef = getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
ID.AddInteger(SVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
dl.getDebugLoc(), VTs,
ISD::UNINDEXED, true, SVT, MMO);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue
SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM) {
StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
"Store is already a indexed store!");
SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
ID.AddInteger(ST->getMemoryVT().getRawBits());
ID.AddInteger(ST->getRawSubclassData());
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
return SDValue(E, 0);
SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
dl.getDebugLoc(), VTs, AM,
ST->isTruncatingStore(),
ST->getMemoryVT(),
ST->getMemOperand());
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
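// Usage sketch (assuming 'ST' is an existing unindexed StoreSDNode found by a
// target combine, and 'NewBase'/'Off' are SDValues describing the updated
// address): the result's value 0 is the updated base pointer and value 1 is
// the output chain.
//
//   SDValue Indexed = DAG.getIndexedStore(SDValue(ST, 0), SDLoc(ST), NewBase,
//                                         Off, ISD::PRE_INC);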
SDValue
SelectionDAG::getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain,
SDValue Ptr, SDValue Mask, SDValue Src0, EVT MemVT,
MachineMemOperand *MMO, ISD::LoadExtType ExtTy) {
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(ExtTy, ISD::UNINDEXED,
MMO->isVolatile(),
MMO->isNonTemporal(),
MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = new (NodeAllocator) MaskedLoadSDNode(dl.getIROrder(),
dl.getDebugLoc(), Ops, 4, VTs,
ExtTy, MemVT, MMO);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
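// Sketch of how lowering code might build a masked load (all names assumed):
// 'Mask' selects the active lanes, 'Src0' supplies the result for inactive
// lanes, and ISD::NON_EXTLOAD requests no extension of the loaded value.
//
//   SDValue ML = DAG.getMaskedLoad(VT, dl, Chain, Ptr, Mask, Src0, VT, MMO,
//                                  ISD::NON_EXTLOAD);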
SDValue SelectionDAG::getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue Ptr, SDValue Mask, EVT MemVT,
MachineMemOperand *MMO, bool isTrunc) {
assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
EVT VT = Val.getValueType();
SDVTList VTs = getVTList(MVT::Other);
SDValue Ops[] = { Chain, Ptr, Mask, Val };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = new (NodeAllocator) MaskedStoreSDNode(dl.getIROrder(),
dl.getDebugLoc(), Ops, 4,
VTs, isTrunc, MemVT, MMO);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue
SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, SDLoc dl,
ArrayRef<SDValue> Ops,
MachineMemOperand *MMO) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(ISD::NON_EXTLOAD, ISD::UNINDEXED,
MMO->isVolatile(),
MMO->isNonTemporal(),
MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
MaskedGatherSDNode *N =
new (NodeAllocator) MaskedGatherSDNode(dl.getIROrder(), dl.getDebugLoc(),
Ops, VTs, VT, MMO);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, SDLoc dl,
ArrayRef<SDValue> Ops,
MachineMemOperand *MMO) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(),
MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N =
new (NodeAllocator) MaskedScatterSDNode(dl.getIROrder(), dl.getDebugLoc(),
Ops, VTs, VT, MMO);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
SDValue Chain, SDValue Ptr,
SDValue SV,
unsigned Align) {
SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
ArrayRef<SDUse> Ops) {
switch (Ops.size()) {
case 0: return getNode(Opcode, DL, VT);
case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
default: break;
}
// Copy from an SDUse array into an SDValue array for use with
// the regular getNode logic.
SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
return getNode(Opcode, DL, VT, NewOps);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
ArrayRef<SDValue> Ops) {
unsigned NumOps = Ops.size();
switch (NumOps) {
case 0: return getNode(Opcode, DL, VT);
case 1: return getNode(Opcode, DL, VT, Ops[0]);
case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
default: break;
}
switch (Opcode) {
default: break;
case ISD::SELECT_CC: {
assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
assert(Ops[0].getValueType() == Ops[1].getValueType() &&
"LHS and RHS of condition must have same type!");
assert(Ops[2].getValueType() == Ops[3].getValueType() &&
"True and False arms of SelectCC must have same type!");
assert(Ops[2].getValueType() == VT &&
"select_cc node must be of same type as true and false value!");
break;
}
case ISD::BR_CC: {
assert(NumOps == 5 && "BR_CC takes 5 operands!");
assert(Ops[2].getValueType() == Ops[3].getValueType() &&
"LHS/RHS of comparison should match types!");
break;
}
}
// Memoize nodes.
SDNode *N;
SDVTList VTs = getVTList(VT);
if (VT != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
VTs, Ops);
CSEMap.InsertNode(N, IP);
} else {
N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
VTs, Ops);
}
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
ArrayRef<SDValue> Ops) {
if (VTList.NumVTs == 1)
return getNode(Opcode, DL, VTList.VTs[0], Ops);
#if 0
switch (Opcode) {
// FIXME: figure out how to safely handle things like
// int foo(int x) { return 1 << (x & 255); }
// int bar() { return foo(256); }
case ISD::SRA_PARTS:
case ISD::SRL_PARTS:
case ISD::SHL_PARTS:
if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
else if (N3.getOpcode() == ISD::AND)
if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
// eliminate the and.
unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
}
break;
}
#endif
// Memoize the node unless it returns a flag.
SDNode *N;
unsigned NumOps = Ops.size();
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
if (NumOps == 1) {
N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTList, Ops[0]);
} else if (NumOps == 2) {
N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTList, Ops[0],
Ops[1]);
} else if (NumOps == 3) {
N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTList, Ops[0],
Ops[1], Ops[2]);
} else {
N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
VTList, Ops);
}
CSEMap.InsertNode(N, IP);
} else {
if (NumOps == 1) {
N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTList, Ops[0]);
} else if (NumOps == 2) {
N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTList, Ops[0],
Ops[1]);
} else if (NumOps == 3) {
N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTList, Ops[0],
Ops[1], Ops[2]);
} else {
N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
VTList, Ops);
}
}
InsertNode(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
return getNode(Opcode, DL, VTList, None);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1) {
SDValue Ops[] = { N1 };
return getNode(Opcode, DL, VTList, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1, SDValue N2) {
SDValue Ops[] = { N1, N2 };
return getNode(Opcode, DL, VTList, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1, SDValue N2, SDValue N3) {
SDValue Ops[] = { N1, N2, N3 };
return getNode(Opcode, DL, VTList, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4) {
SDValue Ops[] = { N1, N2, N3, N4 };
return getNode(Opcode, DL, VTList, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4, SDValue N5) {
SDValue Ops[] = { N1, N2, N3, N4, N5 };
return getNode(Opcode, DL, VTList, Ops);
}
SDVTList SelectionDAG::getVTList(EVT VT) {
return makeVTList(SDNode::getValueTypeList(VT), 1);
}
SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
FoldingSetNodeID ID;
ID.AddInteger(2U);
ID.AddInteger(VT1.getRawBits());
ID.AddInteger(VT2.getRawBits());
void *IP = nullptr;
SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
if (!Result) {
EVT *Array = Allocator.Allocate<EVT>(2);
Array[0] = VT1;
Array[1] = VT2;
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
VTListMap.InsertNode(Result, IP);
}
return Result->getSDVTList();
}
SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
FoldingSetNodeID ID;
ID.AddInteger(3U);
ID.AddInteger(VT1.getRawBits());
ID.AddInteger(VT2.getRawBits());
ID.AddInteger(VT3.getRawBits());
void *IP = nullptr;
SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
if (!Result) {
EVT *Array = Allocator.Allocate<EVT>(3);
Array[0] = VT1;
Array[1] = VT2;
Array[2] = VT3;
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
VTListMap.InsertNode(Result, IP);
}
return Result->getSDVTList();
}
SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
FoldingSetNodeID ID;
ID.AddInteger(4U);
ID.AddInteger(VT1.getRawBits());
ID.AddInteger(VT2.getRawBits());
ID.AddInteger(VT3.getRawBits());
ID.AddInteger(VT4.getRawBits());
void *IP = nullptr;
SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
if (!Result) {
EVT *Array = Allocator.Allocate<EVT>(4);
Array[0] = VT1;
Array[1] = VT2;
Array[2] = VT3;
Array[3] = VT4;
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
VTListMap.InsertNode(Result, IP);
}
return Result->getSDVTList();
}
SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
unsigned NumVTs = VTs.size();
FoldingSetNodeID ID;
ID.AddInteger(NumVTs);
for (unsigned index = 0; index < NumVTs; index++) {
ID.AddInteger(VTs[index].getRawBits());
}
void *IP = nullptr;
SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
if (!Result) {
EVT *Array = Allocator.Allocate<EVT>(NumVTs);
std::copy(VTs.begin(), VTs.end(), Array);
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
VTListMap.InsertNode(Result, IP);
}
return Result->getSDVTList();
}
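// VT lists are uniqued in VTListMap, so repeated queries return the same
// interned array; a sketch of the invariant:
//
//   SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
//   assert(A.VTs == B.VTs && "identical lists share interned storage");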
/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node; instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
// Check to see if there is no change.
if (Op == N->getOperand(0)) return N;
// See if the modified node already exists.
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
if (!RemoveNodeFromCSEMaps(N))
InsertPos = nullptr;
// Now we update the operands.
N->OperandList[0].set(Op);
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
return N;
}
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
// Check to see if there is no change.
if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
return N; // No operands changed, just return the input node.
// See if the modified node already exists.
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
if (!RemoveNodeFromCSEMaps(N))
InsertPos = nullptr;
// Now we update the operands.
if (N->OperandList[0] != Op1)
N->OperandList[0].set(Op1);
if (N->OperandList[1] != Op2)
N->OperandList[1].set(Op2);
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
return N;
}
SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
SDValue Ops[] = { Op1, Op2, Op3 };
return UpdateNodeOperands(N, Ops);
}
SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4) {
SDValue Ops[] = { Op1, Op2, Op3, Op4 };
return UpdateNodeOperands(N, Ops);
}
SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5) {
SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
return UpdateNodeOperands(N, Ops);
}
SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
unsigned NumOps = Ops.size();
assert(N->getNumOperands() == NumOps &&
"Update with wrong number of operands");
// If no operands changed just return the input node.
if (Ops.empty() || std::equal(Ops.begin(), Ops.end(), N->op_begin()))
return N;
// See if the modified node already exists.
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
if (!RemoveNodeFromCSEMaps(N))
InsertPos = nullptr;
// Now we update the operands.
for (unsigned i = 0; i != NumOps; ++i)
if (N->OperandList[i] != Ops[i])
N->OperandList[i].set(Ops[i]);
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
return N;
}
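// Usage sketch (assuming 'N' has one operand and 'NewOp' is a replacement of
// matching type): callers must use the returned node, because CSE may hand
// back a pre-existing equivalent node rather than mutating N.
//
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewOp);
//   if (Res != N) {
//     // N was CSE'd to an existing node; continue with Res.
//   }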
/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
// Unlike the code in MorphNodeTo that does this, we don't need to
// watch for dead nodes here.
for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
SDUse &Use = *I++;
Use.set(SDValue());
}
}
/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT) {
SDVTList VTs = getVTList(VT);
return SelectNodeTo(N, MachineOpc, VTs, None);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT, SDValue Op1) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1 };
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT, SDValue Op1,
SDValue Op2) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2 };
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT, SDValue Op1,
SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2, Op3 };
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT, ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT);
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2);
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2) {
SDVTList VTs = getVTList(VT1, VT2);
return SelectNodeTo(N, MachineOpc, VTs, None);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2, EVT VT3,
ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2, EVT VT3, EVT VT4,
ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2,
SDValue Op1) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1 };
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2 };
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2,
SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2, Op3 };
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2, EVT VT3,
SDValue Op1, SDValue Op2,
SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
SDValue Ops[] = { Op1, Op2, Op3 };
return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
N = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
// Reset the NodeID to -1.
N->setNodeId(-1);
return N;
}
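// Instruction-selection sketch (the machine opcode here is chosen purely for
// illustration): morph a one-result node in place into a machine node.
//
//   SDNode *MN = DAG.SelectNodeTo(N, TargetOpcode::COPY, N->getValueType(0),
//                                 N->getOperand(0));
//
// As with MorphNodeTo below, the returned node may differ from N when an
// identical node already existed in the DAG.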
/// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that the operation is associated with multiple
/// lines. This will make the debugger work better at -O0, where there is a
/// higher probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two
SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
DebugLoc NLoc = N->getDebugLoc();
if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
N->setDebugLoc(DebugLoc());
}
unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
N->setIROrder(Order);
return N;
}
/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one. Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use it from within the DAG combiner
/// or the legalizer, which maintain worklists that would need to be updated when
/// deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
SDVTList VTs, ArrayRef<SDValue> Ops) {
unsigned NumOps = Ops.size();
// If an identical node already exists, use it.
void *IP = nullptr;
if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, VTs, Ops);
if (SDNode *ON = FindNodeOrInsertPos(ID, N->getDebugLoc(), IP))
return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
}
if (!RemoveNodeFromCSEMaps(N))
IP = nullptr;
// Start the morphing.
N->NodeType = Opc;
N->ValueList = VTs.VTs;
N->NumValues = VTs.NumVTs;
// Clear the operands list, updating used nodes to remove this from their
// use list. Keep track of any operands that become dead as a result.
SmallPtrSet<SDNode*, 16> DeadNodeSet;
for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
SDUse &Use = *I++;
SDNode *Used = Use.getNode();
Use.set(SDValue());
if (Used->use_empty())
DeadNodeSet.insert(Used);
}
if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
// Initialize the memory references information.
MN->setMemRefs(nullptr, nullptr);
// If NumOps is larger than the # of operands we can have in a
// MachineSDNode, reallocate the operand list.
if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
if (MN->OperandsNeedDelete)
delete[] MN->OperandList;
if (NumOps > array_lengthof(MN->LocalOperands))
// We're creating a final node that will live unmorphed for the
// remainder of the current SelectionDAG iteration, so we can allocate
// the operands directly out of a pool with no recycling metadata.
MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
Ops.data(), NumOps);
else
MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps);
MN->OperandsNeedDelete = false;
} else
MN->InitOperands(MN->OperandList, Ops.data(), NumOps);
} else {
// If NumOps is larger than the # of operands we currently have, reallocate
// the operand list.
if (NumOps > N->NumOperands) {
if (N->OperandsNeedDelete)
delete[] N->OperandList;
N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps);
N->OperandsNeedDelete = true;
} else
N->InitOperands(N->OperandList, Ops.data(), NumOps);
}
// Delete any nodes that are still dead after adding the uses for the
// new operands.
if (!DeadNodeSet.empty()) {
SmallVector<SDNode *, 16> DeadNodes;
for (SDNode *N : DeadNodeSet)
if (N->use_empty())
DeadNodes.push_back(N);
RemoveDeadNodes(DeadNodes);
}
if (IP)
CSEMap.InsertNode(N, IP); // Memoize the new node.
return N;
}
/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
SDVTList VTs = getVTList(VT);
return getMachineNode(Opcode, dl, VTs, None);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1 };
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2 };
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
SDValue Op1, SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2, Op3 };
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT);
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
SDVTList VTs = getVTList(VT1, VT2);
return getMachineNode(Opcode, dl, VTs, None);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
EVT VT1, EVT VT2, SDValue Op1) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1 };
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2 };
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
EVT VT1, EVT VT2, SDValue Op1,
SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2, Op3 };
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
EVT VT1, EVT VT2,
ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2);
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
EVT VT1, EVT VT2, EVT VT3,
SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
SDValue Ops[] = { Op1, Op2 };
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
EVT VT1, EVT VT2, EVT VT3,
SDValue Op1, SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
SDValue Ops[] = { Op1, Op2, Op3 };
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
EVT VT1, EVT VT2, EVT VT3,
ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
EVT VT2, EVT VT3, EVT VT4,
ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(ResultTys);
return getMachineNode(Opcode, dl, VTs, Ops);
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
ArrayRef<SDValue> OpsArray) {
bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
MachineSDNode *N;
void *IP = nullptr;
const SDValue *Ops = OpsArray.data();
unsigned NumOps = OpsArray.size();
if (DoCSE) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ~Opcode, VTs, OpsArray);
IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) {
return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
}
}
// Allocate a new MachineSDNode.
N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
DL.getDebugLoc(), VTs);
// Initialize the operands list.
if (NumOps > array_lengthof(N->LocalOperands))
// We're creating a final node that will live unmorphed for the
// remainder of the current SelectionDAG iteration, so we can allocate
// the operands directly out of a pool with no recycling metadata.
N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
Ops, NumOps);
else
N->InitOperands(N->LocalOperands, Ops, NumOps);
N->OperandsNeedDelete = false;
if (DoCSE)
CSEMap.InsertNode(N, IP);
InsertNode(N);
return N;
}
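// Sketch (opcode and operand list assumed): selectors typically request one
// value result plus a chain by listing the types explicitly.
//
//   MachineSDNode *MN =
//       DAG.getMachineNode(Opc, dl, MVT::i32, MVT::Other, Ops);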
/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue
SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
SDValue Operand) {
SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
VT, Operand, SRIdxVal);
return SDValue(Subreg, 0);
}
/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue
SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
SDValue Operand, SDValue Subreg) {
SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
VT, Operand, Subreg, SRIdxVal);
return SDValue(Result, 0);
}
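// Usage sketch (the subregister index 'SubLoIdx' is target-specific and
// assumed here): extract the low half of a 64-bit value.
//
//   SDValue Lo = DAG.getTargetExtractSubreg(/*SRIdx=*/SubLoIdx, DL, MVT::i32,
//                                           Val64);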
/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
ArrayRef<SDValue> Ops,
const SDNodeFlags *Flags) {
if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops);
AddNodeIDFlags(ID, Opcode, Flags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DebugLoc(), IP))
return E;
}
return nullptr;
}
/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
unsigned R, bool IsIndirect, uint64_t Off,
DebugLoc DL, unsigned O) {
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
return new (DbgInfo->getAlloc())
SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
}
/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
const Value *C, uint64_t Off,
DebugLoc DL, unsigned O) {
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O);
}
/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
unsigned FI, uint64_t Off,
DebugLoc DL, unsigned O) {
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O);
}
namespace {
/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
SDNode::use_iterator &UI;
SDNode::use_iterator &UE;
void NodeDeleted(SDNode *N, SDNode *E) override {
// Increment the iterator as needed.
while (UI != UE && N == *UI)
++UI;
}
public:
RAUWUpdateListener(SelectionDAG &d,
SDNode::use_iterator &ui,
SDNode::use_iterator &ue)
: SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};
}
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
SDNode *From = FromN.getNode();
assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
"Cannot replace with this method!");
assert(From != To.getNode() && "Cannot replace uses of with self");
// Iterate over all the existing uses of From. New uses will be added
// to the beginning of the use list, which we avoid visiting.
// This specifically avoids visiting uses of From that arise while the
// replacement is happening, because any such uses would be the result
// of CSE: If an existing node looks like From after one of its operands
// is replaced by To, we don't want to replace all of its users with To
// too. See PR3018 for more info.
SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
RAUWUpdateListener Listener(*this, UI, UE);
while (UI != UE) {
SDNode *User = *UI;
// This node is about to morph, remove its old self from the CSE maps.
RemoveNodeFromCSEMaps(User);
// A user can appear in a use list multiple times, and when this
// happens the uses are usually next to each other in the list.
// To help reduce the number of CSE recomputations, process all
// the uses of this user that we can find this way.
do {
SDUse &Use = UI.getUse();
++UI;
Use.set(To);
} while (UI != UE && *UI == User);
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User);
}
// If we just RAUW'd the root, take note.
if (FromN == getRoot())
setRoot(To);
}
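// A DAG-combine style sketch (names assumed): fold (add x, 0) -> x by
// redirecting every use of the add's single result.
//
//   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
//     if (C->isNullValue())
//       DAG.ReplaceAllUsesWith(SDValue(N, 0), N->getOperand(0));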
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
assert((!From->hasAnyUseOfValue(i) ||
From->getValueType(i) == To->getValueType(i)) &&
"Cannot use this version of ReplaceAllUsesWith!");
#endif
// Handle the trivial case.
if (From == To)
return;
// Iterate over just the existing users of From. See the comments in
// the ReplaceAllUsesWith above.
SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
RAUWUpdateListener Listener(*this, UI, UE);
while (UI != UE) {
SDNode *User = *UI;
// This node is about to morph, remove its old self from the CSE maps.
RemoveNodeFromCSEMaps(User);
// A user can appear in a use list multiple times, and when this
// happens the uses are usually next to each other in the list.
// To help reduce the number of CSE recomputations, process all
// the uses of this user that we can find this way.
do {
SDUse &Use = UI.getUse();
++UI;
Use.setNode(To);
} while (UI != UE && *UI == User);
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User);
}
// If we just RAUW'd the root, take note.
if (From == getRoot().getNode())
setRoot(SDValue(To, getRoot().getResNo()));
}
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
if (From->getNumValues() == 1) // Handle the simple case efficiently.
return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
// Iterate over just the existing users of From. See the comments in
// the ReplaceAllUsesWith above.
SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
RAUWUpdateListener Listener(*this, UI, UE);
while (UI != UE) {
SDNode *User = *UI;
// This node is about to morph, remove its old self from the CSE maps.
RemoveNodeFromCSEMaps(User);
// A user can appear in a use list multiple times, and when this
// happens the uses are usually next to each other in the list.
// To help reduce the number of CSE recomputations, process all
// the uses of this user that we can find this way.
do {
SDUse &Use = UI.getUse();
const SDValue &ToOp = To[Use.getResNo()];
++UI;
Use.set(ToOp);
} while (UI != UE && *UI == User);
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User);
}
// If we just RAUW'd the root, take note.
if (From == getRoot().getNode())
setRoot(SDValue(To[getRoot().getResNo()]));
}
/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
// Handle the really simple, really trivial case efficiently.
if (From == To) return;
  // Handle the simple, trivial case efficiently.
if (From.getNode()->getNumValues() == 1) {
ReplaceAllUsesWith(From, To);
return;
}
// Iterate over just the existing users of From. See the comments in
// the ReplaceAllUsesWith above.
SDNode::use_iterator UI = From.getNode()->use_begin(),
UE = From.getNode()->use_end();
RAUWUpdateListener Listener(*this, UI, UE);
while (UI != UE) {
SDNode *User = *UI;
bool UserRemovedFromCSEMaps = false;
// A user can appear in a use list multiple times, and when this
// happens the uses are usually next to each other in the list.
// To help reduce the number of CSE recomputations, process all
// the uses of this user that we can find this way.
do {
SDUse &Use = UI.getUse();
// Skip uses of different values from the same node.
if (Use.getResNo() != From.getResNo()) {
++UI;
continue;
}
// If this node hasn't been modified yet, it's still in the CSE maps,
// so remove its old self from the CSE maps.
if (!UserRemovedFromCSEMaps) {
RemoveNodeFromCSEMaps(User);
UserRemovedFromCSEMaps = true;
}
++UI;
Use.set(To);
} while (UI != UE && *UI == User);
// We are iterating over all uses of the From node, so if a use
// doesn't use the specific value, no changes are made.
if (!UserRemovedFromCSEMaps)
continue;
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User);
}
// If we just RAUW'd the root, take note.
if (From == getRoot())
setRoot(To);
}
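// Sketch (names assumed): replace only the chain result of a two-result load
// 'Ld' with 'NewChain', leaving uses of the loaded value untouched.
//
//   DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);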
namespace {
/// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
/// to record information about a use.
struct UseMemo {
SDNode *User;
unsigned Index;
SDUse *Use;
};
/// operator< - Sort Memos by User.
bool operator<(const UseMemo &L, const UseMemo &R) {
return (intptr_t)L.User < (intptr_t)R.User;
}
}
/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
const SDValue *To,
                                              unsigned Num) {
// Handle the simple, trivial case efficiently.
if (Num == 1)
return ReplaceAllUsesOfValueWith(*From, *To);
  // Read up all the uses and make records of them. This helps to handle new
  // uses that are introduced during the replacement process.
SmallVector<UseMemo, 4> Uses;
for (unsigned i = 0; i != Num; ++i) {
unsigned FromResNo = From[i].getResNo();
SDNode *FromNode = From[i].getNode();
for (SDNode::use_iterator UI = FromNode->use_begin(),
E = FromNode->use_end(); UI != E; ++UI) {
SDUse &Use = UI.getUse();
if (Use.getResNo() == FromResNo) {
UseMemo Memo = { *UI, i, &Use };
Uses.push_back(Memo);
}
}
}
// Sort the uses, so that all the uses from a given User are together.
std::sort(Uses.begin(), Uses.end());
for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
UseIndex != UseIndexEnd; ) {
// We know that this user uses some value of From. If it is the right
// value, update it.
SDNode *User = Uses[UseIndex].User;
// This node is about to morph, remove its old self from the CSE maps.
RemoveNodeFromCSEMaps(User);
// The Uses array is sorted, so all the uses for a given User
// are next to each other in the list.
// To help reduce the number of CSE recomputations, process all
// the uses of this user that we can find this way.
do {
unsigned i = Uses[UseIndex].Index;
SDUse &Use = *Uses[UseIndex].Use;
++UseIndex;
Use.set(To[i]);
} while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User);
}
}
/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the number of nodes; as a
/// side effect, the AllNodes list is relinked into that topological order.
unsigned SelectionDAG::AssignTopologicalOrder() {
unsigned DAGSize = 0;
// SortedPos tracks the progress of the algorithm. Nodes before it are
// sorted, nodes after it are unsorted. When the algorithm completes
// it is at the end of the list.
allnodes_iterator SortedPos = allnodes_begin();
// Visit all the nodes. Move nodes with no operands to the front of
// the list immediately. Annotate nodes that do have operands with their
// operand count. Before we do this, the Node Id fields of the nodes
// may contain arbitrary values. After, the Node Id fields for nodes
// before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
// count of outstanding operands.
for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
SDNode *N = I++;
checkForCycles(N, this);
unsigned Degree = N->getNumOperands();
if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
N->setNodeId(DAGSize++);
allnodes_iterator Q = N;
if (Q != SortedPos)
SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
assert(SortedPos != AllNodes.end() && "Overran node list");
++SortedPos;
} else {
// Temporarily use the Node Id as scratch space for the degree count.
N->setNodeId(Degree);
}
}
// Visit all the nodes. As we iterate, move nodes into sorted order,
// such that by the time the end is reached all nodes will be sorted.
for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
SDNode *N = I;
checkForCycles(N, this);
// N is in sorted position, so all its uses have one less operand
// that needs to be sorted.
for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
UI != UE; ++UI) {
SDNode *P = *UI;
unsigned Degree = P->getNodeId();
assert(Degree != 0 && "Invalid node degree");
--Degree;
if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
P->setNodeId(DAGSize++);
if (P != SortedPos)
SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
assert(SortedPos != AllNodes.end() && "Overran node list");
++SortedPos;
} else {
// Update P's outstanding operand count.
P->setNodeId(Degree);
}
}
if (I == SortedPos) {
#ifndef NDEBUG
SDNode *S = ++I;
dbgs() << "Overran sorted position:\n";
S->dumprFull(this); dbgs() << "\n";
dbgs() << "Checking if this is due to cycles\n";
checkForCycles(this, true);
#endif
llvm_unreachable(nullptr);
}
}
assert(SortedPos == AllNodes.end() &&
"Topological sort incomplete!");
assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
"First node in topological sort is not the entry token!");
assert(AllNodes.front().getNodeId() == 0 &&
"First node in topological sort has non-zero id!");
assert(AllNodes.front().getNumOperands() == 0 &&
"First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
assert(DAGSize == allnodes_size() && "Node count mismatch!");
return DAGSize;
}
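// Sketch: after this call the AllNodes list itself is in topological order,
// so a scheduler can walk it front to back knowing every operand of a node
// was already visited.
//
//   unsigned NumNodes = DAG.AssignTopologicalOrder();
//   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
//                                        E = DAG.allnodes_end(); I != E; ++I)
//     ; // I->getNodeId() increases monotonically from 0 to NumNodes-1.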
/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
if (SD) {
assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
SD->setHasDebugValue(true);
}
DbgInfo->add(DB, SD, isParameter);
}
/// TransferDbgValues - Transfer SDDbgValues.
void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
if (From == To || !From.getNode()->getHasDebugValue())
return;
SDNode *FromNode = From.getNode();
SDNode *ToNode = To.getNode();
ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
SmallVector<SDDbgValue *, 2> ClonedDVs;
for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
I != E; ++I) {
SDDbgValue *Dbg = *I;
if (Dbg->getKind() == SDDbgValue::SDNODE) {
SDDbgValue *Clone =
getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
Dbg->getDebugLoc(), Dbg->getOrder());
ClonedDVs.push_back(Clone);
}
}
for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
E = ClonedDVs.end(); I != E; ++I)
AddDbgValue(*I, ToNode, false);
}
//===----------------------------------------------------------------------===//
// SDNode Class
//===----------------------------------------------------------------------===//
HandleSDNode::~HandleSDNode() {
DropOperands();
}
GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
DebugLoc DL, const GlobalValue *GA,
EVT VT, int64_t o, unsigned char TF)
: SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
TheGlobal = GA;
}
AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
SDValue X, unsigned SrcAS,
unsigned DestAS)
: UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
EVT memvt, MachineMemOperand *mmo)
: SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant());
assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
assert(isNonTemporal() == MMO->isNonTemporal() &&
"Non-temporal encoding error!");
// We check here that the size of the memory operand fits within the size of
// the MMO. This is because the MMO might indicate only a possible address
// range instead of specifying the affected memory addresses precisely.
assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}
MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
: SDNode(Opc, Order, dl, VTs, Ops),
MemoryVT(memvt), MMO(mmo) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant());
assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}
/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
AddNodeIDNode(ID, this);
}
namespace {
struct EVTArray {
std::vector<EVT> VTs;
EVTArray() {
VTs.reserve(MVT::LAST_VALUETYPE);
for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
VTs.push_back(MVT((MVT::SimpleValueType)i));
}
};
}
static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;
/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
if (VT.isExtended()) {
sys::SmartScopedLock<true> Lock(*VTMutex);
return &(*EVTs->insert(VT).first);
} else {
assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Value type out of range!");
return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
}
}
/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
assert(Value < getNumValues() && "Bad value!");
// TODO: Only iterate over uses of a given value of the node
for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
if (UI.getUse().getResNo() == Value) {
if (NUses == 0)
return false;
--NUses;
}
}
// Found exactly the right number of uses?
return NUses == 0;
}
/// hasAnyUseOfValue - Return true if there is any use of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
assert(Value < getNumValues() && "Bad value!");
for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
if (UI.getUse().getResNo() == Value)
return true;
return false;
}
/// isOnlyUserOf - Return true if this node is the only use of N.
///
bool SDNode::isOnlyUserOf(const SDNode *N) const {
bool Seen = false;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
SDNode *User = *I;
if (User == this)
Seen = true;
else
return false;
}
return Seen;
}
/// isOperandOf - Return true if this value is an operand of N.
///
bool SDValue::isOperandOf(const SDNode *N) const {
for (const SDValue &Op : N->op_values())
if (*this == Op)
return true;
return false;
}
bool SDNode::isOperandOf(const SDNode *N) const {
for (const SDValue &Op : N->op_values())
if (this == Op.getNode())
return true;
return false;
}
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
unsigned Depth) const {
if (*this == Dest) return true;
// Don't search too deeply, we just want to be able to see through
// TokenFactor's etc.
if (Depth == 0) return false;
// If this is a token factor, all inputs to the TF happen in parallel. If any
// of the operands of the TF does not reach dest, then we cannot do the xform.
if (getOpcode() == ISD::TokenFactor) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
return false;
return true;
}
// Loads don't have side effects, look through them.
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
if (!Ld->isVolatile())
return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
}
return false;
}
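// Sketch (names assumed): a combine can check that a store's chain reaches a
// load's chain result without crossing side effects before reordering them.
//
//   if (St->getChain().reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
//     // Safe to fold with respect to intervening chain nodes.
//   }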
/// hasPredecessor - Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// NOTE: This is an expensive method. Use it carefully.
bool SDNode::hasPredecessor(const SDNode *N) const {
SmallPtrSet<const SDNode *, 32> Visited;
SmallVector<const SDNode *, 16> Worklist;
return hasPredecessorHelper(N, Visited, Worklist);
}
bool
SDNode::hasPredecessorHelper(const SDNode *N,
SmallPtrSetImpl<const SDNode *> &Visited,
SmallVectorImpl<const SDNode *> &Worklist) const {
if (Visited.empty()) {
Worklist.push_back(this);
} else {
// Take a look in the visited set. If we've already encountered this node
// we needn't search further.
if (Visited.count(N))
return true;
}
// Haven't visited N yet. Continue the search.
while (!Worklist.empty()) {
const SDNode *M = Worklist.pop_back_val();
for (const SDValue &OpV : M->op_values()) {
SDNode *Op = OpV.getNode();
if (Visited.insert(Op).second)
Worklist.push_back(Op);
if (Op == N)
return true;
}
}
return false;
}
uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
assert(Num < NumOperands && "Invalid child # of SDNode!");
return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
assert(N->getNumValues() == 1 &&
"Can't unroll a vector with multiple results!");
EVT VT = N->getValueType(0);
unsigned NE = VT.getVectorNumElements();
EVT EltVT = VT.getVectorElementType();
SDLoc dl(N);
SmallVector<SDValue, 8> Scalars;
SmallVector<SDValue, 4> Operands(N->getNumOperands());
// If ResNE is 0, fully unroll the vector op.
if (ResNE == 0)
ResNE = NE;
else if (NE > ResNE)
NE = ResNE;
unsigned i;
  for (i = 0; i != NE; ++i) {
for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
SDValue Operand = N->getOperand(j);
EVT OperandVT = Operand.getValueType();
if (OperandVT.isVector()) {
// A vector operand; extract a single element.
EVT OperandEltVT = OperandVT.getVectorElementType();
Operands[j] =
getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
} else {
// A scalar operand; just use it as is.
Operands[j] = Operand;
}
}
switch (N->getOpcode()) {
default:
Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
break;
case ISD::VSELECT:
Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR:
Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
getShiftAmountOperand(Operands[0].getValueType(),
Operands[1])));
break;
case ISD::SIGN_EXTEND_INREG:
case ISD::FP_ROUND_INREG: {
EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
Operands[0],
getValueType(ExtVT)));
}
}
}
for (; i < ResNE; ++i)
Scalars.push_back(getUNDEF(EltVT));
return getNode(ISD::BUILD_VECTOR, dl,
EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
}
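// Sketch: legalization can scalarize an unsupported vector operation and get
// back a BUILD_VECTOR of per-element results ('N' assumed to be a one-result
// vector node).
//
//   SDValue Unrolled = DAG.UnrollVectorOp(N, /*ResNE=*/0); // unroll fully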
/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
unsigned Bytes, int Dist) const {
if (LD->getChain() != Base->getChain())
return false;
EVT VT = LD->getValueType(0);
if (VT.getSizeInBits() / 8 != Bytes)
return false;
SDValue Loc = LD->getOperand(1);
SDValue BaseLoc = Base->getOperand(1);
if (Loc.getOpcode() == ISD::FrameIndex) {
if (BaseLoc.getOpcode() != ISD::FrameIndex)
return false;
const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
int FS = MFI->getObjectSize(FI);
int BFS = MFI->getObjectSize(BFI);
if (FS != BFS || FS != (int)Bytes) return false;
return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
}
// Handle X + C.
if (isBaseWithConstantOffset(Loc)) {
int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
if (Loc.getOperand(0) == BaseLoc) {
// If the base location is a simple address with no offset itself, then
// the second load's first add operand should be the base address.
if (LocOffset == Dist * (int)Bytes)
return true;
} else if (isBaseWithConstantOffset(BaseLoc)) {
// The base location itself has an offset, so subtract that value from the
// second load's offset before comparing to distance * size.
int64_t BOffset =
cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
if ((LocOffset - BOffset) == Dist * (int)Bytes)
return true;
}
}
}
const GlobalValue *GV1 = nullptr;
const GlobalValue *GV2 = nullptr;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
if (isGA1 && isGA2 && GV1 == GV2)
return Offset1 == (Offset2 + Dist*Bytes);
return false;
}
/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
// If this is a GlobalAddress + cst, return the alignment.
const GlobalValue *GV;
int64_t GVOffset = 0;
if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
getDataLayout());
unsigned AlignBits = KnownZero.countTrailingOnes();
unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
if (Align)
return MinAlign(Align, GVOffset);
}
// If this is a direct reference to a stack slot, use information about the
// stack slot's alignment.
int FrameIdx = 1 << 31;
int64_t FrameOffset = 0;
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
FrameIdx = FI->getIndex();
} else if (isBaseWithConstantOffset(Ptr) &&
isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
// Handle FI+Cst
FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
FrameOffset = Ptr.getConstantOperandVal(1);
}
if (FrameIdx != (1 << 31)) {
const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
FrameOffset);
return FIInfoAlign;
}
return 0;
}
/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
// Currently all types are split in half.
EVT LoVT, HiVT;
if (!VT.isVector()) {
LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
} else {
unsigned NumElements = VT.getVectorNumElements();
assert(!(NumElements & 1) && "Splitting vector, but not in half!");
LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
NumElements/2);
}
return std::make_pair(LoVT, HiVT);
}
/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
const EVT &HiVT) {
assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
N.getValueType().getVectorNumElements() &&
"More vector elements requested than available!");
SDValue Lo, Hi;
Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
getConstant(LoVT.getVectorNumElements(), DL,
TLI->getVectorIdxTy(getDataLayout())));
return std::make_pair(Lo, Hi);
}
void SelectionDAG::ExtractVectorElements(SDValue Op,
SmallVectorImpl<SDValue> &Args,
unsigned Start, unsigned Count) {
EVT VT = Op.getValueType();
if (Count == 0)
Count = VT.getVectorNumElements();
EVT EltVT = VT.getVectorElementType();
EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
SDLoc SL(Op);
for (unsigned i = Start, e = Start + Count; i != e; ++i) {
Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
Op, getConstant(i, SL, IdxTy)));
}
}
// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
return getGlobal()->getType()->getAddressSpace();
}
Type *ConstantPoolSDNode::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
}
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
APInt &SplatUndef,
unsigned &SplatBitSize,
bool &HasAnyUndefs,
unsigned MinSplatBits,
bool isBigEndian) const {
EVT VT = getValueType(0);
assert(VT.isVector() && "Expected a vector type");
unsigned sz = VT.getSizeInBits();
if (MinSplatBits > sz)
return false;
SplatValue = APInt(sz, 0);
SplatUndef = APInt(sz, 0);
// Get the bits. Bits with undefined values (when the corresponding element
// of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
// in SplatValue. If any of the values are not constant, give up and return
// false.
unsigned int nOps = getNumOperands();
assert(nOps > 0 && "isConstantSplat has 0-size build vector");
unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
for (unsigned j = 0; j < nOps; ++j) {
unsigned i = isBigEndian ? nOps-1-j : j;
SDValue OpVal = getOperand(i);
unsigned BitPos = j * EltBitSize;
if (OpVal.getOpcode() == ISD::UNDEF)
SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
zextOrTrunc(sz) << BitPos;
else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
else
return false;
}
// The build_vector is all constants or undefs. Find the smallest element
// size that splats the vector.
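  // For example, a build_vector of four i16 ones viewed as 64 bits is
  // 0x0001000100010001; halving 64 -> 32 -> 16 leaves SplatBitSize == 16
  // and SplatValue == 1.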
HasAnyUndefs = (SplatUndef != 0);
while (sz > 8) {
unsigned HalfSize = sz / 2;
APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
APInt LowValue = SplatValue.trunc(HalfSize);
APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
APInt LowUndef = SplatUndef.trunc(HalfSize);
// If the two halves do not match (ignoring undef bits), stop here.
if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
MinSplatBits > HalfSize)
break;
SplatValue = HighValue | LowValue;
SplatUndef = HighUndef & LowUndef;
sz = HalfSize;
}
SplatBitSize = sz;
return true;
}
SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
if (UndefElements) {
UndefElements->clear();
UndefElements->resize(getNumOperands());
}
SDValue Splatted;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
SDValue Op = getOperand(i);
if (Op.getOpcode() == ISD::UNDEF) {
if (UndefElements)
(*UndefElements)[i] = true;
} else if (!Splatted) {
Splatted = Op;
} else if (Splatted != Op) {
return SDValue();
}
}
if (!Splatted) {
assert(getOperand(0).getOpcode() == ISD::UNDEF &&
"Can only have a splat without a constant for all undefs.");
return getOperand(0);
}
return Splatted;
}
ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}
ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}
bool BuildVectorSDNode::isConstant() const {
for (const SDValue &Op : op_values()) {
unsigned Opc = Op.getOpcode();
if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
return false;
}
return true;
}
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
// Find the first non-undef value in the shuffle mask.
unsigned i, e;
for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
/* search */;
assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
// Make sure all remaining elements are either undef or the same as the first
// non-undef value.
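  // e.g. <-1, 2, -1, 2> is a splat mask (every defined index is 2), while
  // <0, 1, 0, 1> is not.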
for (int Idx = Mask[i]; i != e; ++i)
if (Mask[i] >= 0 && Mask[i] != Idx)
return false;
return true;
}
#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
SmallPtrSetImpl<const SDNode*> &Visited,
SmallPtrSetImpl<const SDNode*> &Checked,
const llvm::SelectionDAG *DAG) {
// If this node has already been checked, don't check it again.
if (Checked.count(N))
return;
// If a node has already been visited on this depth-first walk, reject it as
// a cycle.
if (!Visited.insert(N).second) {
errs() << "Detected cycle in SelectionDAG\n";
dbgs() << "Offending node:\n";
N->dumprFull(DAG); dbgs() << "\n";
abort();
}
for (const SDValue &Op : N->op_values())
checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
Checked.insert(N);
Visited.erase(N);
}
#endif
void llvm::checkForCycles(const llvm::SDNode *N,
const llvm::SelectionDAG *DAG,
bool force) {
#ifndef NDEBUG
bool check = force;
#ifdef XDEBUG
check = true;
#endif // XDEBUG
if (check) {
assert(N && "Checking nonexistent SDNode");
SmallPtrSet<const SDNode*, 32> visited;
SmallPtrSet<const SDNode*, 32> checked;
checkForCyclesHelper(N, visited, checked, DAG);
}
#endif // !NDEBUG
}
void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | //===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "legalizedag"
//===----------------------------------------------------------------------===//
/// This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it. This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of idioms
/// as part of its processing. For example, if a target does not support a
/// 'setcc' instruction efficiently, but does support the 'brcc' instruction,
/// this will attempt to merge the setcc and branch instructions into brcc's.
///
namespace {
class SelectionDAGLegalize {
const TargetMachine &TM;
const TargetLowering &TLI;
SelectionDAG &DAG;
/// \brief The set of nodes which have already been legalized. We hold a
/// reference to it in order to update as necessary on node deletion.
SmallPtrSetImpl<SDNode *> &LegalizedNodes;
/// \brief A set of all the nodes updated during legalization.
SmallSetVector<SDNode *, 16> *UpdatedNodes;
EVT getSetCCResultType(EVT VT) const {
return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
}
// Libcall insertion helpers.
public:
SelectionDAGLegalize(SelectionDAG &DAG,
SmallPtrSetImpl<SDNode *> &LegalizedNodes,
SmallSetVector<SDNode *, 16> *UpdatedNodes = nullptr)
: TM(DAG.getTarget()), TLI(DAG.getTargetLoweringInfo()), DAG(DAG),
LegalizedNodes(LegalizedNodes), UpdatedNodes(UpdatedNodes) {}
/// \brief Legalizes the given operation.
void LegalizeOp(SDNode *Node);
private:
SDValue OptimizeFloatStore(StoreSDNode *ST);
void LegalizeLoadOps(SDNode *Node);
void LegalizeStoreOps(SDNode *Node);
/// Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
SDValue Idx, SDLoc dl);
SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
SDValue Idx, SDLoc dl);
/// Return a vector shuffle operation which
  /// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, SDLoc dl,
SDValue N1, SDValue N2,
ArrayRef<int> Mask) const;
bool LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
bool &NeedInvert, SDLoc dl);
SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
unsigned NumOps, bool isSigned, SDLoc dl);
std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
SDNode *Node, bool isSigned);
SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_F128,
RTLIB::Libcall Call_PPCF128);
SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
RTLIB::Libcall Call_I8,
RTLIB::Libcall Call_I16,
RTLIB::Libcall Call_I32,
RTLIB::Libcall Call_I64,
RTLIB::Libcall Call_I128);
void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
void ExpandSinCosLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, SDLoc dl);
SDValue ExpandBUILD_VECTOR(SDNode *Node);
SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
SmallVectorImpl<SDValue> &Results);
SDValue ExpandFCOPYSIGN(SDNode *Node);
SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
SDLoc dl);
SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
SDLoc dl);
SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
SDLoc dl);
SDValue ExpandBSWAP(SDValue Op, SDLoc dl);
SDValue ExpandBitCount(unsigned Opc, SDValue Op, SDLoc dl);
SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
SDValue ExpandInsertToVectorThroughStack(SDValue Op);
SDValue ExpandVectorBuildThroughStack(SDNode* Node);
SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);
std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
void ExpandNode(SDNode *Node);
void PromoteNode(SDNode *Node);
public:
// Node replacement helpers
void ReplacedNode(SDNode *N) {
LegalizedNodes.erase(N);
if (UpdatedNodes)
UpdatedNodes->insert(N);
}
void ReplaceNode(SDNode *Old, SDNode *New) {
DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
dbgs() << " with: "; New->dump(&DAG));
assert(Old->getNumValues() == New->getNumValues() &&
"Replacing one node with another that produces a different number "
"of values!");
DAG.ReplaceAllUsesWith(Old, New);
for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i)
DAG.TransferDbgValues(SDValue(Old, i), SDValue(New, i));
if (UpdatedNodes)
UpdatedNodes->insert(New);
ReplacedNode(Old);
}
void ReplaceNode(SDValue Old, SDValue New) {
DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
dbgs() << " with: "; New->dump(&DAG));
DAG.ReplaceAllUsesWith(Old, New);
DAG.TransferDbgValues(Old, New);
if (UpdatedNodes)
UpdatedNodes->insert(New.getNode());
ReplacedNode(Old.getNode());
}
void ReplaceNode(SDNode *Old, const SDValue *New) {
DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG));
DAG.ReplaceAllUsesWith(Old, New);
for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i) {
DEBUG(dbgs() << (i == 0 ? " with: "
: " and: ");
New[i]->dump(&DAG));
DAG.TransferDbgValues(SDValue(Old, i), New[i]);
if (UpdatedNodes)
UpdatedNodes->insert(New[i].getNode());
}
ReplacedNode(Old);
}
};
}
/// Return a vector shuffle operation which
/// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, SDLoc dl,
SDValue N1, SDValue N2,
ArrayRef<int> Mask) const {
unsigned NumMaskElts = VT.getVectorNumElements();
unsigned NumDestElts = NVT.getVectorNumElements();
unsigned NumEltsGrowth = NumDestElts / NumMaskElts;
assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");
if (NumEltsGrowth == 1)
return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumMaskElts; ++i) {
int Idx = Mask[i];
for (unsigned j = 0; j != NumEltsGrowth; ++j) {
if (Idx < 0)
NewMask.push_back(-1);
else
NewMask.push_back(Idx * NumEltsGrowth + j);
}
}
assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
}
/// Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
SDValue
SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
bool Extend = false;
SDLoc dl(CFP);
// If a FP immediate is precise when represented as a float and if the
// target can do an extending load from float to double, we put it into
// the constant pool as a float, even if it is statically typed as a
// double. This shrinks FP constants and canonicalizes them for targets where
// an FP extending load is the same cost as a normal load (such as on the x87
// fp stack or PPC FP unit).
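  // For example, a double constant 1.0 is exactly representable as a float,
  // so it can be pooled as an f32 and widened with an extending load where
  // EXTLOAD from f32 to f64 is legal and profitable.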
EVT VT = CFP->getValueType(0);
ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
if (!UseCP) {
assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(), dl,
(VT == MVT::f64) ? MVT::i64 : MVT::i32);
}
EVT OrigVT = VT;
EVT SVT = VT;
while (SVT != MVT::f32 && SVT != MVT::f16) {
SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
// Only do this if the target has a native EXTLOAD instruction from
// smaller type.
TLI.isLoadExtLegal(ISD::EXTLOAD, OrigVT, SVT) &&
TLI.ShouldShrinkFPConstant(OrigVT)) {
Type *SType = SVT.getTypeForEVT(*DAG.getContext());
LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
VT = SVT;
Extend = true;
}
}
SDValue CPIdx =
DAG.getConstantPool(LLVMC, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
if (Extend) {
SDValue Result =
DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
DAG.getEntryNode(),
CPIdx, MachinePointerInfo::getConstantPool(),
VT, false, false, false, Alignment);
return Result;
}
SDValue Result =
DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(), false, false, false,
Alignment);
return Result;
}
/// Expands an unaligned store to 2 half-size stores.
static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
const TargetLowering &TLI,
SelectionDAGLegalize *DAGLegalize) {
assert(ST->getAddressingMode() == ISD::UNINDEXED &&
"unaligned indexed stores not implemented!");
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
SDValue Val = ST->getValue();
EVT VT = Val.getValueType();
int Alignment = ST->getAlignment();
unsigned AS = ST->getAddressSpace();
SDLoc dl(ST);
if (ST->getMemoryVT().isFloatingPoint() ||
ST->getMemoryVT().isVector()) {
EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
if (TLI.isTypeLegal(intVT)) {
// Expand to a bitconvert of the value to the integer type of the
// same size, then a (misaligned) int store.
// FIXME: Does not handle truncating floating point stores!
SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
ST->isVolatile(), ST->isNonTemporal(), Alignment);
DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
return;
}
    // Do an (aligned) store to a stack slot, then copy from the stack slot
// to the final destination using (unaligned) integer loads and stores.
EVT StoredVT = ST->getMemoryVT();
MVT RegVT =
TLI.getRegisterType(*DAG.getContext(),
EVT::getIntegerVT(*DAG.getContext(),
StoredVT.getSizeInBits()));
unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
unsigned RegBytes = RegVT.getSizeInBits() / 8;
unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
// Make sure the stack slot is also aligned for the register type.
SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);
// Perform the original store, only redirected to the stack slot.
SDValue Store = DAG.getTruncStore(Chain, dl,
Val, StackPtr, MachinePointerInfo(),
StoredVT, false, false, 0);
SDValue Increment = DAG.getConstant(
RegBytes, dl, TLI.getPointerTy(DAG.getDataLayout(), AS));
SmallVector<SDValue, 8> Stores;
unsigned Offset = 0;
    // Do all but one copy using the full register width.
for (unsigned i = 1; i < NumRegs; i++) {
// Load one integer register's worth from the stack slot.
SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
MachinePointerInfo(),
false, false, false, 0);
// Store it to the final location. Remember the store.
Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
ST->getPointerInfo().getWithOffset(Offset),
ST->isVolatile(), ST->isNonTemporal(),
MinAlign(ST->getAlignment(), Offset)));
// Increment the pointers.
Offset += RegBytes;
StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
Increment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
}
// The last store may be partial. Do a truncating store. On big-endian
// machines this requires an extending load from the stack slot to ensure
// that the bits are in the right place.
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
8 * (StoredBytes - Offset));
// Load from the stack slot.
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
MachinePointerInfo(),
MemVT, false, false, false, 0);
Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
ST->getPointerInfo()
.getWithOffset(Offset),
MemVT, ST->isVolatile(),
ST->isNonTemporal(),
MinAlign(ST->getAlignment(), Offset),
ST->getAAInfo()));
// The order of the stores doesn't matter - say it with a TokenFactor.
SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
return;
}
assert(ST->getMemoryVT().isInteger() &&
!ST->getMemoryVT().isVector() &&
"Unaligned store of unknown type.");
// Get the half-size VT
EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
int NumBits = NewStoredVT.getSizeInBits();
int IncrementSize = NumBits / 8;
// Divide the stored value in two parts.
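  // e.g. on a little-endian target an unaligned i32 store becomes a
  // truncating i16 store of the value at Ptr and an i16 store of
  // (srl Val, 16) at Ptr + 2.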
SDValue ShiftAmount =
DAG.getConstant(NumBits, dl, TLI.getShiftAmountTy(Val.getValueType(),
DAG.getDataLayout()));
SDValue Lo = Val;
SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
// Store the two parts
SDValue Store1, Store2;
Store1 = DAG.getTruncStore(Chain, dl,
DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
Ptr, ST->getPointerInfo(), NewStoredVT,
ST->isVolatile(), ST->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl,
TLI.getPointerTy(DAG.getDataLayout(), AS)));
Alignment = MinAlign(Alignment, IncrementSize);
Store2 = DAG.getTruncStore(
Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT,
ST->isVolatile(), ST->isNonTemporal(), Alignment, ST->getAAInfo());
SDValue Result =
DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
}
/// Expands an unaligned load to 2 half-size loads.
static void
ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
const TargetLowering &TLI,
SDValue &ValResult, SDValue &ChainResult) {
assert(LD->getAddressingMode() == ISD::UNINDEXED &&
"unaligned indexed loads not implemented!");
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
EVT VT = LD->getValueType(0);
EVT LoadedVT = LD->getMemoryVT();
SDLoc dl(LD);
if (VT.isFloatingPoint() || VT.isVector()) {
EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
if (TLI.isTypeLegal(intVT) && TLI.isTypeLegal(LoadedVT)) {
// Expand to a (misaligned) integer load of the same size,
// then bitconvert to floating point or vector.
SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
LD->getMemOperand());
SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
if (LoadedVT != VT)
Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
ISD::ANY_EXTEND, dl, VT, Result);
ValResult = Result;
ChainResult = Chain;
return;
}
    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
MVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
unsigned RegBytes = RegVT.getSizeInBits() / 8;
unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
// Make sure the stack slot is also aligned for the register type.
SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
SDValue Increment =
DAG.getConstant(RegBytes, dl, TLI.getPointerTy(DAG.getDataLayout()));
SmallVector<SDValue, 8> Stores;
SDValue StackPtr = StackBase;
unsigned Offset = 0;
    // Do all but one copy using the full register width.
for (unsigned i = 1; i < NumRegs; i++) {
// Load one integer register's worth from the original location.
SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
LD->getPointerInfo().getWithOffset(Offset),
LD->isVolatile(), LD->isNonTemporal(),
LD->isInvariant(),
MinAlign(LD->getAlignment(), Offset),
LD->getAAInfo());
// Follow the load with a store to the stack slot. Remember the store.
Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
MachinePointerInfo(), false, false, 0));
// Increment the pointers.
Offset += RegBytes;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
Increment);
}
// The last copy may be partial. Do an extending load.
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
8 * (LoadedBytes - Offset));
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
LD->getPointerInfo().getWithOffset(Offset),
MemVT, LD->isVolatile(),
LD->isNonTemporal(),
LD->isInvariant(),
MinAlign(LD->getAlignment(), Offset),
LD->getAAInfo());
// Follow the load with a store to the stack slot. Remember the store.
// On big-endian machines this requires a truncating store to ensure
// that the bits end up in the right place.
Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
MachinePointerInfo(), MemVT,
false, false, 0));
// The order of the stores doesn't matter - say it with a TokenFactor.
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
// Finally, perform the original load only redirected to the stack slot.
Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
MachinePointerInfo(), LoadedVT, false,false, false,
0);
// Callers expect a MERGE_VALUES node.
ValResult = Load;
ChainResult = TF;
return;
}
assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
"Unaligned load of unsupported type.");
// Compute the new VT that is half the size of the old one. This is an
// integer MVT.
unsigned NumBits = LoadedVT.getSizeInBits();
EVT NewLoadedVT;
NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
NumBits >>= 1;
unsigned Alignment = LD->getAlignment();
unsigned IncrementSize = NumBits / 8;
ISD::LoadExtType HiExtType = LD->getExtensionType();
// If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
if (HiExtType == ISD::NON_EXTLOAD)
HiExtType = ISD::ZEXTLOAD;
// Load the value in two parts
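  // e.g. on a little-endian target an unaligned i32 load becomes a ZEXTLOAD
  // of the low i16 at Ptr and a load of the high i16 at Ptr + 2, recombined
  // below as (or (shl Hi, 16), Lo).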
SDValue Lo, Hi;
if (DAG.getDataLayout().isLittleEndian()) {
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), LD->isInvariant(), Alignment,
LD->getAAInfo());
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(),LD->isInvariant(),
MinAlign(Alignment, IncrementSize), LD->getAAInfo());
} else {
Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), LD->isInvariant(), Alignment,
LD->getAAInfo());
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), LD->isInvariant(),
MinAlign(Alignment, IncrementSize), LD->getAAInfo());
}
  // Aggregate the two parts.
SDValue ShiftAmount =
DAG.getConstant(NumBits, dl, TLI.getShiftAmountTy(Hi.getValueType(),
DAG.getDataLayout()));
SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
ValResult = Result;
ChainResult = TF;
}
/// Some targets cannot handle a variable insertion index for the
/// INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
SDLoc dl) {
SDValue Tmp1 = Vec;
SDValue Tmp2 = Val;
SDValue Tmp3 = Idx;
// If the target doesn't support this, we have to spill the input vector
// to a temporary stack slot, update the element, then reload it. This is
  // badness. We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction), then
  // permute it into place, if the idx is a constant and the permutation is
  // supported by the target.
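  // The sequence built below is: store Vec to a fresh stack slot, compute
  // the element address as slot + Idx * sizeof(elt), store Val there, then
  // reload the whole vector.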
EVT VT = Tmp1.getValueType();
EVT EltVT = VT.getVectorElementType();
EVT IdxVT = Tmp3.getValueType();
EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
SDValue StackPtr = DAG.CreateStackTemporary(VT);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
// Store the vector.
SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
MachinePointerInfo::getFixedStack(SPFI),
false, false, 0);
// Truncate or zero extend offset to target pointer type.
unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
// Add the offset to the index.
unsigned EltSize = EltVT.getSizeInBits()/8;
Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,
DAG.getConstant(EltSize, dl, IdxVT));
SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
// Store the scalar value.
Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT,
false, false, 0);
// Load the updated vector.
return DAG.getLoad(VT, dl, Ch, StackPtr,
MachinePointerInfo::getFixedStack(SPFI), false, false,
false, 0);
}
SDValue SelectionDAGLegalize::
ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, SDLoc dl) {
if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
// SCALAR_TO_VECTOR requires that the type of the value being inserted
// match the element type of the vector being created, except for
// integers in which case the inserted value can be over width.
EVT EltVT = Vec.getValueType().getVectorElementType();
if (Val.getValueType() == EltVT ||
(EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
Vec.getValueType(), Val);
unsigned NumElts = Vec.getValueType().getVectorNumElements();
// We generate a shuffle of InVec and ScVec, so the shuffle mask
// should be 0,1,2,3,4,5... with the appropriate element replaced with
// elt 0 of the RHS.
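      // e.g. inserting into element 2 of a v4 vector gives the mask
      // <0, 1, 4, 3>, where 4 selects element 0 of ScVec.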
SmallVector<int, 8> ShufOps;
for (unsigned i = 0; i != NumElts; ++i)
ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);
return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
&ShufOps[0]);
}
}
return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
}
SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
// Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
// FIXME: We shouldn't do this for TargetConstantFP's.
// FIXME: move this to the DAG Combiner! Note that we can't regress due
// to phase ordering between legalized code and the dag combiner. This
// probably means that we need to integrate dag combiner and legalizer
// together.
// We generally can't do this one for long doubles.
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
AAMDNodes AAInfo = ST->getAAInfo();
SDLoc dl(ST);
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
if (CFP->getValueType(0) == MVT::f32 &&
TLI.isTypeLegal(MVT::i32)) {
SDValue Con = DAG.getConstant(CFP->getValueAPF().
bitcastToAPInt().zextOrTrunc(32),
SDLoc(CFP), MVT::i32);
return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
isVolatile, isNonTemporal, Alignment, AAInfo);
}
if (CFP->getValueType(0) == MVT::f64) {
// If this target supports 64-bit registers, do a single 64-bit store.
if (TLI.isTypeLegal(MVT::i64)) {
SDValue Con = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
zextOrTrunc(64), SDLoc(CFP), MVT::i64);
return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
isVolatile, isNonTemporal, Alignment, AAInfo);
}
if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) {
// Otherwise, if the target supports 32-bit registers, use 2 32-bit
// stores. If the target supports neither 32- nor 64-bits, this
// xform is certainly not worth it.
const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
SDValue Lo = DAG.getConstant(IntVal.trunc(32), dl, MVT::i32);
SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), dl, MVT::i32);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile,
isNonTemporal, Alignment, AAInfo);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(4, dl, Ptr.getValueType()));
Hi = DAG.getStore(Chain, dl, Hi, Ptr,
ST->getPointerInfo().getWithOffset(4),
isVolatile, isNonTemporal, MinAlign(Alignment, 4U),
AAInfo);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
}
}
return SDValue(nullptr, 0);
}
void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
StoreSDNode *ST = cast<StoreSDNode>(Node);
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
SDLoc dl(Node);
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
AAMDNodes AAInfo = ST->getAAInfo();
if (!ST->isTruncatingStore()) {
if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
ReplaceNode(ST, OptStore);
return;
}
{
SDValue Value = ST->getValue();
MVT VT = Value.getSimpleValueType();
switch (TLI.getOperationAction(ISD::STORE, VT)) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal: {
// If this is an unaligned store and the target doesn't support it,
// expand it.
unsigned AS = ST->getAddressSpace();
unsigned Align = ST->getAlignment();
if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
if (Align < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
}
break;
}
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res && Res != SDValue(Node, 0))
ReplaceNode(SDValue(Node, 0), Res);
return;
}
case TargetLowering::Promote: {
MVT NVT = TLI.getTypeToPromoteTo(ISD::STORE, VT);
assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
"Can only promote stores to same size type");
Value = DAG.getNode(ISD::BITCAST, dl, NVT, Value);
SDValue Result =
DAG.getStore(Chain, dl, Value, Ptr,
ST->getPointerInfo(), isVolatile,
isNonTemporal, Alignment, AAInfo);
ReplaceNode(SDValue(Node, 0), Result);
break;
}
}
return;
}
} else {
SDValue Value = ST->getValue();
EVT StVT = ST->getMemoryVT();
unsigned StWidth = StVT.getSizeInBits();
auto &DL = DAG.getDataLayout();
if (StWidth != StVT.getStoreSizeInBits()) {
// Promote to a byte-sized store with upper bits zero if not
// storing an integral number of bytes. For example, promote
// TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
StVT.getStoreSizeInBits());
Value = DAG.getZeroExtendInReg(Value, dl, StVT);
SDValue Result =
DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
NVT, isVolatile, isNonTemporal, Alignment, AAInfo);
ReplaceNode(SDValue(Node, 0), Result);
} else if (StWidth & (StWidth - 1)) {
// If not storing a power-of-2 number of bits, expand as two stores.
assert(!StVT.isVector() && "Unsupported truncstore!");
unsigned RoundWidth = 1 << Log2_32(StWidth);
assert(RoundWidth < StWidth);
unsigned ExtraWidth = StWidth - RoundWidth;
assert(ExtraWidth < RoundWidth);
assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
"Store size not an integral number of bytes!");
EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
SDValue Lo, Hi;
unsigned IncrementSize;
if (DL.isLittleEndian()) {
// TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
// Store the bottom RoundWidth bits.
Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
RoundVT,
isVolatile, isNonTemporal, Alignment,
AAInfo);
// Store the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl,
Ptr.getValueType()));
Hi = DAG.getNode(
ISD::SRL, dl, Value.getValueType(), Value,
DAG.getConstant(RoundWidth, dl,
TLI.getShiftAmountTy(Value.getValueType(), DL)));
Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr,
ST->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize), AAInfo);
} else {
// Big endian - avoid unaligned stores.
// TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
// Store the top RoundWidth bits.
Hi = DAG.getNode(
ISD::SRL, dl, Value.getValueType(), Value,
DAG.getConstant(ExtraWidth, dl,
TLI.getShiftAmountTy(Value.getValueType(), DL)));
Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(),
RoundVT, isVolatile, isNonTemporal, Alignment,
AAInfo);
// Store the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl,
Ptr.getValueType()));
Lo = DAG.getTruncStore(Chain, dl, Value, Ptr,
ST->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize), AAInfo);
}
// The order of the stores doesn't matter.
SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
ReplaceNode(SDValue(Node, 0), Result);
} else {
switch (TLI.getTruncStoreAction(ST->getValue().getSimpleValueType(),
StVT.getSimpleVT())) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal: {
unsigned AS = ST->getAddressSpace();
unsigned Align = ST->getAlignment();
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = DL.getABITypeAlignment(Ty);
if (Align < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
}
break;
}
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res && Res != SDValue(Node, 0))
ReplaceNode(SDValue(Node, 0), Res);
return;
}
case TargetLowering::Expand:
assert(!StVT.isVector() &&
"Vector Stores are handled in LegalizeVectorOps");
// TRUNCSTORE:i16 i32 -> STORE i16
assert(TLI.isTypeLegal(StVT) &&
"Do not know how to expand this store!");
Value = DAG.getNode(ISD::TRUNCATE, dl, StVT, Value);
SDValue Result =
DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
isVolatile, isNonTemporal, Alignment, AAInfo);
ReplaceNode(SDValue(Node, 0), Result);
break;
}
}
}
}
void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
LoadSDNode *LD = cast<LoadSDNode>(Node);
SDValue Chain = LD->getChain(); // The chain.
SDValue Ptr = LD->getBasePtr(); // The base pointer.
SDValue Value; // The value returned by the load op.
SDLoc dl(Node);
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD) {
MVT VT = Node->getSimpleValueType(0);
SDValue RVal = SDValue(Node, 0);
SDValue RChain = SDValue(Node, 1);
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal: {
unsigned AS = LD->getAddressSpace();
unsigned Align = LD->getAlignment();
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsMisalignedMemoryAccesses(LD->getMemoryVT(), AS, Align)) {
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
if (Align < ABIAlignment){
ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
}
}
break;
}
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(RVal, DAG);
if (Res.getNode()) {
RVal = Res;
RChain = Res.getValue(1);
}
break;
}
case TargetLowering::Promote: {
MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
"Can only promote loads to same size type");
SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getMemOperand());
RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res);
RChain = Res.getValue(1);
break;
}
}
if (RChain.getNode() != Node) {
assert(RVal.getNode() != Node && "Load must be completely replaced");
DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), RVal);
DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), RChain);
if (UpdatedNodes) {
UpdatedNodes->insert(RVal.getNode());
UpdatedNodes->insert(RChain.getNode());
}
ReplacedNode(Node);
}
return;
}
EVT SrcVT = LD->getMemoryVT();
unsigned SrcWidth = SrcVT.getSizeInBits();
unsigned Alignment = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
bool isInvariant = LD->isInvariant();
AAMDNodes AAInfo = LD->getAAInfo();
if (SrcWidth != SrcVT.getStoreSizeInBits() &&
// Some targets pretend to have an i1 loading operation, and actually
// load an i8. This trick is correct for ZEXTLOAD because the top 7
// bits are guaranteed to be zero; it helps the optimizers understand
// that these bits are zero. It is also useful for EXTLOAD, since it
// tells the optimizers that those bits are undefined. It would be
// nice to have an effective generic way of getting these benefits...
// Until such a way is found, don't insist on promoting i1 here.
(SrcVT != MVT::i1 ||
TLI.getLoadExtAction(ExtType, Node->getValueType(0), MVT::i1) ==
TargetLowering::Promote)) {
// Promote to a byte-sized load if not loading an integral number of
// bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
unsigned NewWidth = SrcVT.getStoreSizeInBits();
EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
SDValue Ch;
// The extra bits are guaranteed to be zero, since we stored them that
// way. A zext load from NVT thus automatically gives zext from SrcVT.
ISD::LoadExtType NewExtType =
ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
SDValue Result =
DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
Chain, Ptr, LD->getPointerInfo(),
NVT, isVolatile, isNonTemporal, isInvariant, Alignment,
AAInfo);
Ch = Result.getValue(1); // The chain.
if (ExtType == ISD::SEXTLOAD)
// Having the top bits zero doesn't help when sign extending.
Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
Result.getValueType(),
Result, DAG.getValueType(SrcVT));
else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
// All the top bits are guaranteed to be zero - inform the optimizers.
Result = DAG.getNode(ISD::AssertZext, dl,
Result.getValueType(), Result,
DAG.getValueType(SrcVT));
Value = Result;
Chain = Ch;
} else if (SrcWidth & (SrcWidth - 1)) {
// If not loading a power-of-2 number of bits, expand as two loads.
assert(!SrcVT.isVector() && "Unsupported extload!");
unsigned RoundWidth = 1 << Log2_32(SrcWidth);
assert(RoundWidth < SrcWidth);
unsigned ExtraWidth = SrcWidth - RoundWidth;
assert(ExtraWidth < RoundWidth);
assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
"Load size not an integral number of bytes!");
EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
SDValue Lo, Hi, Ch;
unsigned IncrementSize;
auto &DL = DAG.getDataLayout();
if (DL.isLittleEndian()) {
// EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
// Load the bottom RoundWidth bits.
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
Chain, Ptr,
LD->getPointerInfo(), RoundVT, isVolatile,
isNonTemporal, isInvariant, Alignment, AAInfo);
// Load the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl,
Ptr.getValueType()));
Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, isVolatile, isNonTemporal, isInvariant,
MinAlign(Alignment, IncrementSize), AAInfo);
// Build a factor node to remember that this load is independent of
// the other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Move the top bits to the right place.
Hi = DAG.getNode(
ISD::SHL, dl, Hi.getValueType(), Hi,
DAG.getConstant(RoundWidth, dl,
TLI.getShiftAmountTy(Hi.getValueType(), DL)));
// Join the hi and lo parts.
Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
} else {
// Big endian - avoid unaligned loads.
// EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
// Load the top RoundWidth bits.
Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
LD->getPointerInfo(), RoundVT, isVolatile,
isNonTemporal, isInvariant, Alignment, AAInfo);
// Load the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl,
Ptr.getValueType()));
Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
dl, Node->getValueType(0), Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, isVolatile, isNonTemporal, isInvariant,
MinAlign(Alignment, IncrementSize), AAInfo);
// Build a factor node to remember that this load is independent of
// the other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Move the top bits to the right place.
Hi = DAG.getNode(
ISD::SHL, dl, Hi.getValueType(), Hi,
DAG.getConstant(ExtraWidth, dl,
TLI.getShiftAmountTy(Hi.getValueType(), DL)));
// Join the hi and lo parts.
Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
}
Chain = Ch;
} else {
bool isCustom = false;
switch (TLI.getLoadExtAction(ExtType, Node->getValueType(0),
SrcVT.getSimpleVT())) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Custom:
isCustom = true;
// FALLTHROUGH
case TargetLowering::Legal: {
Value = SDValue(Node, 0);
Chain = SDValue(Node, 1);
if (isCustom) {
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res.getNode()) {
Value = Res;
Chain = Res.getValue(1);
}
} else {
// If this is an unaligned load and the target doesn't support
// it, expand it.
EVT MemVT = LD->getMemoryVT();
unsigned AS = LD->getAddressSpace();
unsigned Align = LD->getAlignment();
if (!TLI.allowsMisalignedMemoryAccesses(MemVT, AS, Align)) {
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
if (Align < ABIAlignment){
ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, Value, Chain);
}
}
}
break;
}
case TargetLowering::Expand:
if (!TLI.isLoadExtLegal(ISD::EXTLOAD, Node->getValueType(0), SrcVT)) {
// If the source type is not legal, see if there is a legal extload to
// an intermediate type that we can then extend further.
EVT LoadVT = TLI.getRegisterType(SrcVT.getSimpleVT());
if (TLI.isTypeLegal(SrcVT) || // Same as SrcVT == LoadVT?
TLI.isLoadExtLegal(ExtType, LoadVT, SrcVT)) {
// If we are loading a legal type, this is a non-extload followed by a
// full extend.
ISD::LoadExtType MidExtType =
(LoadVT == SrcVT) ? ISD::NON_EXTLOAD : ExtType;
SDValue Load = DAG.getExtLoad(MidExtType, dl, LoadVT, Chain, Ptr,
SrcVT, LD->getMemOperand());
unsigned ExtendOp =
ISD::getExtForLoadExtType(SrcVT.isFloatingPoint(), ExtType);
Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
Chain = Load.getValue(1);
break;
}
}
assert(!SrcVT.isVector() &&
"Vector Loads are handled in LegalizeVectorOps");
// FIXME: This does not work for vectors on most targets. Sign-
// and zero-extend operations are currently folded into extending
// loads, whether they are legal or not, and then we end up here
// without any support for legalizing them.
assert(ExtType != ISD::EXTLOAD &&
"EXTLOAD should always be supported!");
// Turn the unsupported load into an EXTLOAD followed by an
// explicit zero/sign extend inreg.
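      // e.g. an unsupported SEXTLOAD:i8 becomes an EXTLOAD:i8 followed by a
      // SIGN_EXTEND_INREG with a VT of i8.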
SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl,
Node->getValueType(0),
Chain, Ptr, SrcVT,
LD->getMemOperand());
SDValue ValRes;
if (ExtType == ISD::SEXTLOAD)
ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
Result.getValueType(),
Result, DAG.getValueType(SrcVT));
else
ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
Value = ValRes;
Chain = Result.getValue(1);
break;
}
}
// Since loads produce two values, make sure to remember that we legalized
// both of them.
if (Chain.getNode() != Node) {
assert(Value.getNode() != Node && "Load must be completely replaced");
DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Value);
DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
if (UpdatedNodes) {
UpdatedNodes->insert(Value.getNode());
UpdatedNodes->insert(Chain.getNode());
}
ReplacedNode(Node);
}
}
/// Legalize the given node, replacing it with an equivalent sequence of legal
/// operations with legal operands.
void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
DEBUG(dbgs() << "\nLegalizing: "; Node->dump(&DAG));
if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
return;
#ifndef NDEBUG
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) ==
TargetLowering::TypeLegal &&
"Unexpected illegal type!");
for (const SDValue &Op : Node->op_values())
assert((TLI.getTypeAction(*DAG.getContext(),
Op.getValueType()) == TargetLowering::TypeLegal ||
Op.getOpcode() == ISD::TargetConstant) &&
"Unexpected illegal type!");
#endif
// Figure out the correct action; the way to query this varies by opcode
TargetLowering::LegalizeAction Action = TargetLowering::Legal;
bool SimpleFinishLegalizing = true;
switch (Node->getOpcode()) {
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_VOID:
case ISD::STACKSAVE:
Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
break;
case ISD::VAARG:
Action = TLI.getOperationAction(Node->getOpcode(),
Node->getValueType(0));
if (Action != TargetLowering::Promote)
Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
break;
case ISD::FP_TO_FP16:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::EXTRACT_VECTOR_ELT:
Action = TLI.getOperationAction(Node->getOpcode(),
Node->getOperand(0).getValueType());
break;
case ISD::FP_ROUND_INREG:
case ISD::SIGN_EXTEND_INREG: {
EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
break;
}
case ISD::ATOMIC_STORE: {
Action = TLI.getOperationAction(Node->getOpcode(),
Node->getOperand(2).getValueType());
break;
}
case ISD::SELECT_CC:
case ISD::SETCC:
case ISD::BR_CC: {
unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
Node->getOpcode() == ISD::SETCC ? 2 : 1;
unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
MVT OpVT = Node->getOperand(CompareOperand).getSimpleValueType();
ISD::CondCode CCCode =
cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
Action = TLI.getCondCodeAction(CCCode, OpVT);
if (Action == TargetLowering::Legal) {
if (Node->getOpcode() == ISD::SELECT_CC)
Action = TLI.getOperationAction(Node->getOpcode(),
Node->getValueType(0));
else
Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
}
break;
}
case ISD::LOAD:
case ISD::STORE:
// FIXME: Model these properly. LOAD and STORE are complicated, and
// STORE expects the unlegalized operand in some cases.
SimpleFinishLegalizing = false;
break;
case ISD::CALLSEQ_START:
case ISD::CALLSEQ_END:
// FIXME: This shouldn't be necessary. These nodes have special properties
// dealing with the recursive nature of legalization. Removing this
// special case should be done as part of making LegalizeDAG non-recursive.
SimpleFinishLegalizing = false;
break;
case ISD::EXTRACT_ELEMENT:
case ISD::FLT_ROUNDS_:
case ISD::FPOWI:
case ISD::MERGE_VALUES:
case ISD::EH_RETURN:
case ISD::FRAME_TO_ARGS_OFFSET:
case ISD::EH_SJLJ_SETJMP:
case ISD::EH_SJLJ_LONGJMP:
// These operations lie about being legal: when they claim to be legal,
// they should actually be expanded.
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
if (Action == TargetLowering::Legal)
Action = TargetLowering::Expand;
break;
case ISD::INIT_TRAMPOLINE:
case ISD::ADJUST_TRAMPOLINE:
case ISD::FRAMEADDR:
case ISD::RETURNADDR:
// These operations lie about being legal: when they claim to be legal,
// they should actually be custom-lowered.
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
if (Action == TargetLowering::Legal)
Action = TargetLowering::Custom;
break;
case ISD::READ_REGISTER:
case ISD::WRITE_REGISTER:
    // Named registers are legal in the DAG, but blocked by register-name
    // selection if not implemented by the target (to choose the correct
    // register).
// They'll be converted to Copy(To/From)Reg.
Action = TargetLowering::Legal;
break;
case ISD::DEBUGTRAP:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
if (Action == TargetLowering::Expand) {
// replace ISD::DEBUGTRAP with ISD::TRAP
SDValue NewVal;
NewVal = DAG.getNode(ISD::TRAP, SDLoc(Node), Node->getVTList(),
Node->getOperand(0));
ReplaceNode(Node, NewVal.getNode());
LegalizeOp(NewVal.getNode());
return;
}
break;
default:
if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
Action = TargetLowering::Legal;
} else {
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
}
break;
}
if (SimpleFinishLegalizing) {
SDNode *NewNode = Node;
switch (Node->getOpcode()) {
default: break;
case ISD::SHL:
case ISD::SRL:
case ISD::SRA:
case ISD::ROTL:
case ISD::ROTR:
// Legalizing shifts/rotates requires adjusting the shift amount
// to the appropriate width.
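      // e.g. an i64 shift whose amount operand is i8 has the amount widened
      // (via getShiftAmountOperand) to the target's shift-amount type before
      // the node is rebuilt with UpdateNodeOperands.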
if (!Node->getOperand(1).getValueType().isVector()) {
SDValue SAO =
DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
Node->getOperand(1));
HandleSDNode Handle(SAO);
LegalizeOp(SAO.getNode());
NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
Handle.getValue());
}
break;
case ISD::SRL_PARTS:
case ISD::SRA_PARTS:
case ISD::SHL_PARTS:
// Legalizing shifts/rotates requires adjusting the shift amount
// to the appropriate width.
if (!Node->getOperand(2).getValueType().isVector()) {
SDValue SAO =
DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
Node->getOperand(2));
HandleSDNode Handle(SAO);
LegalizeOp(SAO.getNode());
NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
Node->getOperand(1),
Handle.getValue());
}
break;
}
if (NewNode != Node) {
ReplaceNode(Node, NewNode);
Node = NewNode;
}
switch (Action) {
case TargetLowering::Legal:
return;
case TargetLowering::Custom: {
// FIXME: The handling for custom lowering with multiple results is
// a complete mess.
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res.getNode()) {
        // If lowering just returned this node's first result, it is already
        // legal; nothing to replace.
        if (Res.getNode() == Node && Res.getResNo() == 0)
          return;
if (Node->getNumValues() == 1) {
// We can just directly replace this node with the lowered value.
ReplaceNode(SDValue(Node, 0), Res);
return;
}
SmallVector<SDValue, 8> ResultVals;
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
ResultVals.push_back(Res.getValue(i));
ReplaceNode(Node, ResultVals.data());
return;
}
}
// FALL THROUGH
case TargetLowering::Expand:
ExpandNode(Node);
return;
case TargetLowering::Promote:
PromoteNode(Node);
return;
}
}
switch (Node->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "NODE: ";
Node->dump( &DAG);
dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to legalize this operator!");
case ISD::CALLSEQ_START:
case ISD::CALLSEQ_END:
break;
case ISD::LOAD: {
return LegalizeLoadOps(Node);
}
case ISD::STORE: {
return LegalizeStoreOps(Node);
}
}
}
SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
SDValue Vec = Op.getOperand(0);
SDValue Idx = Op.getOperand(1);
SDLoc dl(Op);
// Before we generate a new store to a temporary stack slot, see if there is
// already one that we can use. There often is because when we scalarize
  // vector operations (using SelectionDAG::UnrollVectorOp for example) a whole
  // series of EXTRACT_VECTOR_ELT nodes is generated, one for each element in
// the vector. If all are expanded here, we don't want one store per vector
// element.
SDValue StackPtr, Ch;
for (SDNode::use_iterator UI = Vec.getNode()->use_begin(),
UE = Vec.getNode()->use_end(); UI != UE; ++UI) {
SDNode *User = *UI;
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(User)) {
if (ST->isIndexed() || ST->isTruncatingStore() ||
ST->getValue() != Vec)
continue;
// Make sure that nothing else could have stored into the destination of
// this store.
if (!ST->getChain().reachesChainWithoutSideEffects(DAG.getEntryNode()))
continue;
StackPtr = ST->getBasePtr();
Ch = SDValue(ST, 0);
break;
}
}
if (!Ch.getNode()) {
// Store the value to a temporary stack slot, then LOAD the returned part.
StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
MachinePointerInfo(), false, false, 0);
}
// Add the offset to the index.
unsigned EltSize =
Vec.getValueType().getVectorElementType().getSizeInBits()/8;
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy(DAG.getDataLayout()));
StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);
SDValue NewLoad;
if (Op.getValueType().isVector())
NewLoad = DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,
MachinePointerInfo(), false, false, false, 0);
else
NewLoad = DAG.getExtLoad(
ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, MachinePointerInfo(),
Vec.getValueType().getVectorElementType(), false, false, false, 0);
// Replace the chain going out of the store, by the one out of the load.
DAG.ReplaceAllUsesOfValueWith(Ch, SDValue(NewLoad.getNode(), 1));
  // We introduced a cycle though, so update the load's operands, making sure
  // to use the original store's chain as an incoming chain.
SmallVector<SDValue, 6> NewLoadOperands(NewLoad->op_begin(),
NewLoad->op_end());
NewLoadOperands[0] = Ch;
NewLoad =
SDValue(DAG.UpdateNodeOperands(NewLoad.getNode(), NewLoadOperands), 0);
return NewLoad;
}
SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
assert(Op.getValueType().isVector() && "Non-vector insert subvector!");
SDValue Vec = Op.getOperand(0);
SDValue Part = Op.getOperand(1);
SDValue Idx = Op.getOperand(2);
SDLoc dl(Op);
// Store the value to a temporary stack slot, then LOAD the returned part.
SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);
// First store the whole vector.
SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
false, false, 0);
// Then store the inserted part.
// Add the offset to the index.
unsigned EltSize =
Vec.getValueType().getVectorElementType().getSizeInBits()/8;
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy(DAG.getDataLayout()));
SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
StackPtr);
// Store the subvector.
Ch = DAG.getStore(Ch, dl, Part, SubStackPtr,
MachinePointerInfo(), false, false, 0);
// Finally, load the updated vector.
return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo,
false, false, false, 0);
}
SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
// We can't handle this case efficiently. Allocate a sufficiently
// aligned object on the stack, store each element into it, then load
// the result as a vector.
// Create the stack frame object.
EVT VT = Node->getValueType(0);
EVT EltVT = VT.getVectorElementType();
SDLoc dl(Node);
SDValue FIPtr = DAG.CreateStackTemporary(VT);
int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);
// Emit a store of each element to the stack slot.
SmallVector<SDValue, 8> Stores;
unsigned TypeByteSize = EltVT.getSizeInBits() / 8;
// Store (in the right endianness) the elements to memory.
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
// Ignore undef elements.
if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue;
unsigned Offset = TypeByteSize*i;
SDValue Idx = DAG.getConstant(Offset, dl, FIPtr.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx);
// If the destination vector element type is narrower than the source
// element type, only store the bits necessary.
if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) {
Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
Node->getOperand(i), Idx,
PtrInfo.getWithOffset(Offset),
EltVT, false, false, 0));
} else
Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
Node->getOperand(i), Idx,
PtrInfo.getWithOffset(Offset),
false, false, 0));
}
SDValue StoreChain;
if (!Stores.empty()) // Not all undef elements?
StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
else
StoreChain = DAG.getEntryNode();
// Result is a load from the stack slot.
return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo,
false, false, false, 0);
}
SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
SDLoc dl(Node);
SDValue Tmp1 = Node->getOperand(0);
SDValue Tmp2 = Node->getOperand(1);
// Get the sign bit of the RHS. First obtain a value that has the same
// sign as the sign bit, i.e. negative if and only if the sign bit is 1.
SDValue SignBit;
EVT FloatVT = Tmp2.getValueType();
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits());
if (TLI.isTypeLegal(IVT)) {
// Convert to an integer with the same sign bit.
SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
} else {
auto &DL = DAG.getDataLayout();
// Store the float to memory, then load the sign part out as an integer.
MVT LoadTy = TLI.getPointerTy(DL);
// First create a temporary that is aligned for both the load and store.
SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);
// Then store the float to it.
SDValue Ch =
DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(),
false, false, 0);
if (DL.isBigEndian()) {
assert(FloatVT.isByteSized() && "Unsupported floating point type!");
// Load out a legal integer with the same sign bit as the float.
SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(),
false, false, false, 0);
} else { // Little endian
SDValue LoadPtr = StackPtr;
// The float may be wider than the integer we are going to load. Advance
// the pointer so that the loaded integer will contain the sign bit.
unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits();
unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8;
LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), LoadPtr,
DAG.getConstant(ByteOffset, dl,
LoadPtr.getValueType()));
// Load a legal integer containing the sign bit.
SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(),
false, false, false, 0);
// Move the sign bit to the top bit of the loaded integer.
unsigned BitShift = LoadTy.getSizeInBits() -
(FloatVT.getSizeInBits() - 8 * ByteOffset);
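      // For example, on an x86-like little-endian target with f80 and a
      // 32-bit pointer type: Strides = (80 - 1) / 32 = 2, ByteOffset = 8,
      // so the load covers bits [64, 96) of the float, and BitShift =
      // 32 - (80 - 64) = 16 moves the sign bit (bit 79) up to bit 31.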
assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?");
if (BitShift)
SignBit = DAG.getNode(
ISD::SHL, dl, LoadTy, SignBit,
DAG.getConstant(BitShift, dl,
TLI.getShiftAmountTy(SignBit.getValueType(), DL)));
}
}
// Now get the sign bit proper, by seeing whether the value is negative.
SignBit = DAG.getSetCC(dl, getSetCCResultType(SignBit.getValueType()),
SignBit,
DAG.getConstant(0, dl, SignBit.getValueType()),
ISD::SETLT);
// Get the absolute value of the result.
SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1);
// Select between the nabs and abs value based on the sign bit of
// the input.
return DAG.getSelect(dl, AbsVal.getValueType(), SignBit,
DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
AbsVal);
}
void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
SmallVectorImpl<SDValue> &Results) {
unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!");
SDLoc dl(Node);
EVT VT = Node->getValueType(0);
SDValue Tmp1 = SDValue(Node, 0);
SDValue Tmp2 = SDValue(Node, 1);
SDValue Tmp3 = Node->getOperand(2);
SDValue Chain = Tmp1.getOperand(0);
// Chain the dynamic stack allocation so that it doesn't modify the stack
// pointer when other instructions are using the stack.
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, dl, true), dl);
SDValue Size = Tmp2.getOperand(1);
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
unsigned StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlignment();
Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
if (Align > StackAlign)
Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
DAG.getConstant(-(uint64_t)Align, dl, VT));
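  // For a power-of-two Align, -(uint64_t)Align is all-ones above the low
  // log2(Align) bits, so the AND rounds the new SP down (the stack grows
  // down) to an Align-byte boundary.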
Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
Results.push_back(Tmp1);
Results.push_back(Tmp2);
}
/// Legalize a SETCC with the given LHS and RHS and condition code CC on the
/// current target.
///
/// If the SETCC has been legalized using AND / OR, then the legalized node
/// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
/// will be set to false.
///
/// If the SETCC has been legalized by using getSetCCSwappedOperands(),
/// then the values of LHS and RHS will be swapped, CC will be set to the
/// new condition, and NeedInvert will be set to false.
///
/// If the SETCC has been legalized using the inverse condcode, then LHS and
/// RHS will be unchanged, CC will set to the inverted condcode, and NeedInvert
/// will be set to true. The caller must invert the result of the SETCC with
/// SelectionDAG::getLogicalNOT() or take equivalent action to swap the effect
/// of a true/false result.
///
/// \returns true if the SetCC has been legalized, false if it hasn't.
bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
SDValue &LHS, SDValue &RHS,
SDValue &CC,
bool &NeedInvert,
SDLoc dl) {
MVT OpVT = LHS.getSimpleValueType();
ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
NeedInvert = false;
switch (TLI.getCondCodeAction(CCCode, OpVT)) {
default: llvm_unreachable("Unknown condition code action!");
case TargetLowering::Legal:
// Nothing to do.
break;
case TargetLowering::Expand: {
ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode);
if (TLI.isCondCodeLegal(InvCC, OpVT)) {
std::swap(LHS, RHS);
CC = DAG.getCondCode(InvCC);
return true;
}
ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
unsigned Opc = 0;
switch (CCCode) {
default: llvm_unreachable("Don't know how to expand this condition!");
case ISD::SETO:
assert(TLI.getCondCodeAction(ISD::SETOEQ, OpVT)
== TargetLowering::Legal
&& "If SETO is expanded, SETOEQ must be legal!");
CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break;
case ISD::SETUO:
assert(TLI.getCondCodeAction(ISD::SETUNE, OpVT)
== TargetLowering::Legal
&& "If SETUO is expanded, SETUNE must be legal!");
CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break;
case ISD::SETOEQ:
case ISD::SETOGT:
case ISD::SETOGE:
case ISD::SETOLT:
case ISD::SETOLE:
case ISD::SETONE:
case ISD::SETUEQ:
case ISD::SETUNE:
case ISD::SETUGT:
case ISD::SETUGE:
case ISD::SETULT:
case ISD::SETULE:
// If we are floating point, assign and break, otherwise fall through.
if (!OpVT.isInteger()) {
// We can use the 4th bit to tell if we are the unordered
// or ordered version of the opcode.
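        // Masking with 0x7 keeps the lt/gt/eq bits, and OR-ing in 0x10
        // selects the "don't care about NaNs" integer-style encoding; e.g.
        // SETOLT expands to (LHS SETLT RHS) AND (LHS SETO RHS), and SETULT
        // expands to (LHS SETLT RHS) OR (LHS SETUO RHS).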
CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
break;
}
      // Fall through if this is an unsigned integer comparison.
case ISD::SETLE:
case ISD::SETGT:
case ISD::SETGE:
case ISD::SETLT:
      // These integer comparisons can only be legalized via the swapped
      // condition computed above; there is no other way to expand them.
llvm_unreachable("Don't know how to expand this condition!");
case ISD::SETNE:
case ISD::SETEQ:
// Try inverting the result of the inverse condition.
InvCC = CCCode == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
if (TLI.isCondCodeLegal(InvCC, OpVT)) {
CC = DAG.getCondCode(InvCC);
NeedInvert = true;
return true;
}
// If inverting the condition didn't work then we have no means to expand
// the condition.
llvm_unreachable("Don't know how to expand this condition!");
}
SDValue SetCC1, SetCC2;
if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
      // If we aren't the ordered or unordered operation,
// then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1);
SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2);
} else {
// Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS)
SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1);
SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2);
}
LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
RHS = SDValue();
CC = SDValue();
return true;
}
}
return false;
}
/// Emit a store/load combination to the stack. This stores
/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
/// a load from the stack slot to DestVT, extending it if needed.
/// The resultant code need not be legal.
SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
EVT SlotVT,
EVT DestVT,
SDLoc dl) {
// Create the stack frame object.
unsigned SrcAlign = DAG.getDataLayout().getPrefTypeAlignment(
SrcOp.getValueType().getTypeForEVT(*DAG.getContext()));
SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
int SPFI = StackPtrFI->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits();
Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
unsigned DestAlign = DAG.getDataLayout().getPrefTypeAlignment(DestType);
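  // Three cases follow: SrcSize > SlotSize needs a truncating store,
  // SlotSize == DestSize a plain load, and SlotSize < DestSize an extending
  // load. E.g. FP_ROUND_INREG of f64 to f32 truncstores the value as f32
  // and then extloads it back as f64.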
  // Emit a store to the stack slot.  Use a truncstore if the input value is
  // larger than the slot type.
SDValue Store;
if (SrcSize > SlotSize)
Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
PtrInfo, SlotVT, false, false, SrcAlign);
else {
assert(SrcSize == SlotSize && "Invalid store");
Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
PtrInfo, false, false, SrcAlign);
}
// Result is a load from the stack slot.
if (SlotSize == DestSize)
return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo,
false, false, false, DestAlign);
assert(SlotSize < DestSize && "Unknown extension!");
return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr,
PtrInfo, SlotVT, false, false, false, DestAlign);
}
SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
SDLoc dl(Node);
// Create a vector sized/aligned stack slot, store the value to element #0,
// then load the whole vector back out.
SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));
FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
int SPFI = StackPtrFI->getIndex();
SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0),
StackPtr,
MachinePointerInfo::getFixedStack(SPFI),
Node->getValueType(0).getVectorElementType(),
false, false, 0);
return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr,
MachinePointerInfo::getFixedStack(SPFI),
false, false, false, 0);
}
static bool
ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG,
const TargetLowering &TLI, SDValue &Res) {
unsigned NumElems = Node->getNumOperands();
SDLoc dl(Node);
EVT VT = Node->getValueType(0);
// Try to group the scalars into pairs, shuffle the pairs together, then
// shuffle the pairs of pairs together, etc. until the vector has
// been built. This will work only if all of the necessary shuffle masks
// are legal.
// We do this in two phases; first to check the legality of the shuffles,
// and next, assuming that all shuffles are legal, to create the new nodes.
for (int Phase = 0; Phase < 2; ++Phase) {
SmallVector<std::pair<SDValue, SmallVector<int, 16> >, 16> IntermedVals,
NewIntermedVals;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue V = Node->getOperand(i);
if (V.getOpcode() == ISD::UNDEF)
continue;
SDValue Vec;
if (Phase)
Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V);
IntermedVals.push_back(std::make_pair(Vec, SmallVector<int, 16>(1, i)));
}
while (IntermedVals.size() > 2) {
NewIntermedVals.clear();
for (unsigned i = 0, e = (IntermedVals.size() & ~1u); i < e; i += 2) {
// This vector and the next vector are shuffled together (simply to
// append the one to the other).
SmallVector<int, 16> ShuffleVec(NumElems, -1);
SmallVector<int, 16> FinalIndices;
FinalIndices.reserve(IntermedVals[i].second.size() +
IntermedVals[i+1].second.size());
int k = 0;
for (unsigned j = 0, f = IntermedVals[i].second.size(); j != f;
++j, ++k) {
ShuffleVec[k] = j;
FinalIndices.push_back(IntermedVals[i].second[j]);
}
for (unsigned j = 0, f = IntermedVals[i+1].second.size(); j != f;
++j, ++k) {
ShuffleVec[k] = NumElems + j;
FinalIndices.push_back(IntermedVals[i+1].second[j]);
}
SDValue Shuffle;
if (Phase)
Shuffle = DAG.getVectorShuffle(VT, dl, IntermedVals[i].first,
IntermedVals[i+1].first,
ShuffleVec.data());
else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
return false;
NewIntermedVals.push_back(
std::make_pair(Shuffle, std::move(FinalIndices)));
}
// If we had an odd number of defined values, then append the last
// element to the array of new vectors.
if ((IntermedVals.size() & 1) != 0)
NewIntermedVals.push_back(IntermedVals.back());
IntermedVals.swap(NewIntermedVals);
}
assert(IntermedVals.size() <= 2 && IntermedVals.size() > 0 &&
"Invalid number of intermediate vectors");
SDValue Vec1 = IntermedVals[0].first;
SDValue Vec2;
if (IntermedVals.size() > 1)
Vec2 = IntermedVals[1].first;
else if (Phase)
Vec2 = DAG.getUNDEF(VT);
SmallVector<int, 16> ShuffleVec(NumElems, -1);
for (unsigned i = 0, e = IntermedVals[0].second.size(); i != e; ++i)
ShuffleVec[IntermedVals[0].second[i]] = i;
    // The assert above allows a single intermediate vector, in which case
    // there is no second set of indices to place.
    if (IntermedVals.size() > 1)
      for (unsigned i = 0, e = IntermedVals[1].second.size(); i != e; ++i)
        ShuffleVec[IntermedVals[1].second[i]] = NumElems + i;
if (Phase)
Res = DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
return false;
}
return true;
}
/// Expand a BUILD_VECTOR node on targets that don't
/// support the operation, but do support the resultant vector type.
SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
unsigned NumElems = Node->getNumOperands();
SDValue Value1, Value2;
SDLoc dl(Node);
EVT VT = Node->getValueType(0);
EVT OpVT = Node->getOperand(0).getValueType();
EVT EltVT = VT.getVectorElementType();
// If the only non-undef value is the low element, turn this into a
// SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
bool isOnlyLowElement = true;
bool MoreThanTwoValues = false;
bool isConstant = true;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue V = Node->getOperand(i);
if (V.getOpcode() == ISD::UNDEF)
continue;
if (i > 0)
isOnlyLowElement = false;
if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
isConstant = false;
if (!Value1.getNode()) {
Value1 = V;
} else if (!Value2.getNode()) {
if (V != Value1)
Value2 = V;
} else if (V != Value1 && V != Value2) {
MoreThanTwoValues = true;
}
}
if (!Value1.getNode())
return DAG.getUNDEF(VT);
if (isOnlyLowElement)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0));
// If all elements are constants, create a load from the constant pool.
if (isConstant) {
SmallVector<Constant*, 16> CV;
for (unsigned i = 0, e = NumElems; i != e; ++i) {
if (ConstantFPSDNode *V =
dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) {
CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue()));
} else if (ConstantSDNode *V =
dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
        if (OpVT == EltVT)
CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
else {
// If OpVT and EltVT don't match, EltVT is not legal and the
// element values have been promoted/truncated earlier. Undo this;
// we don't want a v16i8 to become a v16i32 for example.
const ConstantInt *CI = V->getConstantIntValue();
CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()),
CI->getZExtValue()));
}
} else {
assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
CV.push_back(UndefValue::get(OpNTy));
}
}
Constant *CP = ConstantVector::get(CV);
SDValue CPIdx =
DAG.getConstantPool(CP, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
false, false, false, Alignment);
}
SmallSet<SDValue, 16> DefinedValues;
for (unsigned i = 0; i < NumElems; ++i) {
if (Node->getOperand(i).getOpcode() == ISD::UNDEF)
continue;
DefinedValues.insert(Node->getOperand(i));
}
if (TLI.shouldExpandBuildVectorWithShuffles(VT, DefinedValues.size())) {
if (!MoreThanTwoValues) {
SmallVector<int, 8> ShuffleVec(NumElems, -1);
for (unsigned i = 0; i < NumElems; ++i) {
SDValue V = Node->getOperand(i);
if (V.getOpcode() == ISD::UNDEF)
continue;
ShuffleVec[i] = V == Value1 ? 0 : NumElems;
}
if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
// Get the splatted value into the low element of a vector register.
SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
SDValue Vec2;
if (Value2.getNode())
Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
else
Vec2 = DAG.getUNDEF(VT);
// Return shuffle(LowValVec, undef, <0,0,0,0>)
return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
}
} else {
SDValue Res;
if (ExpandBVWithShuffles(Node, DAG, TLI, Res))
return Res;
}
}
// Otherwise, we can't handle this case efficiently.
return ExpandVectorBuildThroughStack(Node);
}
// Expand a node into a call to a libcall. If the result value
// does not fit into a register, return the lo part and set the hi part to the
// by-reg argument. If it does fit into a single register, return the result
// and leave the Hi part unset.
SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
bool isSigned) {
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (const SDValue &Op : Node->op_values()) {
EVT ArgVT = Op.getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Op;
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
// By default, the input chain to this libcall is the entry node of the
// function. If the libcall is going to be emitted as a tail call then
// TLI.isUsedByReturnOnly will change it to the right chain if the return
// node which is being folded has a non-entry input chain.
SDValue InChain = DAG.getEntryNode();
  // isTailCall may be true since the callee does not reference the caller's
  // stack frame. Check whether the call is in the right position to be a
  // tail call.
SDValue TCChain = InChain;
bool isTailCall = TLI.isInTailCallPosition(DAG, Node, TCChain);
if (isTailCall)
InChain = TCChain;
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(SDLoc(Node)).setChain(InChain)
.setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
.setTailCall(isTailCall).setSExtResult(isSigned).setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
if (!CallInfo.second.getNode())
// It's a tailcall, return the chain (which is the DAG root).
return DAG.getRoot();
return CallInfo.first;
}
/// Generate a libcall taking the given operands as arguments
/// and returning a result of type RetVT.
SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
const SDValue *Ops, unsigned NumOps,
bool isSigned, SDLoc dl) {
TargetLowering::ArgListTy Args;
Args.reserve(NumOps);
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0; i != NumOps; ++i) {
Entry.Node = Ops[i];
Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
.setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
.setSExtResult(isSigned).setZExtResult(!isSigned);
std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo.first;
}
// Expand a node into a call to a libcall. Similar to
// ExpandLibCall except that the first operand is the in-chain.
std::pair<SDValue, SDValue>
SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
SDNode *Node,
bool isSigned) {
SDValue InChain = Node->getOperand(0);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(SDLoc(Node)).setChain(InChain)
.setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
.setSExtResult(isSigned).setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo;
}
SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
RTLIB::Libcall Call_F32,
RTLIB::Libcall Call_F64,
RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_F128,
RTLIB::Libcall Call_PPCF128) {
RTLIB::Libcall LC;
switch (Node->getSimpleValueType(0).SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
case MVT::f32: LC = Call_F32; break;
case MVT::f64: LC = Call_F64; break;
case MVT::f80: LC = Call_F80; break;
case MVT::f128: LC = Call_F128; break;
case MVT::ppcf128: LC = Call_PPCF128; break;
}
return ExpandLibCall(LC, Node, false);
}
SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
RTLIB::Libcall Call_I8,
RTLIB::Libcall Call_I16,
RTLIB::Libcall Call_I32,
RTLIB::Libcall Call_I64,
RTLIB::Libcall Call_I128) {
RTLIB::Libcall LC;
switch (Node->getSimpleValueType(0).SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
case MVT::i8: LC = Call_I8; break;
case MVT::i16: LC = Call_I16; break;
case MVT::i32: LC = Call_I32; break;
case MVT::i64: LC = Call_I64; break;
case MVT::i128: LC = Call_I128; break;
}
return ExpandLibCall(LC, Node, isSigned);
}
/// Return true if divmod libcall is available.
static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
const TargetLowering &TLI) {
RTLIB::Libcall LC;
switch (Node->getSimpleValueType(0).SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
}
return TLI.getLibcallName(LC) != nullptr;
}
/// Only issue divrem libcall if both quotient and remainder are needed.
static bool useDivRem(SDNode *Node, bool isSigned, bool isDIV) {
// The other use might have been replaced with a divrem already.
unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
unsigned OtherOpcode = 0;
if (isSigned)
OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV;
else
OtherOpcode = isDIV ? ISD::UREM : ISD::UDIV;
SDValue Op0 = Node->getOperand(0);
SDValue Op1 = Node->getOperand(1);
for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
SDNode *User = *UI;
if (User == Node)
continue;
if ((User->getOpcode() == OtherOpcode || User->getOpcode() == DivRemOpc) &&
User->getOperand(0) == Op0 &&
User->getOperand(1) == Op1)
return true;
}
return false;
}
/// Issue libcalls to __{u}divmod to compute div / rem pairs.
void
SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
SmallVectorImpl<SDValue> &Results) {
unsigned Opcode = Node->getOpcode();
bool isSigned = Opcode == ISD::SDIVREM;
RTLIB::Libcall LC;
switch (Node->getSimpleValueType(0).SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
}
// The input chain to this libcall is the entry node of the function.
// Legalizing the call will automatically add the previous call to the
// dependence.
SDValue InChain = DAG.getEntryNode();
EVT RetVT = Node->getValueType(0);
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (const SDValue &Op : Node->op_values()) {
EVT ArgVT = Op.getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Op;
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
Args.push_back(Entry);
}
  // Also pass a pointer through which the remainder is returned.
SDValue FIPtr = DAG.CreateStackTemporary(RetVT);
Entry.Node = FIPtr;
Entry.Ty = RetTy->getPointerTo();
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
Args.push_back(Entry);
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy(DAG.getDataLayout()));
SDLoc dl(Node);
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(InChain)
.setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
.setSExtResult(isSigned).setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
// Remainder is loaded back from the stack frame.
SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr,
MachinePointerInfo(), false, false, false, 0);
Results.push_back(CallInfo.first);
Results.push_back(Rem);
}
/// Return true if sincos libcall is available.
static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
RTLIB::Libcall LC;
switch (Node->getSimpleValueType(0).SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
case MVT::f32: LC = RTLIB::SINCOS_F32; break;
case MVT::f64: LC = RTLIB::SINCOS_F64; break;
case MVT::f80: LC = RTLIB::SINCOS_F80; break;
case MVT::f128: LC = RTLIB::SINCOS_F128; break;
case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
}
return TLI.getLibcallName(LC) != nullptr;
}
/// Return true if sincos libcall is available and can be used to combine sin
/// and cos.
static bool canCombineSinCosLibcall(SDNode *Node, const TargetLowering &TLI,
const TargetMachine &TM) {
if (!isSinCosLibcallAvailable(Node, TLI))
return false;
// GNU sin/cos functions set errno while sincos does not. Therefore
// combining sin and cos is only safe if unsafe-fpmath is enabled.
bool isGNU = Triple(TM.getTargetTriple()).getEnvironment() == Triple::GNU;
if (isGNU && !TM.Options.UnsafeFPMath)
return false;
return true;
}
/// Only issue sincos libcall if both sin and cos are needed.
static bool useSinCos(SDNode *Node) {
unsigned OtherOpcode = Node->getOpcode() == ISD::FSIN
? ISD::FCOS : ISD::FSIN;
SDValue Op0 = Node->getOperand(0);
for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
SDNode *User = *UI;
if (User == Node)
continue;
// The other user might have been turned into sincos already.
if (User->getOpcode() == OtherOpcode || User->getOpcode() == ISD::FSINCOS)
return true;
}
return false;
}
/// Issue libcalls to sincos to compute sin / cos pairs.
void
SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
SmallVectorImpl<SDValue> &Results) {
RTLIB::Libcall LC;
switch (Node->getSimpleValueType(0).SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
case MVT::f32: LC = RTLIB::SINCOS_F32; break;
case MVT::f64: LC = RTLIB::SINCOS_F64; break;
case MVT::f80: LC = RTLIB::SINCOS_F80; break;
case MVT::f128: LC = RTLIB::SINCOS_F128; break;
case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
}
// The input chain to this libcall is the entry node of the function.
// Legalizing the call will automatically add the previous call to the
// dependence.
SDValue InChain = DAG.getEntryNode();
EVT RetVT = Node->getValueType(0);
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
// Pass the argument.
Entry.Node = Node->getOperand(0);
Entry.Ty = RetTy;
Entry.isSExt = false;
Entry.isZExt = false;
Args.push_back(Entry);
  // Pass a pointer through which the sin result is returned.
SDValue SinPtr = DAG.CreateStackTemporary(RetVT);
Entry.Node = SinPtr;
Entry.Ty = RetTy->getPointerTo();
Entry.isSExt = false;
Entry.isZExt = false;
Args.push_back(Entry);
  // Also pass a pointer through which the cos result is returned.
SDValue CosPtr = DAG.CreateStackTemporary(RetVT);
Entry.Node = CosPtr;
Entry.Ty = RetTy->getPointerTo();
Entry.isSExt = false;
Entry.isZExt = false;
Args.push_back(Entry);
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy(DAG.getDataLayout()));
SDLoc dl(Node);
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(InChain)
.setCallee(TLI.getLibcallCallingConv(LC),
Type::getVoidTy(*DAG.getContext()), Callee, std::move(Args), 0);
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, SinPtr,
MachinePointerInfo(), false, false, false, 0));
Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, CosPtr,
MachinePointerInfo(), false, false, false, 0));
}
/// This function is responsible for legalizing an
/// INT_TO_FP operation of the specified operand when the target requests that
/// we expand it. At this point, we know that the result and operand types are
/// legal for the target.
SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Op0,
EVT DestVT,
SDLoc dl) {
if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) {
// simple 32-bit [signed|unsigned] integer to float/double expansion
    // Get the stack frame index of an 8-byte buffer.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);
// word offset constant for Hi/Lo address computation
SDValue WordOff = DAG.getConstant(sizeof(int), dl,
StackSlot.getValueType());
    // set up the Hi and Lo (into buffer) addresses based on endianness
SDValue Hi = StackSlot;
SDValue Lo = DAG.getNode(ISD::ADD, dl, StackSlot.getValueType(),
StackSlot, WordOff);
if (DAG.getDataLayout().isLittleEndian())
std::swap(Hi, Lo);
// if signed map to unsigned space
SDValue Op0Mapped;
if (isSigned) {
// constant used to invert sign bit (signed to unsigned mapping)
SDValue SignBit = DAG.getConstant(0x80000000u, dl, MVT::i32);
Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit);
} else {
Op0Mapped = Op0;
}
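    // The stored double has bit pattern hi = 0x43300000, lo = Op0Mapped,
    // i.e. the exact value 2^52 + Op0Mapped. Subtracting the bias below
    // (2^52 + 2^31 for signed inputs, since the low word was XORed with
    // 0x80000000; plain 2^52 for unsigned) leaves exactly (double)Op0.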
// store the lo of the constructed double - based on integer input
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl,
Op0Mapped, Lo, MachinePointerInfo(),
false, false, 0);
// initial hi portion of constructed double
SDValue InitialHi = DAG.getConstant(0x43300000u, dl, MVT::i32);
// store the hi of the constructed double - biased exponent
SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi,
MachinePointerInfo(),
false, false, 0);
// load the constructed double
SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot,
MachinePointerInfo(), false, false, false, 0);
// FP constant to bias correct the final result
SDValue Bias = DAG.getConstantFP(isSigned ?
BitsToDouble(0x4330000080000000ULL) :
BitsToDouble(0x4330000000000000ULL),
dl, MVT::f64);
// subtract the bias
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias);
// final result
SDValue Result;
// handle final rounding
if (DestVT == MVT::f64) {
// do nothing
Result = Sub;
} else if (DestVT.bitsLT(MVT::f64)) {
Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
DAG.getIntPtrConstant(0, dl));
} else if (DestVT.bitsGT(MVT::f64)) {
Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
}
return Result;
}
assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
// Code below here assumes !isSigned without checking again.
// Implementation of unsigned i64 to f64 following the algorithm in
// __floatundidf in compiler_rt. This implementation has the advantage
// of performing rounding correctly, both in the default rounding mode
// and in all alternate rounding modes.
// TODO: Generalize this for use with other types.
if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) {
SDValue TwoP52 =
DAG.getConstant(UINT64_C(0x4330000000000000), dl, MVT::i64);
SDValue TwoP84PlusTwoP52 =
DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), dl,
MVT::f64);
SDValue TwoP84 =
DAG.getConstant(UINT64_C(0x4530000000000000), dl, MVT::i64);
SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32);
SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0,
DAG.getConstant(32, dl, MVT::i64));
SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52);
SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr);
SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr);
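    // LoFlt's bit pattern is 2^52 | lo, i.e. the exact double 2^52 + lo,
    // and HiFlt's is 2^84 | hi, i.e. 2^84 + hi * 2^32. Subtracting
    // (2^84 + 2^52) and then adding LoFlt therefore yields hi * 2^32 + lo
    // with a single, correctly rounded FADD.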
SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
TwoP84PlusTwoP52);
return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
}
// Implementation of unsigned i64 to f32.
// TODO: Generalize this for use with other types.
if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) {
// For unsigned conversions, convert them to signed conversions using the
// algorithm from the x86_64 __floatundidf in compiler_rt.
if (!isSigned) {
SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0);
SDValue ShiftConst = DAG.getConstant(
1, dl, TLI.getShiftAmountTy(Op0.getValueType(), DAG.getDataLayout()));
SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst);
SDValue AndConst = DAG.getConstant(1, dl, MVT::i64);
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst);
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr);
SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or);
SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt);
// TODO: This really should be implemented using a branch rather than a
// select. We happen to get lucky and machinesink does the right
// thing most of the time. This would be a good candidate for a
      // pseudo-op, or, even better, for whole-function isel.
SDValue SignBitTest = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
Op0, DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
return DAG.getSelect(dl, MVT::f32, SignBitTest, Slow, Fast);
}
// Otherwise, implement the fully general conversion.
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
DAG.getConstant(UINT64_C(0xfffffffffffff800), dl, MVT::i64));
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And,
DAG.getConstant(UINT64_C(0x800), dl, MVT::i64));
SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
DAG.getConstant(UINT64_C(0x7ff), dl, MVT::i64));
SDValue Ne = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), And2,
DAG.getConstant(UINT64_C(0), dl, MVT::i64),
ISD::SETNE);
SDValue Sel = DAG.getSelect(dl, MVT::i64, Ne, Or, Op0);
SDValue Ge = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), Op0,
DAG.getConstant(UINT64_C(0x0020000000000000), dl,
MVT::i64),
ISD::SETUGE);
SDValue Sel2 = DAG.getSelect(dl, MVT::i64, Ge, Sel, Op0);
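    // For inputs >= 2^53 the low bits cannot survive the f64 arithmetic
    // below, so any set bits among the low 11 are collapsed into a single
    // sticky bit (0x800); this keeps the final rounding to f32 correct in
    // all rounding modes.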
EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType(), DAG.getDataLayout());
SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
DAG.getConstant(32, dl, SHVT));
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh);
SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc);
SDValue TwoP32 =
DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), dl,
MVT::f64);
SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt);
SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2);
SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo);
SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2);
return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd,
DAG.getIntPtrConstant(0, dl));
}
SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);
SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(Op0.getValueType()),
Op0,
DAG.getConstant(0, dl, Op0.getValueType()),
ISD::SETLT);
SDValue Zero = DAG.getIntPtrConstant(0, dl),
Four = DAG.getIntPtrConstant(4, dl);
SDValue CstOffset = DAG.getSelect(dl, Zero.getValueType(),
SignSet, Four, Zero);
// If the sign bit of the integer is set, the large number will be treated
// as a negative number. To counteract this, the dynamic code adds an
// offset depending on the data type.
uint64_t FF;
switch (Op0.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unsupported integer type!");
case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
}
if (DAG.getDataLayout().isLittleEndian())
FF <<= 32;
Constant *FudgeFactor = ConstantInt::get(
Type::getInt64Ty(*DAG.getContext()), FF);
SDValue CPIdx =
DAG.getConstantPool(FudgeFactor, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
CPIdx = DAG.getNode(ISD::ADD, dl, CPIdx.getValueType(), CPIdx, CstOffset);
Alignment = std::min(Alignment, 4u);
SDValue FudgeInReg;
if (DestVT == MVT::f32)
FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
false, false, false, Alignment);
else {
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
MVT::f32, false, false, false, Alignment);
HandleSDNode Handle(Load);
LegalizeOp(Load.getNode());
FudgeInReg = Handle.getValue();
}
return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
}
/// This function is responsible for legalizing a
/// *INT_TO_FP operation of the specified operand when the target requests that
/// we promote it. At this point, we know that the result and operand types are
/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
/// operation that takes a larger input.
SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
EVT DestVT,
bool isSigned,
SDLoc dl) {
// First step, figure out the appropriate *INT_TO_FP operation to use.
EVT NewInTy = LegalOp.getValueType();
unsigned OpToUse = 0;
// Scan for the appropriate larger type to use.
while (1) {
NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
assert(NewInTy.isInteger() && "Ran out of possibilities!");
// If the target supports SINT_TO_FP of this type, use it.
if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) {
OpToUse = ISD::SINT_TO_FP;
break;
}
if (isSigned) continue;
// If the target supports UINT_TO_FP of this type, use it.
if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) {
OpToUse = ISD::UINT_TO_FP;
break;
}
// Otherwise, try a larger type.
}
// Okay, we found the operation and type to use. Zero extend our input to the
// desired type then run the operation on it.
return DAG.getNode(OpToUse, dl, DestVT,
DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
dl, NewInTy, LegalOp));
}
/// This function is responsible for legalizing a
/// FP_TO_*INT operation of the specified operand when the target requests that
/// we promote it. At this point, we know that the result and operand types are
/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
/// operation that returns a larger result.
SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
EVT DestVT,
bool isSigned,
SDLoc dl) {
// First step, figure out the appropriate FP_TO*INT operation to use.
EVT NewOutTy = DestVT;
unsigned OpToUse = 0;
// Scan for the appropriate larger type to use.
while (1) {
NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
assert(NewOutTy.isInteger() && "Ran out of possibilities!");
// A larger signed type can hold all unsigned values of the requested type,
// so using FP_TO_SINT is valid
if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) {
OpToUse = ISD::FP_TO_SINT;
break;
}
    // However, a signed conversion may see values < 0.0, which FP_TO_UINT
    // cannot handle, so FP_TO_UINT is only tried when the result is
    // unsigned.
if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) {
OpToUse = ISD::FP_TO_UINT;
break;
}
// Otherwise, try a larger type.
}
// Okay, we found the operation and type to use.
SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp);
// Truncate the result of the extended FP_TO_*INT operation to the desired
// size.
return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation);
}
/// Open code the operations for BSWAP of the specified operation.
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, SDLoc dl) {
EVT VT = Op.getValueType();
EVT SHVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled Expand type in BSWAP!");
case MVT::i16:
Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
case MVT::i32:
Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
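    // Byte lanes after the shifts (input bytes B3:B2:B1:B0, B3 most
    // significant): Tmp4 = B0:0:0:0, Tmp3 = B2:B1:B0:0, Tmp2 = 0:B3:B2:B1,
    // Tmp1 = 0:0:0:B3. The masks below keep only B1 in Tmp3 and B2 in Tmp2
    // before everything is OR'd together as B0:B1:B2:B3.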
Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3,
DAG.getConstant(0xFF0000, dl, VT));
Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, dl, VT));
Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
case MVT::i64:
Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, dl, SHVT));
Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, dl, SHVT));
Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, dl, SHVT));
Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, dl, SHVT));
Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7,
DAG.getConstant(255ULL<<48, dl, VT));
Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6,
DAG.getConstant(255ULL<<40, dl, VT));
Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5,
DAG.getConstant(255ULL<<32, dl, VT));
Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4,
DAG.getConstant(255ULL<<24, dl, VT));
Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3,
DAG.getConstant(255ULL<<16, dl, VT));
Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2,
DAG.getConstant(255ULL<<8 , dl, VT));
Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7);
Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5);
Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6);
Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4);
}
}
/// Expand the specified bitcount instruction into operations.
SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
SDLoc dl) {
switch (Opc) {
default: llvm_unreachable("Cannot expand this yet!");
case ISD::CTPOP: {
EVT VT = Op.getValueType();
EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
unsigned Len = VT.getSizeInBits();
assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 &&
"CTPOP not implemented for this type.");
// This is the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
SDValue Mask55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)),
dl, VT);
SDValue Mask33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)),
dl, VT);
SDValue Mask0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)),
dl, VT);
SDValue Mask01 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)),
dl, VT);
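    // Worked example on the 8-bit value 0x0B (0b00001011, popcount 3):
    // step 1 yields 0x06 (per-2-bit counts 0,0,1,2), step 2 yields 0x03
    // (per-nibble counts 0,3), step 3 keeps 0x03, and for Len == 8 the
    // final multiply-and-shift is a no-op, returning 3.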
// v = v - ((v >> 1) & 0x55555555...)
Op = DAG.getNode(ISD::SUB, dl, VT, Op,
DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::SRL, dl, VT, Op,
DAG.getConstant(1, dl, ShVT)),
Mask55));
// v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
Op = DAG.getNode(ISD::ADD, dl, VT,
DAG.getNode(ISD::AND, dl, VT, Op, Mask33),
DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::SRL, dl, VT, Op,
DAG.getConstant(2, dl, ShVT)),
Mask33));
// v = (v + (v >> 4)) & 0x0F0F0F0F...
Op = DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::ADD, dl, VT, Op,
DAG.getNode(ISD::SRL, dl, VT, Op,
DAG.getConstant(4, dl, ShVT))),
Mask0F);
// v = (v * 0x01010101...) >> (Len - 8)
Op = DAG.getNode(ISD::SRL, dl, VT,
DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
DAG.getConstant(Len - 8, dl, ShVT));
return Op;
}
case ISD::CTLZ_ZERO_UNDEF:
// This trivially expands to CTLZ.
return DAG.getNode(ISD::CTLZ, dl, Op.getValueType(), Op);
case ISD::CTLZ: {
// for now, we do this:
// x = x | (x >> 1);
// x = x | (x >> 2);
// ...
// x = x | (x >>16);
// x = x | (x >>32); // for 64-bit input
// return popcount(~x);
//
// Ref: "Hacker's Delight" by Henry Warren
EVT VT = Op.getValueType();
EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
unsigned len = VT.getSizeInBits();
for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
SDValue Tmp3 = DAG.getConstant(1ULL << i, dl, ShVT);
Op = DAG.getNode(ISD::OR, dl, VT, Op,
DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3));
}
Op = DAG.getNOT(dl, Op, VT);
return DAG.getNode(ISD::CTPOP, dl, VT, Op);
}
case ISD::CTTZ_ZERO_UNDEF:
// This trivially expands to CTTZ.
return DAG.getNode(ISD::CTTZ, dl, Op.getValueType(), Op);
case ISD::CTTZ: {
// for now, we use: { return popcount(~x & (x - 1)); }
// unless the target has ctlz but not ctpop, in which case we use:
// { return 32 - nlz(~x & (x-1)); }
// Ref: "Hacker's Delight" by Henry Warren
EVT VT = Op.getValueType();
SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT,
DAG.getNOT(dl, Op, VT),
DAG.getNode(ISD::SUB, dl, VT, Op,
DAG.getConstant(1, dl, VT)));
// If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) &&
TLI.isOperationLegalOrCustom(ISD::CTLZ, VT))
return DAG.getNode(ISD::SUB, dl, VT,
DAG.getConstant(VT.getSizeInBits(), dl, VT),
DAG.getNode(ISD::CTLZ, dl, VT, Tmp3));
return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3);
}
}
}
std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
unsigned Opc = Node->getOpcode();
MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
RTLIB::Libcall LC = RTLIB::getATOMIC(Opc, VT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected atomic op or value type!");
return ExpandChainLibCall(LC, Node, false);
}
void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
SmallVector<SDValue, 8> Results;
SDLoc dl(Node);
SDValue Tmp1, Tmp2, Tmp3, Tmp4;
bool NeedInvert;
switch (Node->getOpcode()) {
case ISD::CTPOP:
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl);
Results.push_back(Tmp1);
break;
case ISD::BSWAP:
Results.push_back(ExpandBSWAP(Node->getOperand(0), dl));
break;
case ISD::FRAMEADDR:
case ISD::RETURNADDR:
case ISD::FRAME_TO_ARGS_OFFSET:
Results.push_back(DAG.getConstant(0, dl, Node->getValueType(0)));
break;
case ISD::FLT_ROUNDS_:
Results.push_back(DAG.getConstant(1, dl, Node->getValueType(0)));
break;
case ISD::EH_RETURN:
case ISD::EH_LABEL:
case ISD::PREFETCH:
case ISD::VAEND:
case ISD::EH_SJLJ_LONGJMP:
// If the target didn't expand these, there's nothing to do, so just
// preserve the chain and be done.
Results.push_back(Node->getOperand(0));
break;
case ISD::EH_SJLJ_SETJMP:
// If the target didn't expand this, just return 'zero' and preserve the
// chain.
Results.push_back(DAG.getConstant(0, dl, MVT::i32));
Results.push_back(Node->getOperand(0));
break;
case ISD::ATOMIC_FENCE: {
    // If the target didn't lower this, lower it to a '__sync_synchronize()'
    // call.
// FIXME: handle "fence singlethread" more efficiently.
TargetLowering::ArgListTy Args;
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
.setChain(Node->getOperand(0))
.setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
DAG.getExternalSymbol("__sync_synchronize",
TLI.getPointerTy(DAG.getDataLayout())),
std::move(Args), 0);
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
Results.push_back(CallResult.second);
break;
}
case ISD::ATOMIC_LOAD: {
// There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP.
SDValue Zero = DAG.getConstant(0, dl, Node->getValueType(0));
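    // A compare-and-swap of 0 for 0 never changes memory (a failed compare
    // stores nothing; a successful one stores the same zero back), but its
    // result is an atomic load of the location.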
SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
SDValue Swap = DAG.getAtomicCmpSwap(
ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
Node->getOperand(0), Node->getOperand(1), Zero, Zero,
cast<AtomicSDNode>(Node)->getMemOperand(),
cast<AtomicSDNode>(Node)->getOrdering(),
cast<AtomicSDNode>(Node)->getOrdering(),
cast<AtomicSDNode>(Node)->getSynchScope());
Results.push_back(Swap.getValue(0));
Results.push_back(Swap.getValue(1));
break;
}
case ISD::ATOMIC_STORE: {
// There is no libcall for atomic store; fake it with ATOMIC_SWAP.
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
Node->getOperand(0),
Node->getOperand(1), Node->getOperand(2),
cast<AtomicSDNode>(Node)->getMemOperand(),
cast<AtomicSDNode>(Node)->getOrdering(),
cast<AtomicSDNode>(Node)->getSynchScope());
Results.push_back(Swap.getValue(1));
break;
}
// By default, atomic intrinsics are marked Legal and lowered. Targets
// which don't support them directly, however, may want libcalls, in which
// case they mark them Expand, and we get here.
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_CMP_SWAP: {
std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node);
Results.push_back(Tmp.first);
Results.push_back(Tmp.second);
break;
}
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
// Expanding an ATOMIC_CMP_SWAP_WITH_SUCCESS produces an ATOMIC_CMP_SWAP and
// splits out the success value as a comparison. Expanding the resulting
// ATOMIC_CMP_SWAP will produce a libcall.
SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
SDValue Res = DAG.getAtomicCmpSwap(
ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
Node->getOperand(0), Node->getOperand(1), Node->getOperand(2),
Node->getOperand(3), cast<MemSDNode>(Node)->getMemOperand(),
cast<AtomicSDNode>(Node)->getSuccessOrdering(),
cast<AtomicSDNode>(Node)->getFailureOrdering(),
cast<AtomicSDNode>(Node)->getSynchScope());
SDValue Success = DAG.getSetCC(SDLoc(Node), Node->getValueType(1),
Res, Node->getOperand(2), ISD::SETEQ);
Results.push_back(Res.getValue(0));
Results.push_back(Success);
Results.push_back(Res.getValue(1));
break;
}
case ISD::DYNAMIC_STACKALLOC:
ExpandDYNAMIC_STACKALLOC(Node, Results);
break;
case ISD::MERGE_VALUES:
for (unsigned i = 0; i < Node->getNumValues(); i++)
Results.push_back(Node->getOperand(i));
break;
case ISD::UNDEF: {
EVT VT = Node->getValueType(0);
if (VT.isInteger())
Results.push_back(DAG.getConstant(0, dl, VT));
else {
assert(VT.isFloatingPoint() && "Unknown value type!");
Results.push_back(DAG.getConstantFP(0, dl, VT));
}
break;
}
case ISD::TRAP: {
    // If this operation is not supported, lower it to an 'abort()' call.
TargetLowering::ArgListTy Args;
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
.setChain(Node->getOperand(0))
.setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
DAG.getExternalSymbol("abort",
TLI.getPointerTy(DAG.getDataLayout())),
std::move(Args), 0);
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
Results.push_back(CallResult.second);
break;
}
case ISD::FP_ROUND:
case ISD::BITCAST:
Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
Node->getValueType(0), dl);
Results.push_back(Tmp1);
break;
case ISD::FP_EXTEND:
Tmp1 = EmitStackConvert(Node->getOperand(0),
Node->getOperand(0).getValueType(),
Node->getValueType(0), dl);
Results.push_back(Tmp1);
break;
case ISD::SIGN_EXTEND_INREG: {
// NOTE: we could fall back on load/store here too for targets without
// SAR. However, it is doubtful that any exist.
EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
EVT VT = Node->getValueType(0);
EVT ShiftAmountTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
if (VT.isVector())
ShiftAmountTy = VT;
unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
ExtraVT.getScalarType().getSizeInBits();
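    // Shifting left by BitsDiff and then arithmetic-shifting right by the
    // same amount replicates bit (ExtraVT bits - 1) through the high bits;
    // e.g. sign-extending an i8 held in an i32 uses BitsDiff == 24.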
SDValue ShiftCst = DAG.getConstant(BitsDiff, dl, ShiftAmountTy);
Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
Node->getOperand(0), ShiftCst);
Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst);
Results.push_back(Tmp1);
break;
}
case ISD::FP_ROUND_INREG: {
// The only way we can lower this is to turn it into a TRUNCSTORE,
// EXTLOAD pair, targeting a temporary location (a stack slot).
// NOTE: there is a choice here between constantly creating new stack
// slots and always reusing the same one. We currently always create
// new ones, as reuse may inhibit scheduling.
EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
Node->getValueType(0), dl);
Results.push_back(Tmp1);
break;
}
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP,
Node->getOperand(0), Node->getValueType(0), dl);
Results.push_back(Tmp1);
break;
case ISD::FP_TO_SINT:
if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG))
Results.push_back(Tmp1);
break;
case ISD::FP_TO_UINT: {
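// Expand FP_TO_UINT via FP_TO_SINT: if the input is below 2^(N-1) (N being
// the width of the result), a plain signed conversion suffices; otherwise
// convert (input - 2^(N-1)) and XOR the sign bit back into the result.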
SDValue True, False;
EVT VT = Node->getOperand(0).getValueType();
EVT NVT = Node->getValueType(0);
APFloat apf(DAG.EVTToAPFloatSemantics(VT),
APInt::getNullValue(VT.getSizeInBits()));
APInt x = APInt::getSignBit(NVT.getSizeInBits());
(void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
Tmp1 = DAG.getConstantFP(apf, dl, VT);
Tmp2 = DAG.getSetCC(dl, getSetCCResultType(VT),
Node->getOperand(0),
Tmp1, ISD::SETLT);
True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0));
False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT,
DAG.getNode(ISD::FSUB, dl, VT,
Node->getOperand(0), Tmp1));
False = DAG.getNode(ISD::XOR, dl, NVT, False,
DAG.getConstant(x, dl, NVT));
Tmp1 = DAG.getSelect(dl, NVT, Tmp2, True, False);
Results.push_back(Tmp1);
break;
}
case ISD::VAARG: {
const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
EVT VT = Node->getValueType(0);
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
unsigned Align = Node->getConstantOperandVal(3);
SDValue VAListLoad =
DAG.getLoad(TLI.getPointerTy(DAG.getDataLayout()), dl, Tmp1, Tmp2,
MachinePointerInfo(V), false, false, false, 0);
SDValue VAList = VAListLoad;
if (Align > TLI.getMinStackArgumentAlignment()) {
assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
VAList = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
DAG.getConstant(Align - 1, dl,
VAList.getValueType()));
VAList = DAG.getNode(ISD::AND, dl, VAList.getValueType(), VAList,
DAG.getConstant(-(int64_t)Align, dl,
VAList.getValueType()));
}
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
DAG.getConstant(DAG.getDataLayout().getTypeAllocSize(
VT.getTypeForEVT(*DAG.getContext())),
dl, VAList.getValueType()));
// Store the incremented VAList to the legalized pointer
Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
MachinePointerInfo(V), false, false, 0);
// Load the actual argument out of the pointer VAList
Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
false, false, false, 0));
Results.push_back(Results[0].getValue(1));
break;
}
case ISD::VACOPY: {
// This defaults to loading a pointer from the input and storing it to the
// output, returning the chain.
const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
Tmp1 = DAG.getLoad(TLI.getPointerTy(DAG.getDataLayout()), dl,
Node->getOperand(0), Node->getOperand(2),
MachinePointerInfo(VS), false, false, false, 0);
Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
MachinePointerInfo(VD), false, false, 0);
Results.push_back(Tmp1);
break;
}
case ISD::EXTRACT_VECTOR_ELT:
if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
// This must be an access of the only element. Return it.
Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
Node->getOperand(0));
else
Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
Results.push_back(Tmp1);
break;
case ISD::EXTRACT_SUBVECTOR:
Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
break;
case ISD::INSERT_SUBVECTOR:
Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
break;
case ISD::CONCAT_VECTORS: {
Results.push_back(ExpandVectorBuildThroughStack(Node));
break;
}
case ISD::SCALAR_TO_VECTOR:
Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
break;
case ISD::INSERT_VECTOR_ELT:
Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
Node->getOperand(1),
Node->getOperand(2), dl));
break;
case ISD::VECTOR_SHUFFLE: {
SmallVector<int, 32> NewMask;
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
EVT VT = Node->getValueType(0);
EVT EltVT = VT.getVectorElementType();
SDValue Op0 = Node->getOperand(0);
SDValue Op1 = Node->getOperand(1);
if (!TLI.isTypeLegal(EltVT)) {
EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
// BUILD_VECTOR operands are allowed to be wider than the element type.
// But if NewEltVT is smaller than EltVT, the BUILD_VECTOR does not
// accept it.
if (NewEltVT.bitsLT(EltVT)) {
// Convert shuffle node.
// If original node was v4i64 and the new EltVT is i32,
// cast operands to v8i32 and re-build the mask.
// Calculate the new VT; its total size should equal the original VT's.
EVT NewVT =
EVT::getVectorVT(*DAG.getContext(), NewEltVT,
VT.getSizeInBits() / NewEltVT.getSizeInBits());
assert(NewVT.bitsEq(VT));
// cast operands to new VT
Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0);
Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1);
// Convert the shuffle mask
unsigned int factor =
NewVT.getVectorNumElements()/VT.getVectorNumElements();
// EltVT gets smaller
assert(factor > 0);
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
if (Mask[i] < 0) {
for (unsigned fi = 0; fi < factor; ++fi)
NewMask.push_back(Mask[i]);
}
else {
for (unsigned fi = 0; fi < factor; ++fi)
NewMask.push_back(Mask[i]*factor+fi);
}
}
Mask = NewMask;
VT = NewVT;
}
EltVT = NewEltVT;
}
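// Expand the shuffle by extracting every source element (UNDEF for
// negative mask entries) and rebuilding the result with a BUILD_VECTOR.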
unsigned NumElems = VT.getVectorNumElements();
SmallVector<SDValue, 16> Ops;
for (unsigned i = 0; i != NumElems; ++i) {
if (Mask[i] < 0) {
Ops.push_back(DAG.getUNDEF(EltVT));
continue;
}
unsigned Idx = Mask[i];
if (Idx < NumElems)
Ops.push_back(DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))));
else
Ops.push_back(DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op1,
DAG.getConstant(Idx - NumElems, dl,
TLI.getVectorIdxTy(DAG.getDataLayout()))));
}
Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
// We may have changed the BUILD_VECTOR type. Cast it back to the Node type.
Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1);
Results.push_back(Tmp1);
break;
}
case ISD::EXTRACT_ELEMENT: {
EVT OpTy = Node->getOperand(0).getValueType();
if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
// 1 -> Hi
Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
DAG.getConstant(OpTy.getSizeInBits() / 2, dl,
TLI.getShiftAmountTy(
Node->getOperand(0).getValueType(),
DAG.getDataLayout())));
Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
} else {
// 0 -> Lo
Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
Node->getOperand(0));
}
Results.push_back(Tmp1);
break;
}
case ISD::STACKSAVE:
// Expand to CopyFromReg if the target set
// StackPointerRegisterToSaveRestore.
if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
Node->getValueType(0)));
Results.push_back(Results[0].getValue(1));
} else {
Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
Results.push_back(Node->getOperand(0));
}
break;
case ISD::STACKRESTORE:
// Expand to CopyToReg if the target set
// StackPointerRegisterToSaveRestore.
if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
Node->getOperand(1)));
} else {
Results.push_back(Node->getOperand(0));
}
break;
case ISD::FCOPYSIGN:
Results.push_back(ExpandFCOPYSIGN(Node));
break;
case ISD::FNEG:
// Expand Y = FNEG(X) -> Y = SUB -0.0, X
Tmp1 = DAG.getConstantFP(-0.0, dl, Node->getValueType(0));
Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
Node->getOperand(0));
Results.push_back(Tmp1);
break;
case ISD::FABS: {
// Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
EVT VT = Node->getValueType(0);
Tmp1 = Node->getOperand(0);
Tmp2 = DAG.getConstantFP(0.0, dl, VT);
Tmp2 = DAG.getSetCC(dl, getSetCCResultType(Tmp1.getValueType()),
Tmp1, Tmp2, ISD::SETUGT);
Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
Tmp1 = DAG.getSelect(dl, VT, Tmp2, Tmp1, Tmp3);
Results.push_back(Tmp1);
break;
}
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX: {
// Expand Y = MAX(A, B) -> Y = (A > B) ? A : B
ISD::CondCode Pred;
switch (Node->getOpcode()) {
default: llvm_unreachable("How did we get here?");
case ISD::SMAX: Pred = ISD::SETGT; break;
case ISD::SMIN: Pred = ISD::SETLT; break;
case ISD::UMAX: Pred = ISD::SETUGT; break;
case ISD::UMIN: Pred = ISD::SETULT; break;
}
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp1, Tmp2, Pred);
Results.push_back(Tmp1);
break;
}
case ISD::FMINNUM:
Results.push_back(ExpandFPLibCall(Node, RTLIB::FMIN_F32, RTLIB::FMIN_F64,
RTLIB::FMIN_F80, RTLIB::FMIN_F128,
RTLIB::FMIN_PPCF128));
break;
case ISD::FMAXNUM:
Results.push_back(ExpandFPLibCall(Node, RTLIB::FMAX_F32, RTLIB::FMAX_F64,
RTLIB::FMAX_F80, RTLIB::FMAX_F128,
RTLIB::FMAX_PPCF128));
break;
case ISD::FSQRT:
Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
RTLIB::SQRT_F80, RTLIB::SQRT_F128,
RTLIB::SQRT_PPCF128));
break;
case ISD::FSIN:
case ISD::FCOS: {
EVT VT = Node->getValueType(0);
bool isSIN = Node->getOpcode() == ISD::FSIN;
// Turn fsin / fcos into ISD::FSINCOS node if there is a pair of fsin /
// fcos which share the same operand and both are used.
if ((TLI.isOperationLegalOrCustom(ISD::FSINCOS, VT) ||
canCombineSinCosLibcall(Node, TLI, TM))
&& useSinCos(Node)) {
SDVTList VTs = DAG.getVTList(VT, VT);
Tmp1 = DAG.getNode(ISD::FSINCOS, dl, VTs, Node->getOperand(0));
if (!isSIN)
Tmp1 = Tmp1.getValue(1);
Results.push_back(Tmp1);
} else if (isSIN) {
Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
RTLIB::SIN_F80, RTLIB::SIN_F128,
RTLIB::SIN_PPCF128));
} else {
Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
RTLIB::COS_F80, RTLIB::COS_F128,
RTLIB::COS_PPCF128));
}
break;
}
case ISD::FSINCOS:
// Expand into sincos libcall.
ExpandSinCosLibCall(Node, Results);
break;
case ISD::FLOG:
Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
RTLIB::LOG_F80, RTLIB::LOG_F128,
RTLIB::LOG_PPCF128));
break;
case ISD::FLOG2:
Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
RTLIB::LOG2_F80, RTLIB::LOG2_F128,
RTLIB::LOG2_PPCF128));
break;
case ISD::FLOG10:
Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
RTLIB::LOG10_F80, RTLIB::LOG10_F128,
RTLIB::LOG10_PPCF128));
break;
case ISD::FEXP:
Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
RTLIB::EXP_F80, RTLIB::EXP_F128,
RTLIB::EXP_PPCF128));
break;
case ISD::FEXP2:
Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
RTLIB::EXP2_F80, RTLIB::EXP2_F128,
RTLIB::EXP2_PPCF128));
break;
case ISD::FTRUNC:
Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
RTLIB::TRUNC_F80, RTLIB::TRUNC_F128,
RTLIB::TRUNC_PPCF128));
break;
case ISD::FFLOOR:
Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
RTLIB::FLOOR_F80, RTLIB::FLOOR_F128,
RTLIB::FLOOR_PPCF128));
break;
case ISD::FCEIL:
Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
RTLIB::CEIL_F80, RTLIB::CEIL_F128,
RTLIB::CEIL_PPCF128));
break;
case ISD::FRINT:
Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
RTLIB::RINT_F80, RTLIB::RINT_F128,
RTLIB::RINT_PPCF128));
break;
case ISD::FNEARBYINT:
Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
RTLIB::NEARBYINT_F64,
RTLIB::NEARBYINT_F80,
RTLIB::NEARBYINT_F128,
RTLIB::NEARBYINT_PPCF128));
break;
case ISD::FROUND:
Results.push_back(ExpandFPLibCall(Node, RTLIB::ROUND_F32,
RTLIB::ROUND_F64,
RTLIB::ROUND_F80,
RTLIB::ROUND_F128,
RTLIB::ROUND_PPCF128));
break;
case ISD::FPOWI:
Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
RTLIB::POWI_F80, RTLIB::POWI_F128,
RTLIB::POWI_PPCF128));
break;
case ISD::FPOW:
Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
RTLIB::POW_F80, RTLIB::POW_F128,
RTLIB::POW_PPCF128));
break;
case ISD::FDIV:
Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
RTLIB::DIV_F80, RTLIB::DIV_F128,
RTLIB::DIV_PPCF128));
break;
case ISD::FREM:
Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
RTLIB::REM_F80, RTLIB::REM_F128,
RTLIB::REM_PPCF128));
break;
case ISD::FMA:
Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
RTLIB::FMA_F80, RTLIB::FMA_F128,
RTLIB::FMA_PPCF128));
break;
case ISD::FMAD:
llvm_unreachable("Illegal fmad should never be formed");
case ISD::FADD:
Results.push_back(ExpandFPLibCall(Node, RTLIB::ADD_F32, RTLIB::ADD_F64,
RTLIB::ADD_F80, RTLIB::ADD_F128,
RTLIB::ADD_PPCF128));
break;
case ISD::FMUL:
Results.push_back(ExpandFPLibCall(Node, RTLIB::MUL_F32, RTLIB::MUL_F64,
RTLIB::MUL_F80, RTLIB::MUL_F128,
RTLIB::MUL_PPCF128));
break;
case ISD::FP16_TO_FP: {
if (Node->getValueType(0) == MVT::f32) {
Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
break;
}
// We can extend to types bigger than f32 in two steps without changing the
// result. Since "f16 -> f32" is much more commonly available, give CodeGen
// the option of emitting that before resorting to a libcall.
SDValue Res =
DAG.getNode(ISD::FP16_TO_FP, dl, MVT::f32, Node->getOperand(0));
Results.push_back(
DAG.getNode(ISD::FP_EXTEND, dl, Node->getValueType(0), Res));
break;
}
case ISD::FP_TO_FP16: {
if (!TLI.useSoftFloat() && TM.Options.UnsafeFPMath) {
SDValue Op = Node->getOperand(0);
MVT SVT = Op.getSimpleValueType();
if ((SVT == MVT::f64 || SVT == MVT::f80) &&
TLI.isOperationLegalOrCustom(ISD::FP_TO_FP16, MVT::f32)) {
// Under fastmath, we can expand this node into a fround followed by
// a float-half conversion.
SDValue FloatVal = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Op,
DAG.getIntPtrConstant(0, dl));
Results.push_back(
DAG.getNode(ISD::FP_TO_FP16, dl, MVT::i16, FloatVal));
break;
}
}
RTLIB::Libcall LC =
RTLIB::getFPROUND(Node->getOperand(0).getValueType(), MVT::f16);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to expand fp_to_fp16");
Results.push_back(ExpandLibCall(LC, Node, false));
break;
}
case ISD::ConstantFP: {
ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
// Check to see if this FP immediate is already legal.
// If this is a legal constant, turn it into a TargetConstantFP node.
if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
Results.push_back(ExpandConstantFP(CFP, true));
break;
}
case ISD::FSUB: {
EVT VT = Node->getValueType(0);
if (TLI.isOperationLegalOrCustom(ISD::FADD, VT) &&
TLI.isOperationLegalOrCustom(ISD::FNEG, VT)) {
Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1));
Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1);
Results.push_back(Tmp1);
} else {
Results.push_back(ExpandFPLibCall(Node, RTLIB::SUB_F32, RTLIB::SUB_F64,
RTLIB::SUB_F80, RTLIB::SUB_F128,
RTLIB::SUB_PPCF128));
}
break;
}
case ISD::SUB: {
EVT VT = Node->getValueType(0);
assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
"Don't know how to expand this subtraction!");
Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
VT));
Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, dl, VT));
Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
break;
}
case ISD::UREM:
case ISD::SREM: {
EVT VT = Node->getValueType(0);
bool isSigned = Node->getOpcode() == ISD::SREM;
unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
Tmp2 = Node->getOperand(0);
Tmp3 = Node->getOperand(1);
if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
(isDivRemLibcallAvailable(Node, isSigned, TLI) &&
// If div is legal, it's better to do the normal expansion
!TLI.isOperationLegalOrCustom(DivOpc, Node->getValueType(0)) &&
useDivRem(Node, isSigned, false))) {
SDVTList VTs = DAG.getVTList(VT, VT);
Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
} else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
// X % Y -> X-X/Y*Y
Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
} else if (isSigned)
Tmp1 = ExpandIntLibCall(Node, true,
RTLIB::SREM_I8,
RTLIB::SREM_I16, RTLIB::SREM_I32,
RTLIB::SREM_I64, RTLIB::SREM_I128);
else
Tmp1 = ExpandIntLibCall(Node, false,
RTLIB::UREM_I8,
RTLIB::UREM_I16, RTLIB::UREM_I32,
RTLIB::UREM_I64, RTLIB::UREM_I128);
Results.push_back(Tmp1);
break;
}
case ISD::UDIV:
case ISD::SDIV: {
bool isSigned = Node->getOpcode() == ISD::SDIV;
unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
EVT VT = Node->getValueType(0);
SDVTList VTs = DAG.getVTList(VT, VT);
if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
(isDivRemLibcallAvailable(Node, isSigned, TLI) &&
useDivRem(Node, isSigned, true)))
Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
Node->getOperand(1));
else if (isSigned)
Tmp1 = ExpandIntLibCall(Node, true,
RTLIB::SDIV_I8,
RTLIB::SDIV_I16, RTLIB::SDIV_I32,
RTLIB::SDIV_I64, RTLIB::SDIV_I128);
else
Tmp1 = ExpandIntLibCall(Node, false,
RTLIB::UDIV_I8,
RTLIB::UDIV_I16, RTLIB::UDIV_I32,
RTLIB::UDIV_I64, RTLIB::UDIV_I128);
Results.push_back(Tmp1);
break;
}
case ISD::MULHU:
case ISD::MULHS: {
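// The high half of the product is the second result of the corresponding
// [SU]MUL_LOHI node.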
unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
ISD::SMUL_LOHI;
EVT VT = Node->getValueType(0);
SDVTList VTs = DAG.getVTList(VT, VT);
assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
"If this wasn't legal, it shouldn't have been created!");
Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
Node->getOperand(1));
Results.push_back(Tmp1.getValue(1));
break;
}
case ISD::SDIVREM:
case ISD::UDIVREM:
// Expand into divrem libcall
ExpandDivRemLibCall(Node, Results);
break;
case ISD::MUL: {
EVT VT = Node->getValueType(0);
SDVTList VTs = DAG.getVTList(VT, VT);
// See if multiply or divide can be lowered using two-result operations.
// We just need the low half of the multiply; try both the signed
// and unsigned forms. If the target supports both SMUL_LOHI and
// UMUL_LOHI, form a preference by checking which forms of plain
// MULH it supports.
bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
unsigned OpToUse = 0;
if (HasSMUL_LOHI && !HasMULHS) {
OpToUse = ISD::SMUL_LOHI;
} else if (HasUMUL_LOHI && !HasMULHU) {
OpToUse = ISD::UMUL_LOHI;
} else if (HasSMUL_LOHI) {
OpToUse = ISD::SMUL_LOHI;
} else if (HasUMUL_LOHI) {
OpToUse = ISD::UMUL_LOHI;
}
if (OpToUse) {
Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
Node->getOperand(1)));
break;
}
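// Otherwise try to expand the multiply into half-width operations: compute
// the Lo and Hi halves of the product, zero-extend Lo, shift Hi into the
// upper bits, and OR the two halves together.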
SDValue Lo, Hi;
EVT HalfType = VT.getHalfSizedIntegerVT(*DAG.getContext());
if (TLI.isOperationLegalOrCustom(ISD::ZERO_EXTEND, VT) &&
TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND, VT) &&
TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
TLI.isOperationLegalOrCustom(ISD::OR, VT) &&
TLI.expandMUL(Node, Lo, Hi, HalfType, DAG)) {
Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Hi);
SDValue Shift =
DAG.getConstant(HalfType.getSizeInBits(), dl,
TLI.getShiftAmountTy(HalfType, DAG.getDataLayout()));
Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi));
break;
}
Tmp1 = ExpandIntLibCall(Node, false,
RTLIB::MUL_I8,
RTLIB::MUL_I16, RTLIB::MUL_I32,
RTLIB::MUL_I64, RTLIB::MUL_I128);
Results.push_back(Tmp1);
break;
}
case ISD::SADDO:
case ISD::SSUBO: {
SDValue LHS = Node->getOperand(0);
SDValue RHS = Node->getOperand(1);
SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
LHS, RHS);
Results.push_back(Sum);
EVT ResultType = Node->getValueType(1);
EVT OType = getSetCCResultType(Node->getValueType(0));
SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());
// LHSSign -> LHS >= 0
// RHSSign -> RHS >= 0
// SumSign -> Sum >= 0
//
// Add:
// Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
// Sub:
// Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
//
SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
Node->getOpcode() == ISD::SADDO ?
ISD::SETEQ : ISD::SETNE);
SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
Results.push_back(DAG.getBoolExtOrTrunc(Cmp, dl, ResultType, ResultType));
break;
}
case ISD::UADDO:
case ISD::USUBO: {
SDValue LHS = Node->getOperand(0);
SDValue RHS = Node->getOperand(1);
SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
LHS, RHS);
Results.push_back(Sum);
EVT ResultType = Node->getValueType(1);
EVT SetCCType = getSetCCResultType(Node->getValueType(0));
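// Unsigned addition overflowed iff the sum is (unsigned) less than the
// LHS; unsigned subtraction borrowed iff the difference is (unsigned)
// greater than the LHS.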
ISD::CondCode CC
= Node->getOpcode() == ISD::UADDO ? ISD::SETULT : ISD::SETUGT;
SDValue SetCC = DAG.getSetCC(dl, SetCCType, Sum, LHS, CC);
Results.push_back(DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType));
break;
}
case ISD::UMULO:
case ISD::SMULO: {
EVT VT = Node->getValueType(0);
EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
SDValue LHS = Node->getOperand(0);
SDValue RHS = Node->getOperand(1);
SDValue BottomHalf;
SDValue TopHalf;
static const unsigned Ops[2][3] =
{ { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
{ ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
bool isSigned = Node->getOpcode() == ISD::SMULO;
if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
} else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
RHS);
TopHalf = BottomHalf.getValue(1);
} else if (TLI.isTypeLegal(WideVT)) {
LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
DAG.getIntPtrConstant(0, dl));
TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
DAG.getIntPtrConstant(1, dl));
} else {
// We can fall back to a libcall with an illegal type for the MUL if we
// have a libcall big enough.
// Also, we can fall back to a division in some cases, but that's a big
// performance hit in the general case.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (WideVT == MVT::i16)
LC = RTLIB::MUL_I16;
else if (WideVT == MVT::i32)
LC = RTLIB::MUL_I32;
else if (WideVT == MVT::i64)
LC = RTLIB::MUL_I64;
else if (WideVT == MVT::i128)
LC = RTLIB::MUL_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");
// The high part is obtained by SRA'ing all but one of the bits of the low
// part.
unsigned LoSize = VT.getSizeInBits();
SDValue HiLHS =
DAG.getNode(ISD::SRA, dl, VT, RHS,
DAG.getConstant(LoSize - 1, dl,
TLI.getPointerTy(DAG.getDataLayout())));
SDValue HiRHS =
DAG.getNode(ISD::SRA, dl, VT, LHS,
DAG.getConstant(LoSize - 1, dl,
TLI.getPointerTy(DAG.getDataLayout())));
// Here we're passing the 2 arguments explicitly as 4 arguments that are
// pre-lowered to the correct types. This all depends upon WideVT not
// being a legal type for the architecture, so each wide value has to be
// split into two arguments.
SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
DAG.getIntPtrConstant(0, dl));
TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
DAG.getIntPtrConstant(1, dl));
// Ret is a node with an illegal type. Because such things are not
// generally permitted during this phase of legalization, make sure the
// node has no more uses. The above EXTRACT_ELEMENT nodes should have been
// folded.
assert(Ret->use_empty() &&
"Unexpected uses of illegally typed value from expanded lib call.");
}
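// With both halves computed, detect overflow: a signed multiply overflows
// iff the top half differs from the sign-extension of the bottom half; an
// unsigned multiply overflows iff the top half is nonzero.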
if (isSigned) {
Tmp1 = DAG.getConstant(
VT.getSizeInBits() - 1, dl,
TLI.getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
TopHalf = DAG.getSetCC(dl, getSetCCResultType(VT), TopHalf, Tmp1,
ISD::SETNE);
} else {
TopHalf = DAG.getSetCC(dl, getSetCCResultType(VT), TopHalf,
DAG.getConstant(0, dl, VT), ISD::SETNE);
}
Results.push_back(BottomHalf);
Results.push_back(TopHalf);
break;
}
case ISD::BUILD_PAIR: {
EVT PairTy = Node->getValueType(0);
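// Combine the halves: zero-extend the low operand, any-extend the high
// operand and shift it into the upper bits, then OR the two together.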
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
Tmp2 = DAG.getNode(
ISD::SHL, dl, PairTy, Tmp2,
DAG.getConstant(PairTy.getSizeInBits() / 2, dl,
TLI.getShiftAmountTy(PairTy, DAG.getDataLayout())));
Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
break;
}
case ISD::SELECT:
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
Tmp3 = Node->getOperand(2);
if (Tmp1.getOpcode() == ISD::SETCC) {
Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
Tmp2, Tmp3,
cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
} else {
Tmp1 = DAG.getSelectCC(dl, Tmp1,
DAG.getConstant(0, dl, Tmp1.getValueType()),
Tmp2, Tmp3, ISD::SETNE);
}
Results.push_back(Tmp1);
break;
case ISD::BR_JT: {
SDValue Chain = Node->getOperand(0);
SDValue Table = Node->getOperand(1);
SDValue Index = Node->getOperand(2);
EVT PTy = TLI.getPointerTy(DAG.getDataLayout());
const DataLayout &TD = DAG.getDataLayout();
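// The entry address is Table + Index * EntrySize; load the entry with a
// sign-extending load and branch to it indirectly (adding the PIC base
// first when needed).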
unsigned EntrySize =
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
Index = DAG.getNode(ISD::MUL, dl, Index.getValueType(), Index,
DAG.getConstant(EntrySize, dl, Index.getValueType()));
SDValue Addr = DAG.getNode(ISD::ADD, dl, Index.getValueType(),
Index, Table);
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
MachinePointerInfo::getJumpTable(), MemVT,
false, false, false, 0);
Addr = LD;
if (TM.getRelocationModel() == Reloc::PIC_) {
// For PIC, the sequence is:
// BRIND(load(Jumptable + index) + RelocBase)
// RelocBase can be JumpTable, GOT or some sort of global base.
Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
TLI.getPICJumpTableRelocBase(Table, DAG));
}
Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
Results.push_back(Tmp1);
break;
}
case ISD::BRCOND:
// Expand brcond's setcc into its constituent parts and create a BR_CC
// Node.
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
if (Tmp2.getOpcode() == ISD::SETCC) {
Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
Tmp1, Tmp2.getOperand(2),
Tmp2.getOperand(0), Tmp2.getOperand(1),
Node->getOperand(2));
} else {
// We test only the i1 bit. Skip the AND if UNDEF.
Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? Tmp2 :
DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
DAG.getConstant(1, dl, Tmp2.getValueType()));
Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
DAG.getCondCode(ISD::SETNE), Tmp3,
DAG.getConstant(0, dl, Tmp3.getValueType()),
Node->getOperand(2));
}
Results.push_back(Tmp1);
break;
case ISD::SETCC: {
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
Tmp3 = Node->getOperand(2);
bool Legalized = LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2,
Tmp3, NeedInvert, dl);
if (Legalized) {
// If we expanded the SETCC by swapping LHS and RHS, or by inverting the
// condition code, create a new SETCC node.
if (Tmp3.getNode())
Tmp1 = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
Tmp1, Tmp2, Tmp3);
// If we expanded the SETCC by inverting the condition code, then wrap
// the existing SETCC in a NOT to restore the intended condition.
if (NeedInvert)
Tmp1 = DAG.getLogicalNOT(dl, Tmp1, Tmp1->getValueType(0));
Results.push_back(Tmp1);
break;
}
// Otherwise, SETCC for the given comparison type must be completely
// illegal; expand it into a SELECT_CC.
EVT VT = Node->getValueType(0);
int TrueValue;
switch (TLI.getBooleanContents(Tmp1->getValueType(0))) {
case TargetLowering::ZeroOrOneBooleanContent:
case TargetLowering::UndefinedBooleanContent:
TrueValue = 1;
break;
case TargetLowering::ZeroOrNegativeOneBooleanContent:
TrueValue = -1;
break;
}
Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
DAG.getConstant(TrueValue, dl, VT),
DAG.getConstant(0, dl, VT),
Tmp3);
Results.push_back(Tmp1);
break;
}
case ISD::SELECT_CC: {
Tmp1 = Node->getOperand(0); // LHS
Tmp2 = Node->getOperand(1); // RHS
Tmp3 = Node->getOperand(2); // True
Tmp4 = Node->getOperand(3); // False
EVT VT = Node->getValueType(0);
SDValue CC = Node->getOperand(4);
ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get();
if (TLI.isCondCodeLegal(CCOp, Tmp1.getSimpleValueType())) {
// If the condition code is legal, then we need to expand this
// node using SETCC and SELECT.
EVT CmpVT = Tmp1.getValueType();
assert(!TLI.isOperationExpand(ISD::SELECT, VT) &&
"Cannot expand ISD::SELECT_CC when ISD::SELECT also needs to be "
"expanded.");
EVT CCVT =
TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
SDValue Cond = DAG.getNode(ISD::SETCC, dl, CCVT, Tmp1, Tmp2, CC);
Results.push_back(DAG.getSelect(dl, VT, Cond, Tmp3, Tmp4));
break;
}
// SELECT_CC is legal, so the condition code must not be.
bool Legalized = false;
// Try to legalize by inverting the condition. This is for targets that
// might support an ordered version of a condition, but not the unordered
// version (or vice versa).
ISD::CondCode InvCC = ISD::getSetCCInverse(CCOp,
Tmp1.getValueType().isInteger());
if (TLI.isCondCodeLegal(InvCC, Tmp1.getSimpleValueType())) {
// Use the new condition code and swap true and false
Legalized = true;
Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp4, Tmp3, InvCC);
} else {
// If the inverse is not legal, then try to swap the arguments using
// the inverse condition code.
ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InvCC);
if (TLI.isCondCodeLegal(SwapInvCC, Tmp1.getSimpleValueType())) {
// The swapped inverse condition is legal, so swap true and false,
// lhs and rhs.
Legalized = true;
Tmp1 = DAG.getSelectCC(dl, Tmp2, Tmp1, Tmp4, Tmp3, SwapInvCC);
}
}
if (!Legalized) {
Legalized = LegalizeSetCCCondCode(
getSetCCResultType(Tmp1.getValueType()), Tmp1, Tmp2, CC, NeedInvert,
dl);
assert(Legalized && "Can't legalize SELECT_CC with legal condition!");
// If we expanded the SETCC by inverting the condition code, then swap
// the True/False operands to match.
if (NeedInvert)
std::swap(Tmp3, Tmp4);
// If we expanded the SETCC by swapping LHS and RHS, or by inverting the
// condition code, create a new SELECT_CC node.
if (CC.getNode()) {
Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0),
Tmp1, Tmp2, Tmp3, Tmp4, CC);
} else {
Tmp2 = DAG.getConstant(0, dl, Tmp1.getValueType());
CC = DAG.getCondCode(ISD::SETNE);
Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1,
Tmp2, Tmp3, Tmp4, CC);
}
}
Results.push_back(Tmp1);
break;
}
case ISD::BR_CC: {
Tmp1 = Node->getOperand(0); // Chain
Tmp2 = Node->getOperand(2); // LHS
Tmp3 = Node->getOperand(3); // RHS
Tmp4 = Node->getOperand(1); // CC
bool Legalized = LegalizeSetCCCondCode(getSetCCResultType(
Tmp2.getValueType()), Tmp2, Tmp3, Tmp4, NeedInvert, dl);
(void)Legalized;
assert(Legalized && "Can't legalize BR_CC with legal condition!");
// If we expanded the SETCC by inverting the condition code, then wrap
// the existing SETCC in a NOT to restore the intended condition.
if (NeedInvert)
Tmp4 = DAG.getNOT(dl, Tmp4, Tmp4->getValueType(0));
// If we expanded the SETCC by swapping LHS and RHS, create a new BR_CC
// node.
if (Tmp4.getNode()) {
Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1,
Tmp4, Tmp2, Tmp3, Node->getOperand(4));
} else {
Tmp3 = DAG.getConstant(0, dl, Tmp2.getValueType());
Tmp4 = DAG.getCondCode(ISD::SETNE);
Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4,
Tmp2, Tmp3, Node->getOperand(4));
}
Results.push_back(Tmp1);
break;
}
case ISD::BUILD_VECTOR:
Results.push_back(ExpandBUILD_VECTOR(Node));
break;
case ISD::SRA:
case ISD::SRL:
case ISD::SHL: {
// Scalarize vector SRA/SRL/SHL.
EVT VT = Node->getValueType(0);
assert(VT.isVector() && "Unable to legalize non-vector shift");
assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal");
unsigned NumElem = VT.getVectorNumElements();
SmallVector<SDValue, 8> Scalars;
for (unsigned Idx = 0; Idx < NumElem; Idx++) {
SDValue Ex = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, VT.getScalarType(), Node->getOperand(0),
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue Sh = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, VT.getScalarType(), Node->getOperand(1),
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
VT.getScalarType(), Ex, Sh));
}
SDValue Result =
DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Scalars);
ReplaceNode(SDValue(Node, 0), Result);
break;
}
case ISD::GLOBAL_OFFSET_TABLE:
case ISD::GlobalAddress:
case ISD::GlobalTLSAddress:
case ISD::ExternalSymbol:
case ISD::ConstantPool:
case ISD::JumpTable:
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_VOID:
// FIXME: Custom lowering for these operations shouldn't return null!
break;
}
// Replace the original node with the legalized result.
if (!Results.empty())
ReplaceNode(Node, Results.data());
}
void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
SmallVector<SDValue, 8> Results;
MVT OVT = Node->getSimpleValueType(0);
if (Node->getOpcode() == ISD::UINT_TO_FP ||
Node->getOpcode() == ISD::SINT_TO_FP ||
Node->getOpcode() == ISD::SETCC) {
OVT = Node->getOperand(0).getSimpleValueType();
}
if (Node->getOpcode() == ISD::BR_CC)
OVT = Node->getOperand(2).getSimpleValueType();
MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
SDLoc dl(Node);
SDValue Tmp1, Tmp2, Tmp3;
switch (Node->getOpcode()) {
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTPOP:
// Zero extend the argument.
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
// Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is
// already the correct result.
Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
if (Node->getOpcode() == ISD::CTTZ) {
// FIXME: This should set a bit in the zero extended value instead.
Tmp2 = DAG.getSetCC(dl, getSetCCResultType(NVT),
Tmp1, DAG.getConstant(NVT.getSizeInBits(), dl, NVT),
ISD::SETEQ);
Tmp1 = DAG.getSelect(dl, NVT, Tmp2,
DAG.getConstant(OVT.getSizeInBits(), dl, NVT), Tmp1);
} else if (Node->getOpcode() == ISD::CTLZ ||
Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
// Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
DAG.getConstant(NVT.getSizeInBits() -
OVT.getSizeInBits(), dl, NVT));
}
Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
break;
case ISD::BSWAP: {
unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
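// After zero-extending and byte-swapping in the wider type, the original
// bytes sit DiffBits above their final position; shift them back down.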
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
Tmp1 = DAG.getNode(
ISD::SRL, dl, NVT, Tmp1,
DAG.getConstant(DiffBits, dl,
TLI.getShiftAmountTy(NVT, DAG.getDataLayout())));
Results.push_back(Tmp1);
break;
}
case ISD::FP_TO_UINT:
case ISD::FP_TO_SINT:
Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
Node->getOpcode() == ISD::FP_TO_SINT, dl);
Results.push_back(Tmp1);
break;
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP:
Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
Node->getOpcode() == ISD::SINT_TO_FP, dl);
Results.push_back(Tmp1);
break;
case ISD::VAARG: {
SDValue Chain = Node->getOperand(0); // Get the chain.
SDValue Ptr = Node->getOperand(1); // Get the pointer.
unsigned TruncOp;
if (OVT.isVector()) {
TruncOp = ISD::BITCAST;
} else {
assert(OVT.isInteger()
&& "VAARG promotion is supported only for vectors or integer types");
TruncOp = ISD::TRUNCATE;
}
// Perform the larger operation, then convert back
Tmp1 = DAG.getVAArg(NVT, dl, Chain, Ptr, Node->getOperand(2),
Node->getConstantOperandVal(3));
Chain = Tmp1.getValue(1);
Tmp2 = DAG.getNode(TruncOp, dl, OVT, Tmp1);
// Modified the chain result - switch anything that used the old chain to
// use the new one.
DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp2);
DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
if (UpdatedNodes) {
UpdatedNodes->insert(Tmp2.getNode());
UpdatedNodes->insert(Chain.getNode());
}
ReplacedNode(Node);
break;
}
case ISD::AND:
case ISD::OR:
case ISD::XOR: {
unsigned ExtOp, TruncOp;
if (OVT.isVector()) {
ExtOp = ISD::BITCAST;
TruncOp = ISD::BITCAST;
} else {
assert(OVT.isInteger() && "Cannot promote logic operation");
ExtOp = ISD::ANY_EXTEND;
TruncOp = ISD::TRUNCATE;
}
// Promote each of the values to the new type.
Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
// Perform the larger operation, then convert back
Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
break;
}
case ISD::SELECT: {
unsigned ExtOp, TruncOp;
if (Node->getValueType(0).isVector() ||
Node->getValueType(0).getSizeInBits() == NVT.getSizeInBits()) {
ExtOp = ISD::BITCAST;
TruncOp = ISD::BITCAST;
} else if (Node->getValueType(0).isInteger()) {
ExtOp = ISD::ANY_EXTEND;
TruncOp = ISD::TRUNCATE;
} else {
ExtOp = ISD::FP_EXTEND;
TruncOp = ISD::FP_ROUND;
}
Tmp1 = Node->getOperand(0);
// Promote each of the values to the new type.
Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
// Perform the larger operation, then round down.
Tmp1 = DAG.getSelect(dl, NVT, Tmp1, Tmp2, Tmp3);
if (TruncOp != ISD::FP_ROUND)
Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
else
Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Tmp1);
break;
}
case ISD::VECTOR_SHUFFLE: {
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
// Cast the two input vectors.
Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));
// Convert the shuffle mask to the right # elements.
Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
Results.push_back(Tmp1);
break;
}
case ISD::SETCC: {
unsigned ExtOp = ISD::FP_EXTEND;
if (NVT.isInteger()) {
ISD::CondCode CCCode =
cast<CondCodeSDNode>(Node->getOperand(2))->get();
ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
}
Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
Tmp1, Tmp2, Node->getOperand(2)));
break;
}
case ISD::BR_CC: {
unsigned ExtOp = ISD::FP_EXTEND;
if (NVT.isInteger()) {
ISD::CondCode CCCode =
cast<CondCodeSDNode>(Node->getOperand(1))->get();
ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
}
Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(3));
Results.push_back(DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0),
Node->getOperand(0), Node->getOperand(1),
Tmp1, Tmp2, Node->getOperand(4)));
break;
}
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FDIV:
case ISD::FREM:
case ISD::FMINNUM:
case ISD::FMAXNUM:
case ISD::FCOPYSIGN:
case ISD::FPOW: {
Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
Tmp3, DAG.getIntPtrConstant(0, dl)));
break;
}
case ISD::FMA: {
Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
Tmp3 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(2));
Results.push_back(
DAG.getNode(ISD::FP_ROUND, dl, OVT,
DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2, Tmp3),
DAG.getIntPtrConstant(0, dl)));
break;
}
case ISD::FPOWI: {
Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
Tmp2 = Node->getOperand(1);
Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
Tmp3, DAG.getIntPtrConstant(0, dl)));
break;
}
case ISD::FFLOOR:
case ISD::FCEIL:
case ISD::FRINT:
case ISD::FNEARBYINT:
case ISD::FROUND:
case ISD::FTRUNC:
case ISD::FNEG:
case ISD::FSQRT:
case ISD::FSIN:
case ISD::FCOS:
case ISD::FLOG:
case ISD::FLOG2:
case ISD::FLOG10:
case ISD::FABS:
case ISD::FEXP:
case ISD::FEXP2: {
Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
Tmp2, DAG.getIntPtrConstant(0, dl)));
break;
}
}
// Replace the original node with the legalized result.
if (!Results.empty())
ReplaceNode(Node, Results.data());
}
/// This is the entry point for the file: legalize every node in the DAG
/// until the whole graph is legal for the target.
void SelectionDAG::Legalize() {
AssignTopologicalOrder();
SmallPtrSet<SDNode *, 16> LegalizedNodes;
SelectionDAGLegalize Legalizer(*this, LegalizedNodes);
// Visit all the nodes. We start in topological order, so that we see
// nodes with their original operands intact. Legalization can produce
// new nodes which may themselves need to be legalized. Iterate until all
// nodes have been legalized.
for (;;) {
bool AnyLegalized = false;
for (auto NI = allnodes_end(); NI != allnodes_begin();) {
--NI;
SDNode *N = NI;
if (N->use_empty() && N != getRoot().getNode()) {
++NI;
DeleteNode(N);
continue;
}
if (LegalizedNodes.insert(N).second) {
AnyLegalized = true;
Legalizer.LegalizeOp(N);
if (N->use_empty() && N != getRoot().getNode()) {
++NI;
DeleteNode(N);
}
}
}
if (!AnyLegalized)
break;
}
// Remove dead nodes now.
RemoveDeadNodes();
}
bool SelectionDAG::LegalizeOp(SDNode *N,
SmallSetVector<SDNode *, 16> &UpdatedNodes) {
SmallPtrSet<SDNode *, 16> LegalizedNodes;
SelectionDAGLegalize Legalizer(*this, LegalizedNodes, &UpdatedNodes);
// Directly insert the node in question, and legalize it. This will recurse
// as needed through operands.
LegalizedNodes.insert(N);
Legalizer.LegalizeOp(N);
return LegalizedNodes.count(N);
}
//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
using namespace llvm;
/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
: TargetLoweringBase(tm) {}
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
return nullptr;
}
/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
SDValue &Chain) const {
const Function *F = DAG.getMachineFunction().getFunction();
// Conservatively require the attributes of the call to match those of
// the return. Ignore noalias because it doesn't affect the call sequence.
AttributeSet CallerAttrs = F->getAttributes();
if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex)
.removeAttribute(Attribute::NoAlias).hasAttributes())
return false;
// It's not safe to eliminate the sign / zero extension of the return value.
if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
return false;
// Check if the only use is a function return node.
return isUsedByReturnOnly(Node, Chain);
}
/// \brief Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLowering::ArgListEntry::setAttributes(ImmutableCallSite *CS,
unsigned AttrIdx) {
isSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
isZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
isInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
isSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
isNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
isByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
Alignment = CS->getParamAlignment(AttrIdx);
}
/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG,
RTLIB::Libcall LC, EVT RetVT,
const SDValue *Ops, unsigned NumOps,
bool isSigned, SDLoc dl,
bool doesNotReturn,
bool isReturnValueUsed) const {
TargetLowering::ArgListTy Args;
Args.reserve(NumOps);
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0; i != NumOps; ++i) {
Entry.Node = Ops[i];
Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
Entry.isSExt = shouldSignExtendTypeInLibCall(Ops[i].getValueType(), isSigned);
Entry.isZExt = !shouldSignExtendTypeInLibCall(Ops[i].getValueType(), isSigned);
Args.push_back(Entry);
}
if (LC == RTLIB::UNKNOWN_LIBCALL)
report_fatal_error("Unsupported library call operation!");
SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
getPointerTy(DAG.getDataLayout()));
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
TargetLowering::CallLoweringInfo CLI(DAG);
bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
.setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
.setNoReturn(doesNotReturn).setDiscardResult(!isReturnValueUsed)
.setSExtResult(signExtend).setZExtResult(!signExtend);
return LowerCallTo(CLI);
}
/// SoftenSetCCOperands - Soften the operands of a comparison. This code is
/// shared among BR_CC, SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode,
SDLoc dl) const {
assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
&& "Unsupported setcc type!");
// Expand into one or more soft-fp libcall(s).
RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
switch (CCCode) {
case ISD::SETEQ:
case ISD::SETOEQ:
LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
(VT == MVT::f64) ? RTLIB::OEQ_F64 : RTLIB::OEQ_F128;
break;
case ISD::SETNE:
case ISD::SETUNE:
LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
(VT == MVT::f64) ? RTLIB::UNE_F64 : RTLIB::UNE_F128;
break;
case ISD::SETGE:
case ISD::SETOGE:
LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
(VT == MVT::f64) ? RTLIB::OGE_F64 : RTLIB::OGE_F128;
break;
case ISD::SETLT:
case ISD::SETOLT:
LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
(VT == MVT::f64) ? RTLIB::OLT_F64 : RTLIB::OLT_F128;
break;
case ISD::SETLE:
case ISD::SETOLE:
LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
(VT == MVT::f64) ? RTLIB::OLE_F64 : RTLIB::OLE_F128;
break;
case ISD::SETGT:
case ISD::SETOGT:
LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
(VT == MVT::f64) ? RTLIB::OGT_F64 : RTLIB::OGT_F128;
break;
case ISD::SETUO:
LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
(VT == MVT::f64) ? RTLIB::UO_F64 : RTLIB::UO_F128;
break;
case ISD::SETO:
LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
(VT == MVT::f64) ? RTLIB::O_F64 : RTLIB::O_F128;
break;
default:
LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
(VT == MVT::f64) ? RTLIB::UO_F64 : RTLIB::UO_F128;
switch (CCCode) {
case ISD::SETONE:
// SETONE = SETOLT | SETOGT
LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
(VT == MVT::f64) ? RTLIB::OLT_F64 : RTLIB::OLT_F128;
// Fallthrough
case ISD::SETUGT:
LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
(VT == MVT::f64) ? RTLIB::OGT_F64 : RTLIB::OGT_F128;
break;
case ISD::SETUGE:
LC2 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
(VT == MVT::f64) ? RTLIB::OGE_F64 : RTLIB::OGE_F128;
break;
case ISD::SETULT:
LC2 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
(VT == MVT::f64) ? RTLIB::OLT_F64 : RTLIB::OLT_F128;
break;
case ISD::SETULE:
LC2 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
(VT == MVT::f64) ? RTLIB::OLE_F64 : RTLIB::OLE_F128;
break;
case ISD::SETUEQ:
LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
(VT == MVT::f64) ? RTLIB::OEQ_F64 : RTLIB::OEQ_F128;
break;
default: llvm_unreachable("Do not know how to soften this setcc!");
}
}
// Use the target-specific return value for comparison libcalls.
EVT RetVT = getCmpLibcallReturnType();
SDValue Ops[2] = { NewLHS, NewRHS };
NewLHS = makeLibCall(DAG, LC1, RetVT, Ops, 2, false/*sign irrelevant*/,
dl).first;
NewRHS = DAG.getConstant(0, dl, RetVT);
CCCode = getCmpLibcallCC(LC1);
if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
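// Predicates that need two comparisons (e.g. SETONE, SETUEQ) evaluate a
// second libcall and OR the two setcc results together.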
SDValue Tmp = DAG.getNode(
ISD::SETCC, dl,
getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
NewLHS, NewRHS, DAG.getCondCode(CCCode));
NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, 2, false/*sign irrelevant*/,
dl).first;
NewLHS = DAG.getNode(
ISD::SETCC, dl,
getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
NewLHS, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
NewRHS = SDValue();
}
}
/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
// In non-pic modes, just use the address of a block.
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return MachineJumpTableInfo::EK_BlockAddress;
// In PIC mode, if the target supports a GPRel32 directive, use it.
if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
return MachineJumpTableInfo::EK_GPRel32BlockAddress;
// Otherwise, use a label difference.
return MachineJumpTableInfo::EK_LabelDifference32;
}
SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
// If our PIC model is GP relative, use the global offset table as the base.
unsigned JTEncoding = getJumpTableEncoding();
if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
(JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));
return Table;
}
/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
/// MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const {
// The normal PIC reloc base is the label at the start of the jump table.
return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}
bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// Assume that everything is safe in static mode.
if (getTargetMachine().getRelocationModel() == Reloc::Static)
return true;
// In dynamic-no-pic mode, assume that known defined values are safe.
if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
GA && GA->getGlobal()->isStrongDefinitionForLinker())
return true;
// Otherwise assume nothing is safe.
return false;
}
//===----------------------------------------------------------------------===//
// Optimization Methods
//===----------------------------------------------------------------------===//
/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if there
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
const APInt &Demanded) {
SDLoc dl(Op);
// FIXME: ISD::SELECT, ISD::SELECT_CC
switch (Op.getOpcode()) {
default: break;
case ISD::XOR:
case ISD::AND:
case ISD::OR: {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
if (!C) return false;
if (Op.getOpcode() == ISD::XOR &&
(C->getAPIntValue() | (~Demanded)).isAllOnesValue())
return false;
// If the constant has bits set that aren't demanded, shrink it to just the
// demanded bits.
if (C->getAPIntValue().intersects(~Demanded)) {
EVT VT = Op.getValueType();
SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
DAG.getConstant(Demanded &
C->getAPIntValue(),
dl, VT));
return CombineTo(Op, New);
}
break;
}
}
return false;
}
/// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
/// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening
/// cast, but it could be generalized for targets with other types of
/// implicit widening casts.
bool
TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
unsigned BitWidth,
const APInt &Demanded,
SDLoc dl) {
assert(Op.getNumOperands() == 2 &&
"ShrinkDemandedOp only supports binary operators!");
assert(Op.getNode()->getNumValues() == 1 &&
"ShrinkDemandedOp only supports nodes with one result!");
// Early return, as this function cannot handle vector types.
if (Op.getValueType().isVector())
return false;
// Don't do this if the node has another user, which may require the
// full value.
if (!Op.getNode()->hasOneUse())
return false;
// Search for the smallest integer type with free casts to and from
// Op's type. For expedience, just check power-of-2 integer types.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned DemandedSize = BitWidth - Demanded.countLeadingZeros();
unsigned SmallVTBits = DemandedSize;
if (!isPowerOf2_32(SmallVTBits))
SmallVTBits = NextPowerOf2(SmallVTBits);
for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
TLI.isZExtFree(SmallVT, Op.getValueType())) {
// We found a type with free casts.
SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
Op.getNode()->getOperand(0)),
DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
Op.getNode()->getOperand(1)));
bool NeedZext = DemandedSize > SmallVTBits;
SDValue Z = DAG.getNode(NeedZext ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND,
dl, Op.getValueType(), X);
return CombineTo(Op, Z);
}
}
return false;
}
/// SimplifyDemandedBits - Look at Op. At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream. If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New. Otherwise,
/// analyze the expression and return a mask of KnownOne and KnownZero bits for
/// the expression (used to simplify the caller). The KnownZero/One bits may
/// only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDValue Op,
const APInt &DemandedMask,
APInt &KnownZero,
APInt &KnownOne,
TargetLoweringOpt &TLO,
unsigned Depth) const {
unsigned BitWidth = DemandedMask.getBitWidth();
assert(Op.getValueType().getScalarType().getSizeInBits() == BitWidth &&
"Mask size mismatches value type size!");
APInt NewMask = DemandedMask;
SDLoc dl(Op);
auto &DL = TLO.DAG.getDataLayout();
// Don't know anything.
KnownZero = KnownOne = APInt(BitWidth, 0);
// Other users may use these bits.
if (!Op.getNode()->hasOneUse()) {
if (Depth != 0) {
// If not at the root, just compute the KnownZero/KnownOne bits to
// simplify things downstream.
TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
return false;
}
// If this is the root being simplified, allow it to have multiple uses,
// just set the NewMask to all bits.
NewMask = APInt::getAllOnesValue(BitWidth);
} else if (DemandedMask == 0) {
// Not demanding any bits from Op.
if (Op.getOpcode() != ISD::UNDEF)
return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
return false;
} else if (Depth == 6) { // Limit search depth.
return false;
}
APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
switch (Op.getOpcode()) {
case ISD::Constant:
// We know all of the bits for a constant!
KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
KnownZero = ~KnownOne;
return false; // Don't fall through, will infinitely loop.
case ISD::AND:
// If the RHS is a constant, check to see if the LHS would be zero without
// using the bits from the RHS. Below we use knowledge about the RHS to
// simplify the LHS; here we use information from the LHS to simplify
// the RHS.
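// For illustration: with NewMask = 0xFF and an RHS constant of 0xF0, if the
// demanded bits that 0xF0 would clear are already known zero on the LHS,
// the 'and' changes no demanded bit and the LHS can be used directly.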
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
APInt LHSZero, LHSOne;
// Do not increment Depth here; that can cause an infinite loop.
TLO.DAG.computeKnownBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
// If the LHS already has zeros where RHSC does, this 'and' is dead.
if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
// If any of the set bits in the RHS are known zero on the LHS, shrink
// the constant.
if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
return true;
}
if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
KnownZero2, KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If all of the demanded bits are known one on one side, return the other.
// These bits cannot contribute to the result of the 'and'.
if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
return TLO.CombineTo(Op, Op.getOperand(1));
// If all of the demanded bits in the inputs are known zeros, return zero.
if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, Op.getValueType()));
// If the RHS is a constant, see if we can simplify it.
if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
return true;
// If the operation can be done in a smaller type, do so.
if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// Output known-1 bits are only known if set in both the LHS & RHS.
KnownOne &= KnownOne2;
// Output known-0 bits are known to be clear if zero in either the LHS or the RHS.
KnownZero |= KnownZero2;
break;
case ISD::OR:
if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
KnownZero2, KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'or'.
if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
return TLO.CombineTo(Op, Op.getOperand(1));
// If all of the potentially set bits on one side are known to be set on
// the other side, just use the 'other' side.
if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
return TLO.CombineTo(Op, Op.getOperand(1));
// If the RHS is a constant, see if we can simplify it.
if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// If the operation can be done in a smaller type, do so.
if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// Output known-0 bits are only known if clear in both the LHS & RHS.
KnownZero &= KnownZero2;
// Output known-1 bits are known to be set if set in either the LHS or the RHS.
KnownOne |= KnownOne2;
break;
case ISD::XOR:
if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'xor'.
if ((KnownZero & NewMask) == NewMask)
return TLO.CombineTo(Op, Op.getOperand(0));
if ((KnownZero2 & NewMask) == NewMask)
return TLO.CombineTo(Op, Op.getOperand(1));
// If the operation can be done in a smaller type, do so.
if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// If all of the unknown bits are known to be zero on one side or the other
// (but not both) turn this into an *inclusive* or.
// e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
Op.getOperand(0),
Op.getOperand(1)));
// Output known-0 bits are known if clear or set in both the LHS & RHS.
KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 bits are known to be set if set in exactly one of the LHS and RHS.
KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
// If all of the demanded bits on one side are known, and all of the set
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
// NB: it is okay if more bits are known than are requested
if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
if (KnownOne == KnownOne2) { // set bits are the same on both sides
EVT VT = Op.getValueType();
SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, dl, VT);
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
Op.getOperand(0), ANDC));
}
}
// If the RHS is a constant, see if we can simplify it. For XOR, we prefer
// to force bits to 1 if that will make the constant -1; if we can't force
// bits, try to shrink the constant.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
APInt Expanded = C->getAPIntValue() | (~NewMask);
// If we can expand the constant to have all bits set, do it.
if (Expanded.isAllOnesValue()) {
if (Expanded != C->getAPIntValue()) {
EVT VT = Op.getValueType();
SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
TLO.DAG.getConstant(Expanded, dl, VT));
return TLO.CombineTo(Op, New);
}
// If it already has all the bits set, there is nothing to change,
// but don't shrink the constant either!
} else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
return true;
}
}
KnownZero = KnownZeroOut;
KnownOne = KnownOneOut;
break;
case ISD::SELECT:
if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If the operands are constants, see if we can simplify them.
if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
break;
case ISD::SELECT_CC:
if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If the operands are constants, see if we can simplify them.
if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
break;
case ISD::SHL:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
unsigned ShAmt = SA->getZExtValue();
SDValue InOp = Op.getOperand(0);
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
break;
// If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
// single shift. We can do this if the bottom bits (which are shifted
// out) are never demanded.
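// For illustration: ((x >>u 4) << 6) with the low six bits not demanded
// becomes (x << 2), since Diff = 6 - 4 = 2; with the shift amounts swapped,
// Diff is negative and the result is (x >>u 2) instead.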
if (InOp.getOpcode() == ISD::SRL &&
isa<ConstantSDNode>(InOp.getOperand(1))) {
if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
unsigned Opc = ISD::SHL;
int Diff = ShAmt-C1;
if (Diff < 0) {
Diff = -Diff;
Opc = ISD::SRL;
}
SDValue NewSA =
TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
EVT VT = Op.getValueType();
return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
InOp.getOperand(0), NewSA));
}
}
if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
KnownZero, KnownOne, TLO, Depth+1))
return true;
// Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
// are not demanded. This will likely allow the anyext to be folded away.
if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
SDValue InnerOp = InOp.getNode()->getOperand(0);
EVT InnerVT = InnerOp.getValueType();
unsigned InnerBits = InnerVT.getSizeInBits();
if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
isTypeDesirableForOp(ISD::SHL, InnerVT)) {
EVT ShTy = getShiftAmountTy(InnerVT, DL);
if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
ShTy = InnerVT;
SDValue NarrowShl =
TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
TLO.DAG.getConstant(ShAmt, dl, ShTy));
return
TLO.CombineTo(Op,
TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(),
NarrowShl));
}
// Repeat the SHL optimization above in cases where an extension
// intervenes: (shl (anyext (shr x, c1)), c2) to
// (shl (anyext x), c2-c1). This requires that the bottom c1 bits
// aren't demanded (as above) and that the shifted upper c1 bits of
// x aren't demanded.
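// For illustration: (shl (anyext (srl x, 2)), 5) can become
// (shl (anyext x), 3) when the low five bits of the result and the two
// bits shifted off the top of x are never demanded.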
if (InOp.hasOneUse() &&
InnerOp.getOpcode() == ISD::SRL &&
InnerOp.hasOneUse() &&
isa<ConstantSDNode>(InnerOp.getOperand(1))) {
uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
->getZExtValue();
if (InnerShAmt < ShAmt &&
InnerShAmt < InnerBits &&
NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 &&
NewMask.trunc(ShAmt) == 0) {
SDValue NewSA =
TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
Op.getOperand(1).getValueType());
EVT VT = Op.getValueType();
SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
InnerOp.getOperand(0));
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT,
NewExt, NewSA));
}
}
}
KnownZero <<= ShAmt;
KnownOne <<= ShAmt;
// Low bits are known zero.
KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
}
break;
case ISD::SRL:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
EVT VT = Op.getValueType();
unsigned ShAmt = SA->getZExtValue();
unsigned VTSize = VT.getSizeInBits();
SDValue InOp = Op.getOperand(0);
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
break;
APInt InDemandedMask = (NewMask << ShAmt);
// If the shift is exact, then it does demand the low bits (and knows that
// they are zero).
if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);
// If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
// single shift. We can do this if the top bits (which are shifted out)
// are never demanded.
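// For illustration: ((x << 4) >>u 6) with the top six bits not demanded
// becomes (x >>u 2); with the shift amounts swapped it becomes (x << 2).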
if (InOp.getOpcode() == ISD::SHL &&
isa<ConstantSDNode>(InOp.getOperand(1))) {
if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
unsigned Opc = ISD::SRL;
int Diff = ShAmt-C1;
if (Diff < 0) {
Diff = -Diff;
Opc = ISD::SHL;
}
SDValue NewSA =
TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
InOp.getOperand(0), NewSA));
}
}
// Compute the new bits that are at the top now.
if (SimplifyDemandedBits(InOp, InDemandedMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
KnownZero |= HighBits; // High bits known zero.
}
break;
case ISD::SRA:
// If this is an arithmetic shift right and only the low-bit is set, we can
// always convert this into a logical shr, even if the shift amount is
// variable. The low bit of the shift cannot be an input sign bit unless
// the shift amount is >= the size of the datatype, which is undefined.
if (NewMask == 1)
return TLO.CombineTo(Op,
TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
Op.getOperand(0), Op.getOperand(1)));
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
EVT VT = Op.getValueType();
unsigned ShAmt = SA->getZExtValue();
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
break;
APInt InDemandedMask = (NewMask << ShAmt);
// If the shift is exact, then it does demand the low bits (and knows that
// they are zero).
if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);
// If any of the demanded bits are produced by the sign extension, we also
// demand the input sign bit.
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
if (HighBits.intersects(NewMask))
InDemandedMask |= APInt::getSignBit(VT.getScalarType().getSizeInBits());
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
// Handle the sign bit, adjusted to where it is now in the mask.
APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
SDNodeFlags Flags;
Flags.setExact(cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact());
return TLO.CombineTo(Op,
TLO.DAG.getNode(ISD::SRL, dl, VT, Op.getOperand(0),
Op.getOperand(1), &Flags));
}
int Log2 = NewMask.exactLogBase2();
if (Log2 >= 0) {
// The bit must come from the sign.
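// For illustration (i8): if only bit 6 of (x >>s 4) is demanded, that bit
// is a copy of the sign bit, so it can be produced as (x >>u 1): bit 6 of
// (x >>u 1) is bit 7 of x.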
SDValue NewSA =
TLO.DAG.getConstant(BitWidth - 1 - Log2, dl,
Op.getOperand(1).getValueType());
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
Op.getOperand(0), NewSA));
}
if (KnownOne.intersects(SignBit))
// New bits are known one.
KnownOne |= HighBits;
}
break;
case ISD::SIGN_EXTEND_INREG: {
EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
// If we only care about the highest bit, don't bother shifting right.
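// For illustration: sext_inreg(x, i8) on an i32 value with only bit 31
// demanded becomes (x << 24), which moves bit 7 of x (the sign bit being
// replicated) into bit 31.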
if (MsbMask == NewMask) {
unsigned ShAmt = ExVT.getScalarType().getSizeInBits();
SDValue InOp = Op.getOperand(0);
unsigned VTBits = Op->getValueType(0).getScalarType().getSizeInBits();
bool AlreadySignExtended =
TLO.DAG.ComputeNumSignBits(InOp) >= VTBits-ShAmt+1;
// However if the input is already sign extended we expect the sign
// extension to be dropped altogether later and do not simplify.
if (!AlreadySignExtended) {
// Compute the correct shift amount type, which must be getShiftAmountTy
// for scalar types after legalization.
EVT ShiftAmtTy = Op.getValueType();
if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, dl,
ShiftAmtTy);
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
Op.getValueType(), InOp,
ShiftAmt));
}
}
// Sign extension. Compute the demanded bits in the result that are not
// present in the input.
APInt NewBits =
APInt::getHighBitsSet(BitWidth,
BitWidth - ExVT.getScalarType().getSizeInBits());
// If none of the extended bits are demanded, eliminate the sextinreg.
if ((NewBits & NewMask) == 0)
return TLO.CombineTo(Op, Op.getOperand(0));
APInt InSignBit =
APInt::getSignBit(ExVT.getScalarType().getSizeInBits()).zext(BitWidth);
APInt InputDemandedBits =
APInt::getLowBitsSet(BitWidth,
ExVT.getScalarType().getSizeInBits()) &
NewMask;
// Since the sign extended bits are demanded, we know that the sign
// bit is demanded.
InputDemandedBits |= InSignBit;
if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
// If the input sign bit is known zero, convert this into a zero extension.
if (KnownZero.intersects(InSignBit))
return TLO.CombineTo(Op,
TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,ExVT));
if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
KnownOne |= NewBits;
KnownZero &= ~NewBits;
} else { // Input sign bit unknown
KnownZero &= ~NewBits;
KnownOne &= ~NewBits;
}
break;
}
case ISD::BUILD_PAIR: {
EVT HalfVT = Op.getOperand(0).getValueType();
unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
APInt KnownZeroLo, KnownOneLo;
APInt KnownZeroHi, KnownOneHi;
if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
KnownOneLo, TLO, Depth + 1))
return true;
if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
KnownOneHi, TLO, Depth + 1))
return true;
KnownZero = KnownZeroLo.zext(BitWidth) |
KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);
KnownOne = KnownOneLo.zext(BitWidth) |
KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
break;
}
case ISD::ZERO_EXTEND: {
unsigned OperandBitWidth =
Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
APInt InMask = NewMask.trunc(OperandBitWidth);
// If none of the top bits are demanded, convert this into an any_extend.
APInt NewBits =
APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
if (!NewBits.intersects(NewMask))
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
Op.getValueType(),
Op.getOperand(0)));
if (SimplifyDemandedBits(Op.getOperand(0), InMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
KnownZero |= NewBits;
break;
}
case ISD::SIGN_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
APInt InMask = APInt::getLowBitsSet(BitWidth, InBits);
APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
APInt NewBits = ~InMask & NewMask;
// If none of the top bits are demanded, convert this into an any_extend.
if (NewBits == 0)
return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
Op.getValueType(),
Op.getOperand(0)));
// Since some of the sign extended bits are demanded, we know that the sign
// bit is demanded.
APInt InDemandedBits = InMask & NewMask;
InDemandedBits |= InSignBit;
InDemandedBits = InDemandedBits.trunc(InBits);
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
KnownOne, TLO, Depth+1))
return true;
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
// If the sign bit is known zero, convert this to a zero extend.
if (KnownZero.intersects(InSignBit))
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
Op.getValueType(),
Op.getOperand(0)));
// If the sign bit is known one, the top bits match.
if (KnownOne.intersects(InSignBit)) {
KnownOne |= NewBits;
assert((KnownZero & NewBits) == 0);
} else { // Otherwise, top bits aren't known.
assert((KnownOne & NewBits) == 0);
assert((KnownZero & NewBits) == 0);
}
break;
}
case ISD::ANY_EXTEND: {
unsigned OperandBitWidth =
Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
APInt InMask = NewMask.trunc(OperandBitWidth);
if (SimplifyDemandedBits(Op.getOperand(0), InMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
// Simplify the input, using demanded bit information, and compute the known
// zero/one bits live out.
unsigned OperandBitWidth =
Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
APInt TruncMask = NewMask.zext(OperandBitWidth);
if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
KnownZero = KnownZero.trunc(BitWidth);
KnownOne = KnownOne.trunc(BitWidth);
// If the input is only used by this truncate, see if we can shrink it based
// on the known demanded bits.
if (Op.getOperand(0).getNode()->hasOneUse()) {
SDValue In = Op.getOperand(0);
switch (In.getOpcode()) {
default: break;
case ISD::SRL:
// Shrink SRL by a constant if none of the high bits shifted in are
// demanded.
if (TLO.LegalTypes() &&
!isTypeDesirableForOp(ISD::SRL, Op.getValueType()))
// Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
// undesirable.
break;
ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
if (!ShAmt)
break;
SDValue Shift = In.getOperand(1);
if (TLO.LegalTypes()) {
uint64_t ShVal = ShAmt->getZExtValue();
Shift = TLO.DAG.getConstant(ShVal, dl,
getShiftAmountTy(Op.getValueType(), DL));
}
APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
OperandBitWidth - BitWidth);
HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
// None of the shifted in bits are needed. Add a truncate of the
// shift input, then shift it.
SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
Op.getValueType(),
In.getOperand(0));
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
Op.getValueType(),
NewTrunc,
Shift));
}
break;
}
}
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
break;
}
case ISD::AssertZext: {
// AssertZext demands all of the high bits, plus any of the low bits
// demanded by its users.
EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt InMask = APInt::getLowBitsSet(BitWidth,
VT.getSizeInBits());
if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | NewMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero |= ~InMask & NewMask;
break;
}
case ISD::BITCAST:
// If this is an FP->Int bitcast and if the sign bit is the only
// thing demanded, turn this into a FGETSIGN.
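// For illustration: a bitcast of f32 to i32 where only bit 31 is demanded
// becomes (fgetsign x) << 31.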
if (!TLO.LegalOperations() &&
!Op.getValueType().isVector() &&
!Op.getOperand(0).getValueType().isVector() &&
NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) &&
Op.getOperand(0).getValueType().isFloatingPoint()) {
bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple()) {
EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32;
// Make a FGETSIGN + SHL to move the sign bit into the appropriate
// place. We expect the SHL to be eliminated by other optimizations.
SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
unsigned OpVTSizeInBits = Op.getValueType().getSizeInBits();
if (!OpVTLegal && OpVTSizeInBits > 32)
Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign);
unsigned ShVal = Op.getValueType().getSizeInBits()-1;
SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, Op.getValueType());
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
Op.getValueType(),
Sign, ShAmt));
}
}
break;
case ISD::ADD:
case ISD::MUL:
case ISD::SUB: {
// Add, Sub, and Mul don't demand any bits in positions beyond that
// of the highest bit demanded of them.
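// For illustration: if only the low 8 bits of (a + b) are demanded, carries
// out of bit 7 can never affect the result, so only the low 8 bits of a and
// b are demanded in turn.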
APInt LoMask = APInt::getLowBitsSet(BitWidth,
BitWidth - NewMask.countLeadingZeros());
if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
// See if the operation should be performed at a smaller bit width.
if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
}
// FALL THROUGH
default:
// Just use computeKnownBits to compute output bits.
TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
break;
}
// If we know the value of all of the demanded bits, return this as a
// constant.
if ((NewMask & (KnownZero|KnownOne)) == NewMask) {
// Avoid folding to a constant if any OpaqueConstant is involved.
const SDNode *N = Op.getNode();
for (SDNodeIterator I = SDNodeIterator::begin(N),
E = SDNodeIterator::end(N); I != E; ++I) {
SDNode *OpNode = *I; // Use a distinct name to avoid shadowing 'Op'.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpNode))
if (C->isOpaque())
return false;
}
return TLO.CombineTo(Op,
TLO.DAG.getConstant(KnownOne, dl, Op.getValueType()));
}
return false;
}
/// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
}
/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
const SelectionDAG &,
unsigned Depth) const {
assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_VOID) &&
"Should use ComputeNumSignBits if you don't know whether Op"
" is a target node!");
return 1;
}
/// ValueHasExactlyOneBitSet - Test if the given value is known to have exactly
/// one bit set. This differs from computeKnownBits in that it doesn't need to
/// determine which bit is set.
///
static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
// A left-shift of a constant one will have exactly one bit set, because
// shifting the bit off the end is undefined.
if (Val.getOpcode() == ISD::SHL)
if (ConstantSDNode *C =
dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0)))
if (C->getAPIntValue() == 1)
return true;
// Similarly, a right-shift of a constant sign-bit will have exactly
// one bit set.
if (Val.getOpcode() == ISD::SRL)
if (ConstantSDNode *C =
dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0)))
if (C->getAPIntValue().isSignBit())
return true;
// More could be done here, though the above checks are enough
// to handle some common cases.
// Fall back to computeKnownBits to catch other known cases.
EVT OpVT = Val.getValueType();
unsigned BitWidth = OpVT.getScalarType().getSizeInBits();
APInt KnownZero, KnownOne;
DAG.computeKnownBits(Val, KnownZero, KnownOne);
return (KnownZero.countPopulation() == BitWidth - 1) &&
(KnownOne.countPopulation() == 1);
}
bool TargetLowering::isConstTrueVal(const SDNode *N) const {
if (!N)
return false;
const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
if (!CN) {
const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
if (!BV)
return false;
BitVector UndefElements;
CN = BV->getConstantSplatNode(&UndefElements);
// Only interested in constant splats, and we don't try to handle undef
// elements in identifying boolean constants.
if (!CN || !UndefElements.none())
return false;
}
switch (getBooleanContents(N->getValueType(0))) {
case UndefinedBooleanContent:
return CN->getAPIntValue()[0];
case ZeroOrOneBooleanContent:
return CN->isOne();
case ZeroOrNegativeOneBooleanContent:
return CN->isAllOnesValue();
}
llvm_unreachable("Invalid boolean contents");
}
bool TargetLowering::isConstFalseVal(const SDNode *N) const {
if (!N)
return false;
const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
if (!CN) {
const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
if (!BV)
return false;
BitVector UndefElements;
CN = BV->getConstantSplatNode(&UndefElements);
// Only interested in constant splats, and we don't try to handle undef
// elements in identifying boolean constants.
if (!CN || !UndefElements.none())
return false;
}
if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
return !CN->getAPIntValue()[0];
return CN->isNullValue();
}
/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDValue.
SDValue
TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
DAGCombinerInfo &DCI, SDLoc dl) const {
SelectionDAG &DAG = DCI.DAG;
// These setcc operations always fold.
switch (Cond) {
default: break;
case ISD::SETFALSE:
case ISD::SETFALSE2: return DAG.getConstant(0, dl, VT);
case ISD::SETTRUE:
case ISD::SETTRUE2: {
TargetLowering::BooleanContent Cnt =
getBooleanContents(N0->getValueType(0));
return DAG.getConstant(
Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
VT);
}
}
// Ensure that the constant occurs on the RHS, and fold constant
// comparisons.
ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
if (isa<ConstantSDNode>(N0.getNode()) &&
(DCI.isBeforeLegalizeOps() ||
isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
const APInt &C1 = N1C->getAPIntValue();
// If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
// equality comparison, then we're just comparing whether X itself is
// zero.
if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
N0.getOperand(0).getOpcode() == ISD::CTLZ &&
N0.getOperand(1).getOpcode() == ISD::Constant) {
const APInt &ShAmt
= cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
if ((C1 == 0) == (Cond == ISD::SETEQ)) {
// (srl (ctlz x), 5) == 0 -> X != 0
// (srl (ctlz x), 5) != 1 -> X != 0
Cond = ISD::SETNE;
} else {
// (srl (ctlz x), 5) != 0 -> X == 0
// (srl (ctlz x), 5) == 1 -> X == 0
Cond = ISD::SETEQ;
}
SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
Zero, Cond);
}
}
SDValue CTPOP = N0;
// Look through truncs that don't change the value of a ctpop.
if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
CTPOP = N0.getOperand(0);
if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
(N0 == CTPOP || N0.getValueType().getSizeInBits() >
Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) {
EVT CTVT = CTPOP.getValueType();
SDValue CTOp = CTPOP.getOperand(0);
// (ctpop x) u< 2 -> (x & x-1) == 0
// (ctpop x) u> 1 -> (x & x-1) != 0
if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
DAG.getConstant(1, dl, CTVT));
SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
}
// TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
}
// (zext x) == C --> x == (trunc C)
// (sext x) == C --> x == (trunc C)
if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
DCI.isBeforeLegalize() && N0->hasOneUse()) {
unsigned MinBits = N0.getValueSizeInBits();
SDValue PreExt;
bool Signed = false;
if (N0->getOpcode() == ISD::ZERO_EXTEND) {
// ZExt
MinBits = N0->getOperand(0).getValueSizeInBits();
PreExt = N0->getOperand(0);
} else if (N0->getOpcode() == ISD::AND) {
// DAGCombine turns costly ZExts into ANDs
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
if ((C->getAPIntValue()+1).isPowerOf2()) {
MinBits = C->getAPIntValue().countTrailingOnes();
PreExt = N0->getOperand(0);
}
} else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
// SExt
MinBits = N0->getOperand(0).getValueSizeInBits();
PreExt = N0->getOperand(0);
Signed = true;
} else if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(N0)) {
// ZEXTLOAD / SEXTLOAD
if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
MinBits = LN0->getMemoryVT().getSizeInBits();
PreExt = N0;
} else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
Signed = true;
MinBits = LN0->getMemoryVT().getSizeInBits();
PreExt = N0;
}
}
// Figure out how many bits we need to preserve this constant.
unsigned ReqdBits = Signed ?
C1.getBitWidth() - C1.getNumSignBits() + 1 :
C1.getActiveBits();
// Make sure we're not losing bits from the constant.
if (MinBits > 0 &&
MinBits < C1.getBitWidth() &&
MinBits >= ReqdBits) {
EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
// Will get folded away.
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
return DAG.getSetCC(dl, VT, Trunc, C, Cond);
}
}
}
// If the LHS is '(and load, const)', the RHS is 0,
// the test is for equality or unsigned, and all 1 bits of the const are
// in the same partial word, see if we can shorten the load.
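// For illustration (little-endian): ((i32 load) & 0xFF00) == 0 only tests
// bits in the low half-word, so an i16 load of the first two bytes can
// replace the full i32 load.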
if (DCI.isBeforeLegalize() &&
!ISD::isSignedIntSetCC(Cond) &&
N0.getOpcode() == ISD::AND && C1 == 0 &&
N0.getNode()->hasOneUse() &&
isa<LoadSDNode>(N0.getOperand(0)) &&
N0.getOperand(0).getNode()->hasOneUse() &&
isa<ConstantSDNode>(N0.getOperand(1))) {
LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
APInt bestMask;
unsigned bestWidth = 0, bestOffset = 0;
if (!Lod->isVolatile() && Lod->isUnindexed()) {
unsigned origWidth = N0.getValueType().getSizeInBits();
unsigned maskWidth = origWidth;
// We can narrow (e.g.) 16-bit extending loads on 32-bit target to
// 8 bits, but have to be careful...
if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
origWidth = Lod->getMemoryVT().getSizeInBits();
const APInt &Mask =
cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
for (unsigned width = origWidth / 2; width >= 8; width /= 2) {
APInt newMask = APInt::getLowBitsSet(maskWidth, width);
for (unsigned offset = 0; offset < origWidth / width; offset++) {
if ((newMask & Mask) == Mask) {
if (!DAG.getDataLayout().isLittleEndian())
bestOffset = (origWidth/width - offset - 1) * (width/8);
else
bestOffset = (uint64_t)offset * (width/8);
bestMask = Mask.lshr(offset * (width/8) * 8);
bestWidth = width;
break;
}
newMask = newMask << width;
}
}
}
if (bestWidth) {
EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
if (newVT.isRound()) {
EVT PtrType = Lod->getOperand(1).getValueType();
SDValue Ptr = Lod->getBasePtr();
if (bestOffset != 0)
Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
DAG.getConstant(bestOffset, dl, PtrType));
unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
Lod->getPointerInfo().getWithOffset(bestOffset),
false, false, false, NewAlign);
return DAG.getSetCC(dl, VT,
DAG.getNode(ISD::AND, dl, newVT, NewLoad,
DAG.getConstant(bestMask.trunc(bestWidth),
dl, newVT)),
DAG.getConstant(0LL, dl, newVT), Cond);
}
}
}
// If the LHS is a ZERO_EXTEND, perform the comparison on the input.
if (N0.getOpcode() == ISD::ZERO_EXTEND) {
unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();
// If the comparison constant has bits in the upper part, the
// zero-extended value could never match.
if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
C1.getBitWidth() - InSize))) {
switch (Cond) {
case ISD::SETUGT:
case ISD::SETUGE:
case ISD::SETEQ: return DAG.getConstant(0, dl, VT);
case ISD::SETULT:
case ISD::SETULE:
case ISD::SETNE: return DAG.getConstant(1, dl, VT);
case ISD::SETGT:
case ISD::SETGE:
// True if the sign bit of C1 is set.
return DAG.getConstant(C1.isNegative(), dl, VT);
case ISD::SETLT:
case ISD::SETLE:
// True if the sign bit of C1 isn't set.
return DAG.getConstant(C1.isNonNegative(), dl, VT);
default:
break;
}
}
// Otherwise, we can perform the comparison with the low bits.
switch (Cond) {
case ISD::SETEQ:
case ISD::SETNE:
case ISD::SETUGT:
case ISD::SETUGE:
case ISD::SETULT:
case ISD::SETULE: {
EVT newVT = N0.getOperand(0).getValueType();
if (DCI.isBeforeLegalizeOps() ||
(isOperationLegal(ISD::SETCC, newVT) &&
getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
EVT NewSetCCVT =
getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), newVT);
SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
NewConst, Cond);
return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
}
break;
}
default:
break; // TODO: be more careful with signed comparisons.
}
} else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
(Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
EVT ExtDstTy = N0.getValueType();
unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
// If the constant doesn't fit into the number of bits for the source of
// the sign extension, it is impossible for both sides to be equal.
if (C1.getMinSignedBits() > ExtSrcTyBits)
return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
SDValue ZextOp;
EVT Op0Ty = N0.getOperand(0).getValueType();
if (Op0Ty == ExtSrcTy) {
ZextOp = N0.getOperand(0);
} else {
APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
DAG.getConstant(Imm, dl, Op0Ty));
}
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(ZextOp.getNode());
// Otherwise, make this a use of a zext.
return DAG.getSetCC(dl, VT, ZextOp,
DAG.getConstant(C1 & APInt::getLowBitsSet(
ExtDstTyBits,
ExtSrcTyBits),
dl, ExtDstTy),
Cond);
} else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
(Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
// SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
if (N0.getOpcode() == ISD::SETCC &&
isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1);
if (TrueWhenTrue)
return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
// Invert the condition.
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
CC = ISD::getSetCCInverse(CC,
N0.getOperand(0).getValueType().isInteger());
if (DCI.isBeforeLegalizeOps() ||
isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
}
if ((N0.getOpcode() == ISD::XOR ||
(N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::XOR &&
N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
isa<ConstantSDNode>(N0.getOperand(1)) &&
cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
// If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
// can only do this if the top bits are known zero.
unsigned BitWidth = N0.getValueSizeInBits();
if (DAG.MaskedValueIsZero(N0,
APInt::getHighBitsSet(BitWidth,
BitWidth-1))) {
// Okay, get the un-inverted input value.
SDValue Val;
if (N0.getOpcode() == ISD::XOR)
Val = N0.getOperand(0);
else {
assert(N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::XOR);
// ((X^1)&1)^1 -> X & 1
Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
N0.getOperand(0).getOperand(0),
N0.getOperand(1));
}
return DAG.getSetCC(dl, VT, Val, N1,
Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
}
} else if (N1C->getAPIntValue() == 1 &&
(VT == MVT::i1 ||
getBooleanContents(N0->getValueType(0)) ==
ZeroOrOneBooleanContent)) {
SDValue Op0 = N0;
if (Op0.getOpcode() == ISD::TRUNCATE)
Op0 = Op0.getOperand(0);
if ((Op0.getOpcode() == ISD::XOR) &&
Op0.getOperand(0).getOpcode() == ISD::SETCC &&
Op0.getOperand(1).getOpcode() == ISD::SETCC) {
// (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1),
Cond);
}
if (Op0.getOpcode() == ISD::AND &&
isa<ConstantSDNode>(Op0.getOperand(1)) &&
cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) {
// If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
if (Op0.getValueType().bitsGT(VT))
Op0 = DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
DAG.getConstant(1, dl, VT));
else if (Op0.getValueType().bitsLT(VT))
Op0 = DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
DAG.getConstant(1, dl, VT));
return DAG.getSetCC(dl, VT, Op0,
DAG.getConstant(0, dl, Op0.getValueType()),
Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
}
if (Op0.getOpcode() == ISD::AssertZext &&
cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
return DAG.getSetCC(dl, VT, Op0,
DAG.getConstant(0, dl, Op0.getValueType()),
Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
}
}
APInt MinVal, MaxVal;
unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
if (ISD::isSignedIntSetCC(Cond)) {
MinVal = APInt::getSignedMinValue(OperandBitSize);
MaxVal = APInt::getSignedMaxValue(OperandBitSize);
} else {
MinVal = APInt::getMinValue(OperandBitSize);
MaxVal = APInt::getMaxValue(OperandBitSize);
}
// Canonicalize GE/LE comparisons to use GT/LT comparisons.
if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
if (C1 == MinVal) return DAG.getConstant(1, dl, VT); // X >= MIN --> true
// X >= C0 --> X > (C0 - 1)
APInt C = C1 - 1;
ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
if ((DCI.isBeforeLegalizeOps() ||
isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
(!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
isLegalICmpImmediate(C.getSExtValue())))) {
return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(C, dl, N1.getValueType()),
NewCC);
}
}
if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
if (C1 == MaxVal) return DAG.getConstant(1, dl, VT); // X <= MAX --> true
// X <= C0 --> X < (C0 + 1)
APInt C = C1 + 1;
ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
if ((DCI.isBeforeLegalizeOps() ||
isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
(!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
isLegalICmpImmediate(C.getSExtValue())))) {
return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(C, dl, N1.getValueType()),
NewCC);
}
}
if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
return DAG.getConstant(0, dl, VT); // X < MIN --> false
if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
return DAG.getConstant(1, dl, VT); // X >= MIN --> true
if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
return DAG.getConstant(0, dl, VT); // X > MAX --> false
if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
return DAG.getConstant(1, dl, VT); // X <= MAX --> true
// Canonicalize setgt X, Min --> setne X, Min
if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
// Canonicalize setlt X, Max --> setne X, Max
if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
// If we have setult X, 1, turn it into seteq X, 0
if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(MinVal, dl, N0.getValueType()),
ISD::SETEQ);
// If we have setugt X, Max-1, turn it into seteq X, Max
if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(MaxVal, dl, N0.getValueType()),
ISD::SETEQ);
// If we have "setcc X, C0", check to see if we can shrink the immediate
// by changing cc.
// SETUGT X, SINTMAX -> SETLT X, 0
if (Cond == ISD::SETUGT &&
C1 == APInt::getSignedMaxValue(OperandBitSize))
return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(0, dl, N1.getValueType()),
ISD::SETLT);
// SETULT X, SINTMIN -> SETGT X, -1
if (Cond == ISD::SETULT &&
C1 == APInt::getSignedMinValue(OperandBitSize)) {
SDValue ConstMinusOne =
DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
N1.getValueType());
return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
}
// Fold bit comparisons when we can.
if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
(VT == N0.getValueType() ||
(isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
N0.getOpcode() == ISD::AND) {
auto &DL = DAG.getDataLayout();
if (ConstantSDNode *AndRHS =
dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
EVT ShiftTy = DCI.isBeforeLegalize()
? getPointerTy(DL)
: getShiftAmountTy(N0.getValueType(), DL);
if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0 --> (X & 8) >> 3
// Perform the xform if the AND RHS is a single bit.
if (AndRHS->getAPIntValue().isPowerOf2()) {
return DAG.getNode(ISD::TRUNCATE, dl, VT,
DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
DAG.getConstant(AndRHS->getAPIntValue().logBase2(), dl,
ShiftTy)));
}
} else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
// (X & 8) == 8 --> (X & 8) >> 3
// Perform the xform if C1 is a single bit.
if (C1.isPowerOf2()) {
return DAG.getNode(ISD::TRUNCATE, dl, VT,
DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
DAG.getConstant(C1.logBase2(), dl,
ShiftTy)));
}
}
}
}
if (C1.getMinSignedBits() <= 64 &&
!isLegalICmpImmediate(C1.getSExtValue())) {
// (X & -256) == 256 -> (X >> 8) == 1
if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
if (ConstantSDNode *AndRHS =
dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
const APInt &AndRHSC = AndRHS->getAPIntValue();
if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
unsigned ShiftBits = AndRHSC.countTrailingZeros();
auto &DL = DAG.getDataLayout();
EVT ShiftTy = DCI.isBeforeLegalize()
? getPointerTy(DL)
: getShiftAmountTy(N0.getValueType(), DL);
EVT CmpTy = N0.getValueType();
SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
DAG.getConstant(ShiftBits, dl,
ShiftTy));
SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, CmpTy);
return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
}
}
} else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
Cond == ISD::SETULE || Cond == ISD::SETUGT) {
bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
// X < 0x100000000 -> (X >> 32) < 1
// X >= 0x100000000 -> (X >> 32) >= 1
// X <= 0x0ffffffff -> (X >> 32) < 1
// X > 0x0ffffffff -> (X >> 32) >= 1
unsigned ShiftBits;
APInt NewC = C1;
ISD::CondCode NewCond = Cond;
if (AdjOne) {
ShiftBits = C1.countTrailingOnes();
NewC = NewC + 1;
NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
} else {
ShiftBits = C1.countTrailingZeros();
}
NewC = NewC.lshr(ShiftBits);
if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
isLegalICmpImmediate(NewC.getSExtValue())) {
auto &DL = DAG.getDataLayout();
EVT ShiftTy = DCI.isBeforeLegalize()
? getPointerTy(DL)
: getShiftAmountTy(N0.getValueType(), DL);
EVT CmpTy = N0.getValueType();
SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
DAG.getConstant(ShiftBits, dl, ShiftTy));
SDValue CmpRHS = DAG.getConstant(NewC, dl, CmpTy);
return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
}
}
}
}
if (isa<ConstantFPSDNode>(N0.getNode())) {
// Constant fold or commute setcc.
SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
if (O.getNode()) return O;
} else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
// If the RHS of an FP comparison is a constant, simplify it away in
// some cases.
if (CFP->getValueAPF().isNaN()) {
// If an operand is known to be a nan, we can fold it.
switch (ISD::getUnorderedFlavor(Cond)) {
default: llvm_unreachable("Unknown flavor!");
case 0: // Known false.
return DAG.getConstant(0, dl, VT);
case 1: // Known true.
return DAG.getConstant(1, dl, VT);
case 2: // Undefined.
return DAG.getUNDEF(VT);
}
}
// Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
// constant if knowing that the operand is non-NaN is enough. We prefer to
// have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
// materialize 0.0.
if (Cond == ISD::SETO || Cond == ISD::SETUO)
return DAG.getSetCC(dl, VT, N0, N0, Cond);
// If the condition is not legal, see if we can find an equivalent one
// which is legal.
if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
// If the comparison was an awkward floating-point == or != and one of
// the comparison operands is infinity or negative infinity, convert the
// condition to a less-awkward <= or >=.
if (CFP->getValueAPF().isInfinity()) {
if (CFP->getValueAPF().isNegative()) {
if (Cond == ISD::SETOEQ &&
isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
if (Cond == ISD::SETUEQ &&
isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
if (Cond == ISD::SETUNE &&
isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
if (Cond == ISD::SETONE &&
isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
} else {
if (Cond == ISD::SETOEQ &&
isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
if (Cond == ISD::SETUEQ &&
isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
if (Cond == ISD::SETUNE &&
isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
if (Cond == ISD::SETONE &&
isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
}
}
}
}
if (N0 == N1) {
// The sext(setcc()) => setcc() optimization relies on the appropriate
// constant being emitted.
uint64_t EqVal = 0;
switch (getBooleanContents(N0.getValueType())) {
case UndefinedBooleanContent:
case ZeroOrOneBooleanContent:
EqVal = ISD::isTrueWhenEqual(Cond);
break;
case ZeroOrNegativeOneBooleanContent:
EqVal = ISD::isTrueWhenEqual(Cond) ? -1 : 0;
break;
}
// We can always fold X == X for integer setcc's.
if (N0.getValueType().isInteger()) {
return DAG.getConstant(EqVal, dl, VT);
}
unsigned UOF = ISD::getUnorderedFlavor(Cond);
if (UOF == 2) // FP operators that are undefined on NaNs.
return DAG.getConstant(EqVal, dl, VT);
if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
return DAG.getConstant(EqVal, dl, VT);
// Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
// if it is not already.
ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
if (NewCond != Cond && (DCI.isBeforeLegalizeOps() ||
getCondCodeAction(NewCond, N0.getSimpleValueType()) == Legal))
return DAG.getSetCC(dl, VT, N0, N1, NewCond);
}
if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
N0.getValueType().isInteger()) {
if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
N0.getOpcode() == ISD::XOR) {
// Simplify (X+Y) == (X+Z) --> Y == Z
if (N0.getOpcode() == N1.getOpcode()) {
if (N0.getOperand(0) == N1.getOperand(0))
return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
if (N0.getOperand(1) == N1.getOperand(1))
return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
if (DAG.isCommutativeBinOp(N0.getOpcode())) {
// If X op Y == Y op X, try other combinations.
if (N0.getOperand(0) == N1.getOperand(1))
return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
Cond);
if (N0.getOperand(1) == N1.getOperand(0))
return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
Cond);
}
}
// If RHS is a legal immediate value for a compare instruction, we need
// to be careful about increasing register pressure needlessly.
bool LegalRHSImm = false;
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
// Turn (X+C1) == C2 --> X == C2-C1
if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
return DAG.getSetCC(dl, VT, N0.getOperand(0),
DAG.getConstant(RHSC->getAPIntValue()-
LHSR->getAPIntValue(),
dl, N0.getValueType()), Cond);
}
// Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
if (N0.getOpcode() == ISD::XOR)
// If we know that all of the inverted bits are zero, don't bother
// performing the inversion.
if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
return
DAG.getSetCC(dl, VT, N0.getOperand(0),
DAG.getConstant(LHSR->getAPIntValue() ^
RHSC->getAPIntValue(),
dl, N0.getValueType()),
Cond);
}
// Turn (C1-X) == C2 --> X == C1-C2
if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
return
DAG.getSetCC(dl, VT, N0.getOperand(1),
DAG.getConstant(SUBC->getAPIntValue() -
RHSC->getAPIntValue(),
dl, N0.getValueType()),
Cond);
}
}
// Could RHSC fold directly into a compare?
if (RHSC->getValueType(0).getSizeInBits() <= 64)
LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
}
// Simplify (X+Z) == X --> Z == 0
// Don't do this if X is an immediate that can fold into a cmp
// instruction and X+Z has other uses. It could be an induction variable
// chain, and the transform would increase register pressure.
if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
if (N0.getOperand(0) == N1)
return DAG.getSetCC(dl, VT, N0.getOperand(1),
DAG.getConstant(0, dl, N0.getValueType()), Cond);
if (N0.getOperand(1) == N1) {
if (DAG.isCommutativeBinOp(N0.getOpcode()))
return DAG.getSetCC(dl, VT, N0.getOperand(0),
DAG.getConstant(0, dl, N0.getValueType()),
Cond);
if (N0.getNode()->hasOneUse()) {
assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
auto &DL = DAG.getDataLayout();
// (Z-X) == X --> Z == X<<1
SDValue SH = DAG.getNode(
ISD::SHL, dl, N1.getValueType(), N1,
DAG.getConstant(1, dl,
getShiftAmountTy(N1.getValueType(), DL)));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(SH.getNode());
return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
}
}
}
}
if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
N1.getOpcode() == ISD::XOR) {
// Simplify X == (X+Z) --> Z == 0
if (N1.getOperand(0) == N0)
return DAG.getSetCC(dl, VT, N1.getOperand(1),
DAG.getConstant(0, dl, N1.getValueType()), Cond);
if (N1.getOperand(1) == N0) {
if (DAG.isCommutativeBinOp(N1.getOpcode()))
return DAG.getSetCC(dl, VT, N1.getOperand(0),
DAG.getConstant(0, dl, N1.getValueType()), Cond);
if (N1.getNode()->hasOneUse()) {
assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
auto &DL = DAG.getDataLayout();
// X == (Z-X) --> X<<1 == Z
SDValue SH = DAG.getNode(
ISD::SHL, dl, N1.getValueType(), N0,
DAG.getConstant(1, dl, getShiftAmountTy(N0.getValueType(), DL)));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(SH.getNode());
return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
}
}
}
// Simplify x&y == y to x&y != 0 if y has exactly one bit set.
// Note that where y is variable and is known to have at most
// one bit set (for example, if it is z&1) we cannot do this;
// the expressions are not equivalent when y==0.
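// For illustration: (x & 8) == 8 becomes (x & 8) != 0, and (x & 8) != 8
// becomes (x & 8) == 0, since the constant 8 has exactly one bit set.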
if (N0.getOpcode() == ISD::AND)
if (N0.getOperand(0) == N1 || N0.getOperand(1) == N1) {
if (ValueHasExactlyOneBitSet(N1, DAG)) {
Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
if (DCI.isBeforeLegalizeOps() ||
isCondCodeLegal(Cond, N0.getSimpleValueType())) {
SDValue Zero = DAG.getConstant(0, dl, N1.getValueType());
return DAG.getSetCC(dl, VT, N0, Zero, Cond);
}
}
}
if (N1.getOpcode() == ISD::AND)
if (N1.getOperand(0) == N0 || N1.getOperand(1) == N0) {
if (ValueHasExactlyOneBitSet(N0, DAG)) {
Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
if (DCI.isBeforeLegalizeOps() ||
isCondCodeLegal(Cond, N1.getSimpleValueType())) {
SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
return DAG.getSetCC(dl, VT, N1, Zero, Cond);
}
}
}
}
// Fold away ALL boolean setcc's.
SDValue Temp;
if (N0.getValueType() == MVT::i1 && foldBooleans) {
switch (Cond) {
default: llvm_unreachable("Unknown integer setcc!");
case ISD::SETEQ: // X == Y -> ~(X^Y)
Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
N0 = DAG.getNOT(dl, Temp, MVT::i1);
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETNE: // X != Y --> (X^Y)
N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
break;
case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y
case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y
Temp = DAG.getNOT(dl, N0, MVT::i1);
N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X
case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X
Temp = DAG.getNOT(dl, N1, MVT::i1);
N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
Temp = DAG.getNOT(dl, N0, MVT::i1);
N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
Temp = DAG.getNOT(dl, N1, MVT::i1);
N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
break;
}
if (VT != MVT::i1) {
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(N0.getNode());
// FIXME: If running after legalize, we probably can't do this.
N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0);
}
return N0;
}
// Could not fold it.
return SDValue();
}
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
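/// For example, given (add (GlobalAddress @g, offset 8), 4) this sets GA = @g
/// and accumulates Offset += 12; note the offset is added to the caller's
/// existing Offset value rather than overwriting it.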
bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
int64_t &Offset) const {
if (isa<GlobalAddressSDNode>(N)) {
GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
GA = GASD->getGlobal();
Offset += GASD->getOffset();
return true;
}
if (N->getOpcode() == ISD::ADD) {
SDValue N1 = N->getOperand(0);
SDValue N2 = N->getOperand(1);
if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
if (V) {
Offset += V->getSExtValue();
return true;
}
} else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
if (V) {
Offset += V->getSExtValue();
return true;
}
}
}
return false;
}
SDValue TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
// Default implementation: no optimization.
return SDValue();
}
//===----------------------------------------------------------------------===//
// Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//
TargetLowering::ConstraintType
TargetLowering::getConstraintType(StringRef Constraint) const {
unsigned S = Constraint.size();
if (S == 1) {
switch (Constraint[0]) {
default: break;
case 'r': return C_RegisterClass;
case 'm': // memory
case 'o': // offsetable
case 'V': // not offsetable
return C_Memory;
case 'i': // Simple Integer or Relocatable Constant
case 'n': // Simple Integer
case 'E': // Floating Point Constant
case 'F': // Floating Point Constant
case 's': // Relocatable Constant
case 'p': // Address.
case 'X': // Allow ANY value.
    case 'I': // Target-specific constant constraints.
case 'J':
case 'K':
case 'L':
case 'M':
case 'N':
case 'O':
case 'P':
case '<':
case '>':
return C_Other;
}
}
if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
return C_Memory;
return C_Register;
}
return C_Unknown;
}
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
if (ConstraintVT.isInteger())
return "r";
if (ConstraintVT.isFloatingPoint())
return "f"; // works for many targets
return nullptr;
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
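/// For example, with constraint 'i' an operand (add @gv, 4) is lowered to a
/// TargetGlobalAddress of @gv with offset 4, so the addition is folded away
/// rather than instruction-selected separately.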
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
if (Constraint.length() > 1) return;
char ConstraintLetter = Constraint[0];
switch (ConstraintLetter) {
default: break;
case 'X': // Allows any operand; labels (basic block) use this.
if (Op.getOpcode() == ISD::BasicBlock) {
Ops.push_back(Op);
return;
}
// fall through
case 'i': // Simple Integer or Relocatable Constant
case 'n': // Simple Integer
case 's': { // Relocatable Constant
    // These constraints accept values of the form (GV+C), where C may be
    // folded in as an offset of GV, or it may be explicitly added. It is
    // also fine if either GV or C is missing.
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
// If we have "(add GV, C)", pull out GV/C
if (Op.getOpcode() == ISD::ADD) {
C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
if (!C || !GA) {
C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
}
if (!C || !GA)
C = nullptr, GA = nullptr;
}
// If we find a valid operand, map to the TargetXXX version so that the
// value itself doesn't get selected.
if (GA) { // Either &GV or &GV+C
if (ConstraintLetter != 'n') {
int64_t Offs = GA->getOffset();
if (C) Offs += C->getZExtValue();
Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
C ? SDLoc(C) : SDLoc(),
Op.getValueType(), Offs));
}
return;
}
if (C) { // just C, no GV.
// Simple constants are not allowed for 's'.
if (ConstraintLetter != 's') {
// gcc prints these as sign extended. Sign extend value to 64 bits
// now; without this it would get ZExt'd later in
// ScheduleDAGSDNodes::EmitNode, which is very generic.
Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
SDLoc(C), MVT::i64));
}
return;
}
break;
}
}
}
std::pair<unsigned, const TargetRegisterClass *>
TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
StringRef Constraint,
MVT VT) const {
if (Constraint.empty() || Constraint[0] != '{')
return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
// Remove the braces from around the name.
StringRef RegName(Constraint.data()+1, Constraint.size()-2);
std::pair<unsigned, const TargetRegisterClass*> R =
std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
// Figure out which register class contains this reg.
for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
E = RI->regclass_end(); RCI != E; ++RCI) {
const TargetRegisterClass *RC = *RCI;
// If none of the value types for this register class are valid, we
// can't use it. For example, 64-bit reg classes on 32-bit targets.
if (!isLegalRC(RC))
continue;
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I) {
if (RegName.equals_lower(RI->getName(*I))) {
std::pair<unsigned, const TargetRegisterClass*> S =
std::make_pair(*I, RC);
// If this register class has the requested value type, return it,
// otherwise keep searching and return the first class found
// if no other is found which explicitly has the requested type.
if (RC->hasType(VT))
return S;
else if (!R.second)
R = S;
}
}
}
return R;
}
//===----------------------------------------------------------------------===//
// Constraint Selection.
//===----------------------------------------------------------------------===//
/// isMatchingInputConstraint - Return true if this is an input operand that is
/// a matching constraint like "4".
bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
assert(!ConstraintCode.empty() && "No known constraint!");
return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
}
/// getMatchedOperand - If this is an input matching constraint, this method
/// returns the output operand it matches.
unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
assert(!ConstraintCode.empty() && "No known constraint!");
return atoi(ConstraintCode.c_str());
}
/// ParseConstraints - Split up the constraint string from the inline
/// assembly value into the specific constraints and their prefixes,
/// and also tie in the associated operand values.
/// If this returns an empty vector, and if the constraint string itself
/// isn't empty, there was an error parsing.
TargetLowering::AsmOperandInfoVector
TargetLowering::ParseConstraints(const DataLayout &DL,
const TargetRegisterInfo *TRI,
ImmutableCallSite CS) const {
/// ConstraintOperands - Information about all of the constraints.
AsmOperandInfoVector ConstraintOperands;
const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
unsigned maCount = 0; // Largest number of multiple alternative constraints.
// Do a prepass over the constraints, canonicalizing them, and building up the
// ConstraintOperands list.
unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
unsigned ResNo = 0; // ResNo - The result number of the next output.
for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
ConstraintOperands.emplace_back(std::move(CI));
AsmOperandInfo &OpInfo = ConstraintOperands.back();
// Update multiple alternative constraint count.
if (OpInfo.multipleAlternatives.size() > maCount)
maCount = OpInfo.multipleAlternatives.size();
OpInfo.ConstraintVT = MVT::Other;
// Compute the value type for each operand.
switch (OpInfo.Type) {
case InlineAsm::isOutput:
// Indirect outputs just consume an argument.
if (OpInfo.isIndirect) {
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
}
// The return value of the call is this value. As such, there is no
// corresponding argument.
assert(!CS.getType()->isVoidTy() &&
"Bad inline asm!");
if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpInfo.ConstraintVT =
getSimpleValueType(DL, STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
}
++ResNo;
break;
case InlineAsm::isInput:
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
case InlineAsm::isClobber:
// Nothing to do.
break;
}
if (OpInfo.CallOperandVal) {
llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
if (OpInfo.isIndirect) {
llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}
// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
if (StructType *STy = dyn_cast<StructType>(OpTy))
if (STy->getNumElements() == 1)
OpTy = STy->getElementType(0);
// If OpTy is not a single value, it may be a struct/union that we
// can tile with integers.
if (!OpTy->isSingleValueType() && OpTy->isSized()) {
unsigned BitSize = DL.getTypeSizeInBits(OpTy);
switch (BitSize) {
default: break;
case 1:
case 8:
case 16:
case 32:
case 64:
case 128:
OpInfo.ConstraintVT =
MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
break;
}
} else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
} else {
OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
}
}
}
// If we have multiple alternative constraints, select the best alternative.
if (!ConstraintOperands.empty()) {
if (maCount) {
unsigned bestMAIndex = 0;
int bestWeight = -1;
// weight: -1 = invalid match, and 0 = so-so match to 5 = good match.
int weight = -1;
unsigned maIndex;
// Compute the sums of the weights for each alternative, keeping track
// of the best (highest weight) one so far.
for (maIndex = 0; maIndex < maCount; ++maIndex) {
int weightSum = 0;
for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
cIndex != eIndex; ++cIndex) {
AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
if (OpInfo.Type == InlineAsm::isClobber)
continue;
// If this is an output operand with a matching input operand,
// look up the matching input. If their types mismatch, e.g. one
// is an integer, the other is floating point, or their sizes are
          // different, flag it as maCantMatch.
if (OpInfo.hasMatchingInput()) {
AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
(OpInfo.ConstraintVT.getSizeInBits() !=
Input.ConstraintVT.getSizeInBits())) {
weightSum = -1; // Can't match.
break;
}
}
}
weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
if (weight == -1) {
weightSum = -1;
break;
}
weightSum += weight;
}
// Update best.
if (weightSum > bestWeight) {
bestWeight = weightSum;
bestMAIndex = maIndex;
}
}
// Now select chosen alternative in each constraint.
for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
cIndex != eIndex; ++cIndex) {
AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
if (cInfo.Type == InlineAsm::isClobber)
continue;
cInfo.selectAlternative(bestMAIndex);
}
}
}
// Check and hook up tied operands, choose constraint code to use.
for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
cIndex != eIndex; ++cIndex) {
AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
// If this is an output operand with a matching input operand, look up the
// matching input. If their types mismatch, e.g. one is an integer, the
// other is floating point, or their sizes are different, flag it as an
// error.
if (OpInfo.hasMatchingInput()) {
AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
std::pair<unsigned, const TargetRegisterClass *> MatchRC =
getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
OpInfo.ConstraintVT);
std::pair<unsigned, const TargetRegisterClass *> InputRC =
getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
Input.ConstraintVT);
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
(MatchRC.second != InputRC.second)) {
report_fatal_error("Unsupported asm: input constraint"
" with a matching output constraint of"
" incompatible type!");
}
}
}
}
return ConstraintOperands;
}
/// getConstraintGenerality - Return an integer indicating how general CT
/// is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
switch (CT) {
case TargetLowering::C_Other:
case TargetLowering::C_Unknown:
return 0;
case TargetLowering::C_Register:
return 1;
case TargetLowering::C_RegisterClass:
return 2;
case TargetLowering::C_Memory:
return 3;
}
llvm_unreachable("Invalid constraint type");
}
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
TargetLowering::getMultipleConstraintMatchWeight(
AsmOperandInfo &info, int maIndex) const {
InlineAsm::ConstraintCodeVector *rCodes;
if (maIndex >= (int)info.multipleAlternatives.size())
rCodes = &info.Codes;
else
rCodes = &info.multipleAlternatives[maIndex].Codes;
ConstraintWeight BestWeight = CW_Invalid;
// Loop over the options, keeping track of the most general one.
for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
ConstraintWeight weight =
getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
if (weight > BestWeight)
BestWeight = weight;
}
return BestWeight;
}
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
TargetLowering::getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const {
ConstraintWeight weight = CW_Invalid;
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
if (!CallOperandVal)
return CW_Default;
// Look at the constraint type.
switch (*constraint) {
case 'i': // immediate integer.
case 'n': // immediate integer with a known value.
if (isa<ConstantInt>(CallOperandVal))
weight = CW_Constant;
break;
  case 's': // non-explicit integral immediate.
if (isa<GlobalValue>(CallOperandVal))
weight = CW_Constant;
break;
case 'E': // immediate float if host format.
case 'F': // immediate float.
if (isa<ConstantFP>(CallOperandVal))
weight = CW_Constant;
break;
case '<': // memory operand with autodecrement.
case '>': // memory operand with autoincrement.
case 'm': // memory operand.
case 'o': // offsettable memory operand
case 'V': // non-offsettable memory operand
weight = CW_Memory;
break;
case 'r': // general register.
case 'g': // general register, memory operand or immediate integer.
// note: Clang converts "g" to "imr".
if (CallOperandVal->getType()->isIntegerTy())
weight = CW_Register;
break;
case 'X': // any operand.
default:
weight = CW_Default;
break;
}
return weight;
}
/// ChooseConstraint - If there are multiple different constraints that we
/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
/// Other -> immediates and magic values
/// Register -> one specific register
/// RegisterClass -> a group of regs
/// Memory -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it. The problem here
/// is that if we have something that could either be in a register or in
/// memory, the use of the register could cause selection of *other*
/// operands to fail: they might only succeed if we pick memory. Because of
/// this the heuristic we use is:
///
/// 1) If there is an 'other' constraint, and if the operand is valid for
/// that constraint, use it. This makes us take advantage of 'i'
/// constraints when available.
/// 2) Otherwise, pick the most general constraint present. This prefers
/// 'm' over 'r', for example.
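///
/// For example, for the constraint string "imr" with a ConstantInt operand,
/// rule 1 selects 'i'; with a non-constant operand, rule 2 selects 'm',
/// since memory is the most general class (see getConstraintGenerality).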
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
const TargetLowering &TLI,
SDValue Op, SelectionDAG *DAG) {
assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
unsigned BestIdx = 0;
TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
int BestGenerality = -1;
// Loop over the options, keeping track of the most general one.
for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
TargetLowering::ConstraintType CType =
TLI.getConstraintType(OpInfo.Codes[i]);
// If this is an 'other' constraint, see if the operand is valid for it.
// For example, on X86 we might have an 'rI' constraint. If the operand
// is an integer in the range [0..31] we want to use I (saving a load
// of a register), otherwise we must use 'r'.
if (CType == TargetLowering::C_Other && Op.getNode()) {
assert(OpInfo.Codes[i].size() == 1 &&
"Unhandled multi-letter 'other' constraint");
std::vector<SDValue> ResultOps;
TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
ResultOps, *DAG);
if (!ResultOps.empty()) {
BestType = CType;
BestIdx = i;
break;
}
}
// Things with matching constraints can only be registers, per gcc
// documentation. This mainly affects "g" constraints.
if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
continue;
// This constraint letter is more general than the previous one, use it.
int Generality = getConstraintGenerality(CType);
if (Generality > BestGenerality) {
BestType = CType;
BestIdx = i;
BestGenerality = Generality;
}
}
OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
OpInfo.ConstraintType = BestType;
}
/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
SelectionDAG *DAG) const {
assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
// Single-letter constraints ('r') are very common.
if (OpInfo.Codes.size() == 1) {
OpInfo.ConstraintCode = OpInfo.Codes[0];
OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
} else {
ChooseConstraint(OpInfo, *this, Op, DAG);
}
// 'X' matches anything.
if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
// Labels and constants are handled elsewhere ('X' is the only thing
// that matches labels). For Functions, the type here is the type of
// the result, which is not what we want to look at; leave them alone.
Value *v = OpInfo.CallOperandVal;
if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
OpInfo.CallOperandVal = v;
return;
}
// Otherwise, try to resolve it to something we know about by looking at
// the actual operand type.
if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
OpInfo.ConstraintCode = Repl;
OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
}
}
}
/// \brief Given an exact SDIV by a constant, create a multiplication
/// with the multiplicative inverse of the constant.
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
SDLoc dl, SelectionDAG &DAG,
std::vector<SDNode *> &Created) {
assert(d != 0 && "Division by zero!");
  // If the divisor is even, shift the dividend right by the divisor's
  // trailing zero count (an exact shift), leaving an odd divisor.
unsigned ShAmt = d.countTrailingZeros();
if (ShAmt) {
// TODO: For UDIV use SRL instead of SRA.
SDValue Amt =
DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
DAG.getDataLayout()));
SDNodeFlags Flags;
Flags.setExact(true);
Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
Created.push_back(Op1.getNode());
d = d.ashr(ShAmt);
}
// Calculate the multiplicative inverse, using Newton's method.
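  // For odd d, x0 = d is already correct modulo 8 (odd squares are 1 mod 8),
  // and each iteration xn *= (2 - d * xn) doubles the number of correct
  // low-order bits. E.g. for d = 3 at 8 bits: 3 -> 235 -> 171, and
  // 3 * 171 = 513 == 1 (mod 256).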
APInt t, xn = d;
while ((t = d*xn) != 1)
xn *= APInt(d.getBitWidth(), 2) - t;
SDValue Op2 = DAG.getConstant(xn, dl, Op1.getValueType());
SDValue Mul = DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
Created.push_back(Mul.getNode());
return Mul;
}
/// \brief Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG, bool IsAfterLegalization,
std::vector<SDNode *> *Created) const {
assert(Created && "No vector to hold sdiv ops.");
EVT VT = N->getValueType(0);
SDLoc dl(N);
// Check to see if we can do this.
// FIXME: We should be more aggressive here.
if (!isTypeLegal(VT))
return SDValue();
// If the sdiv has an 'exact' bit we can use a simpler lowering.
if (cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact())
return BuildExactSDIV(*this, N->getOperand(0), Divisor, dl, DAG, *Created);
APInt::ms magics = Divisor.magic();
// Multiply the numerator (operand 0) by the magic value
// FIXME: We should support doing a MUL in a wider type
SDValue Q;
if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
isOperationLegalOrCustom(ISD::MULHS, VT))
Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
DAG.getConstant(magics.m, dl, VT));
else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
N->getOperand(0),
DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
else
    return SDValue(); // No mulhs or equivalent
// If d > 0 and m < 0, add the numerator
if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
Created->push_back(Q.getNode());
}
// If d < 0 and m > 0, subtract the numerator.
if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
Created->push_back(Q.getNode());
}
auto &DL = DAG.getDataLayout();
// Shift right algebraic if shift value is nonzero
if (magics.s > 0) {
Q = DAG.getNode(
ISD::SRA, dl, VT, Q,
DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
Created->push_back(Q.getNode());
}
// Extract the sign bit and add it to the quotient
SDValue T =
DAG.getNode(ISD::SRL, dl, VT, Q,
DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
getShiftAmountTy(Q.getValueType(), DL)));
Created->push_back(T.getNode());
return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}
/// \brief Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG, bool IsAfterLegalization,
std::vector<SDNode *> *Created) const {
assert(Created && "No vector to hold udiv ops.");
EVT VT = N->getValueType(0);
SDLoc dl(N);
auto &DL = DAG.getDataLayout();
// Check to see if we can do this.
// FIXME: We should be more aggressive here.
if (!isTypeLegal(VT))
return SDValue();
// FIXME: We should use a narrower constant when the upper
// bits are known to be zero.
APInt::mu magics = Divisor.magicu();
SDValue Q = N->getOperand(0);
  // If the divisor is even, we can avoid the expensive fixup by shifting
  // the dividend upfront.
if (magics.a != 0 && !Divisor[0]) {
unsigned Shift = Divisor.countTrailingZeros();
Q = DAG.getNode(
ISD::SRL, dl, VT, Q,
DAG.getConstant(Shift, dl, getShiftAmountTy(Q.getValueType(), DL)));
Created->push_back(Q.getNode());
// Get magic number for the shifted divisor.
magics = Divisor.lshr(Shift).magicu(Shift);
assert(magics.a == 0 && "Should use cheap fixup now");
}
// Multiply the numerator (operand 0) by the magic value
// FIXME: We should support doing a MUL in a wider type
if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
isOperationLegalOrCustom(ISD::MULHU, VT))
Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, dl, VT));
else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
else
    return SDValue(); // No mulhu or equivalent
Created->push_back(Q.getNode());
if (magics.a == 0) {
assert(magics.s < Divisor.getBitWidth() &&
"We shouldn't generate an undefined shift!");
return DAG.getNode(
ISD::SRL, dl, VT, Q,
DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
} else {
SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
Created->push_back(NPQ.getNode());
NPQ = DAG.getNode(
ISD::SRL, dl, VT, NPQ,
DAG.getConstant(1, dl, getShiftAmountTy(NPQ.getValueType(), DL)));
Created->push_back(NPQ.getNode());
NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
Created->push_back(NPQ.getNode());
return DAG.getNode(
ISD::SRL, dl, VT, NPQ,
DAG.getConstant(magics.s - 1, dl,
getShiftAmountTy(NPQ.getValueType(), DL)));
}
}
bool TargetLowering::
verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
if (!isa<ConstantSDNode>(Op.getOperand(0))) {
DAG.getContext()->emitError("argument to '__builtin_return_address' must "
"be a constant integer");
return true;
}
return false;
}
//===----------------------------------------------------------------------===//
// Legalization Utilities
//===----------------------------------------------------------------------===//
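// expandMUL - Expand an integer multiply into operations on half-width
// (HiLoVT) values. Writing each operand as Hi * 2^N + Lo, where N is the
// bit width of HiLoVT, the low 2N bits of the product are
//   LL*RL + ((LL*RH + LH*RL) << N)
// since the LH*RH term only affects bits >= 2N and can be dropped.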
bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
SelectionDAG &DAG, SDValue LL, SDValue LH,
SDValue RL, SDValue RH) const {
EVT VT = N->getValueType(0);
SDLoc dl(N);
bool HasMULHS = isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
bool HasMULHU = isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
bool HasSMUL_LOHI = isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
bool HasUMUL_LOHI = isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
unsigned OuterBitSize = VT.getSizeInBits();
unsigned InnerBitSize = HiLoVT.getSizeInBits();
unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));
// LL, LH, RL, and RH must be either all NULL or all set to a value.
assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
(!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
if (!LL.getNode() && !RL.getNode() &&
isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(0));
RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(1));
}
if (!LL.getNode())
return false;
APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
if (DAG.MaskedValueIsZero(N->getOperand(0), HighMask) &&
DAG.MaskedValueIsZero(N->getOperand(1), HighMask)) {
// The inputs are both zero-extended.
if (HasUMUL_LOHI) {
// We can emit a umul_lohi.
Lo = DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(HiLoVT, HiLoVT), LL,
RL);
Hi = SDValue(Lo.getNode(), 1);
return true;
}
if (HasMULHU) {
// We can emit a mulhu+mul.
Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
return true;
}
}
if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
// The input values are both sign-extended.
if (HasSMUL_LOHI) {
// We can emit a smul_lohi.
Lo = DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(HiLoVT, HiLoVT), LL,
RL);
Hi = SDValue(Lo.getNode(), 1);
return true;
}
if (HasMULHS) {
// We can emit a mulhs+mul.
Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
Hi = DAG.getNode(ISD::MULHS, dl, HiLoVT, LL, RL);
return true;
}
}
if (!LH.getNode() && !RH.getNode() &&
isOperationLegalOrCustom(ISD::SRL, VT) &&
isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
auto &DL = DAG.getDataLayout();
unsigned ShiftAmt = VT.getSizeInBits() - HiLoVT.getSizeInBits();
SDValue Shift = DAG.getConstant(ShiftAmt, dl, getShiftAmountTy(VT, DL));
LH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(0), Shift);
LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
RH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(1), Shift);
RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
}
if (!LH.getNode())
return false;
if (HasUMUL_LOHI) {
// Lo,Hi = umul LHS, RHS.
SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, dl,
DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
Lo = UMulLOHI;
Hi = UMulLOHI.getValue(1);
RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
return true;
}
if (HasMULHU) {
Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
return true;
}
}
return false;
}
bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
SelectionDAG &DAG) const {
EVT VT = Node->getOperand(0).getValueType();
EVT NVT = Node->getValueType(0);
SDLoc dl(SDValue(Node, 0));
// FIXME: Only f32 to i64 conversions are supported.
if (VT != MVT::f32 || NVT != MVT::i64)
return false;
// Expand f32 -> i64 conversion
// This algorithm comes from compiler-rt's implementation of fixsfdi:
// https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c
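  // Worked example: 8.5f has bit pattern 0x41080000, so ExponentBits = 130,
  // Exponent = 130 - 127 = 3, and R = mantissa | implicit bit = 0x00880000.
  // Exponent (3) is not > ExponentLoBit (23), so the SRL arm shifts R right
  // by 23 - 3 = 20, giving 8; Sign is 0, so the result is 8 (truncation).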
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(),
VT.getSizeInBits());
SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
SDValue Bias = DAG.getConstant(127, dl, IntVT);
SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()), dl,
IntVT);
SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT);
SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));
auto &DL = DAG.getDataLayout();
SDValue ExponentBits = DAG.getNode(
ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT, DL)));
SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
SDValue Sign = DAG.getNode(
ISD::SRA, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT, DL)));
Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);
SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
DAG.getConstant(0x00800000, dl, IntVT));
R = DAG.getZExtOrTrunc(R, dl, NVT);
R = DAG.getSelectCC(
dl, Exponent, ExponentLoBit,
DAG.getNode(ISD::SHL, dl, NVT, R,
DAG.getZExtOrTrunc(
DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
dl, getShiftAmountTy(IntVT, DL))),
DAG.getNode(ISD::SRL, dl, NVT, R,
DAG.getZExtOrTrunc(
DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
dl, getShiftAmountTy(IntVT, DL))),
ISD::SETGT);
SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
Sign);
Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
DAG.getConstant(0, dl, NVT), Ret, ISD::SETLT);
return true;
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp | //===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a fast scheduler.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "InstrEmitter.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
#define DEBUG_TYPE "pre-RA-sched"
STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
STATISTIC(NumPRCopies, "Number of physical copies");
static RegisterScheduler
fastDAGScheduler("fast", "Fast suboptimal list scheduling",
createFastDAGScheduler);
static RegisterScheduler
linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
createDAGLinearizer);
namespace {
/// FastPriorityQueue - A degenerate priority queue that considers
/// all nodes to have the same priority.
///
struct FastPriorityQueue {
SmallVector<SUnit *, 16> Queue;
bool empty() const { return Queue.empty(); }
void push(SUnit *U) {
Queue.push_back(U);
}
SUnit *pop() {
if (empty()) return nullptr;
SUnit *V = Queue.back();
Queue.pop_back();
return V;
}
};
//===----------------------------------------------------------------------===//
/// ScheduleDAGFast - The actual "fast" list scheduler implementation.
///
class ScheduleDAGFast : public ScheduleDAGSDNodes {
private:
/// AvailableQueue - The priority queue to use for the available SUnits.
FastPriorityQueue AvailableQueue;
  /// LiveRegDefs - A map from each "live" physical register to the SUnit
  /// that defines it. These nodes must be scheduled before any other node
  /// that modifies the register can be scheduled.
unsigned NumLiveRegs;
std::vector<SUnit*> LiveRegDefs;
std::vector<unsigned> LiveRegCycles;
public:
ScheduleDAGFast(MachineFunction &mf)
: ScheduleDAGSDNodes(mf) {}
void Schedule() override;
/// AddPred - adds a predecessor edge to SUnit SU.
/// This returns true if this is a new predecessor.
void AddPred(SUnit *SU, const SDep &D) {
SU->addPred(D);
}
/// RemovePred - removes a predecessor edge from SUnit SU.
/// This returns true if an edge was removed.
void RemovePred(SUnit *SU, const SDep &D) {
SU->removePred(D);
}
private:
void ReleasePred(SUnit *SU, SDep *PredEdge);
void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
void ScheduleNodeBottomUp(SUnit*, unsigned);
SUnit *CopyAndMoveSuccessors(SUnit*);
void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
const TargetRegisterClass*,
const TargetRegisterClass*,
SmallVectorImpl<SUnit*>&);
bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
void ListScheduleBottomUp();
/// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
bool forceUnitLatencies() const override { return true; }
};
} // end anonymous namespace
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
DEBUG(dbgs() << "********** List Scheduling **********\n");
NumLiveRegs = 0;
LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
LiveRegCycles.resize(TRI->getNumRegs(), 0);
// Build the scheduling graph.
BuildSchedGraph(nullptr);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
// Execute the actual scheduling loop.
ListScheduleBottomUp();
}
//===----------------------------------------------------------------------===//
// Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
#ifndef NDEBUG
if (PredSU->NumSuccsLeft == 0) {
dbgs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
dbgs() << " has been released too many times!\n";
llvm_unreachable(nullptr);
}
#endif
--PredSU->NumSuccsLeft;
// If all the node's successors are scheduled, this node is ready
// to be scheduled. Ignore the special EntrySU node.
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
PredSU->isAvailable = true;
AvailableQueue.push(PredSU);
}
}
void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
// Bottom up: release predecessors
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
ReleasePred(SU, &*I);
if (I->isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
if (!LiveRegDefs[I->getReg()]) {
++NumLiveRegs;
LiveRegDefs[I->getReg()] = I->getSUnit();
LiveRegCycles[I->getReg()] = CurCycle;
}
}
}
}
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
SU->setHeightToAtLeast(CurCycle);
Sequence.push_back(SU);
ReleasePredecessors(SU, CurCycle);
// Release all the implicit physical register defs that are live.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isAssignedRegDep()) {
if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
assert(LiveRegDefs[I->getReg()] == SU &&
"Physical register dependency violated?");
--NumLiveRegs;
LiveRegDefs[I->getReg()] = nullptr;
LiveRegCycles[I->getReg()] = 0;
}
}
}
SU->isScheduled = true;
}
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
if (SU->getNode()->getGluedNode())
return nullptr;
SDNode *N = SU->getNode();
if (!N)
return nullptr;
SUnit *NewSU;
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
MVT VT = N->getSimpleValueType(i);
if (VT == MVT::Glue)
return nullptr;
else if (VT == MVT::Other)
TryUnfold = true;
}
for (const SDValue &Op : N->op_values()) {
MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
if (VT == MVT::Glue)
return nullptr;
}
if (TryUnfold) {
SmallVector<SDNode*, 2> NewNodes;
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
return nullptr;
DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
N = NewNodes[1];
SDNode *LoadNode = NewNodes[0];
unsigned NumVals = N->getNumValues();
unsigned OldNumVals = SU->getNode()->getNumValues();
for (unsigned i = 0; i != NumVals; ++i)
DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
SDValue(LoadNode, 1));
    NewSU = newSUnit(N);
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NewSU->NodeNum);
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
NewSU->isTwoAddress = true;
break;
}
}
if (MCID.isCommutable())
NewSU->isCommutable = true;
    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
bool isNewLoad = true;
SUnit *LoadSU;
if (LoadNode->getNodeId() != -1) {
LoadSU = &SUnits[LoadNode->getNodeId()];
isNewLoad = false;
} else {
LoadSU = newSUnit(LoadNode);
LoadNode->setNodeId(LoadSU->NodeNum);
}
SDep ChainPred;
SmallVector<SDep, 4> ChainSuccs;
SmallVector<SDep, 4> LoadPreds;
SmallVector<SDep, 4> NodePreds;
SmallVector<SDep, 4> NodeSuccs;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
ChainPred = *I;
else if (I->getSUnit()->getNode() &&
I->getSUnit()->getNode()->isOperandOf(LoadNode))
LoadPreds.push_back(*I);
else
NodePreds.push_back(*I);
}
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isCtrl())
ChainSuccs.push_back(*I);
else
NodeSuccs.push_back(*I);
}
if (ChainPred.getSUnit()) {
RemovePred(SU, ChainPred);
if (isNewLoad)
AddPred(LoadSU, ChainPred);
}
for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
const SDep &Pred = LoadPreds[i];
RemovePred(SU, Pred);
if (isNewLoad) {
AddPred(LoadSU, Pred);
}
}
for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
const SDep &Pred = NodePreds[i];
RemovePred(SU, Pred);
AddPred(NewSU, Pred);
}
for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
SDep D = NodeSuccs[i];
SUnit *SuccDep = D.getSUnit();
D.setSUnit(SU);
RemovePred(SuccDep, D);
D.setSUnit(NewSU);
AddPred(SuccDep, D);
}
for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
SDep D = ChainSuccs[i];
SUnit *SuccDep = D.getSUnit();
D.setSUnit(SU);
RemovePred(SuccDep, D);
if (isNewLoad) {
D.setSUnit(LoadSU);
AddPred(SuccDep, D);
}
}
if (isNewLoad) {
SDep D(LoadSU, SDep::Barrier);
D.setLatency(LoadSU->Latency);
AddPred(NewSU, D);
}
++NumUnfolds;
if (NewSU->NumSuccsLeft == 0) {
NewSU->isAvailable = true;
return NewSU;
}
SU = NewSU;
}
DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
NewSU = Clone(SU);
// New SUnit has the exact same predecessors.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I)
if (!I->isArtificial())
AddPred(NewSU, *I);
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isArtificial())
continue;
SUnit *SuccSU = I->getSUnit();
if (SuccSU->isScheduled) {
SDep D = *I;
D.setSUnit(NewSU);
AddPred(SuccSU, D);
D.setSUnit(SU);
DelDeps.push_back(std::make_pair(SuccSU, D));
}
}
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
RemovePred(DelDeps[i].first, DelDeps[i].second);
++NumDups;
return NewSU;
}
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
SmallVectorImpl<SUnit*> &Copies) {
SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
CopyToSU->CopySrcRC = DestRC;
CopyToSU->CopyDstRC = SrcRC;
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isArtificial())
continue;
SUnit *SuccSU = I->getSUnit();
if (SuccSU->isScheduled) {
SDep D = *I;
D.setSUnit(CopyToSU);
AddPred(SuccSU, D);
DelDeps.push_back(std::make_pair(SuccSU, *I));
}
}
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
RemovePred(DelDeps[i].first, DelDeps[i].second);
}
SDep FromDep(SU, SDep::Data, Reg);
FromDep.setLatency(SU->Latency);
AddPred(CopyFromSU, FromDep);
SDep ToDep(CopyFromSU, SDep::Data, 0);
ToDep.setLatency(CopyFromSU->Latency);
AddPred(CopyToSU, ToDep);
Copies.push_back(CopyFromSU);
Copies.push_back(CopyToSU);
++NumPRCopies;
}
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const TargetInstrInfo *TII) {
unsigned NumRes;
if (N->getOpcode() == ISD::CopyFromReg) {
// CopyFromReg has: "chain, Val, glue" so operand 1 gives the type.
NumRes = 1;
} else {
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
NumRes = MCID.getNumDefs();
for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
if (Reg == *ImpDef)
break;
++NumRes;
}
}
return N->getSimpleValueType(NumRes);
}
/// CheckForLiveRegDef - Return true and update live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
SmallVectorImpl<unsigned> &LRegs,
const TargetRegisterInfo *TRI) {
bool Added = false;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
if (RegAdded.insert(*AI).second) {
LRegs.push_back(*AI);
Added = true;
}
}
}
return Added;
}
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
SmallVectorImpl<unsigned> &LRegs){
if (NumLiveRegs == 0)
return false;
SmallSet<unsigned, 4> RegAdded;
// If this node would clobber any "live" register, then it's not ready.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isAssignedRegDep()) {
CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
RegAdded, LRegs, TRI);
}
}
for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
if (Node->getOpcode() == ISD::INLINEASM) {
// Inline asm can clobber physical defs.
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
--NumOps; // Ignore the glue operand.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
++i; // Skip the ID value.
if (InlineAsm::isRegDefKind(Flags) ||
InlineAsm::isRegDefEarlyClobberKind(Flags) ||
InlineAsm::isClobberKind(Flags)) {
// Check for def of register or earlyclobber register.
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
} else
i += NumVals;
}
continue;
}
if (!Node->isMachineOpcode())
continue;
const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
if (!MCID.ImplicitDefs)
continue;
for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
}
return !LRegs.empty();
}
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGFast::ListScheduleBottomUp() {
unsigned CurCycle = 0;
// Release any predecessors of the special Exit node.
ReleasePredecessors(&ExitSU, CurCycle);
// Add root to Available queue.
if (!SUnits.empty()) {
SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
RootSU->isAvailable = true;
AvailableQueue.push(RootSU);
}
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
SmallVector<SUnit*, 4> NotReady;
DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
Sequence.reserve(SUnits.size());
while (!AvailableQueue.empty()) {
bool Delayed = false;
LRegsMap.clear();
SUnit *CurSU = AvailableQueue.pop();
while (CurSU) {
SmallVector<unsigned, 4> LRegs;
if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
break;
Delayed = true;
LRegsMap.insert(std::make_pair(CurSU, LRegs));
CurSU->isPending = true; // This SU is not in AvailableQueue right now.
NotReady.push_back(CurSU);
CurSU = AvailableQueue.pop();
}
// All candidates are delayed due to live physical reg dependencies.
// Try code duplication or inserting cross class copies
// to resolve it.
      if (Delayed && !CurSU) {
          // Try duplicating the nodes that produce these "expensive to copy"
          // values to break the dependency. If even that doesn't work,
          // insert cross-class copies.
SUnit *TrySU = NotReady[0];
SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
const TargetRegisterClass *RC =
TRI->getMinimalPhysRegClass(Reg, VT);
const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
          // If the cross-copy register class is the same as RC, it must be
          // possible to copy the value directly; do not try to duplicate the
          // def. If it is a different class, the value can be copied, but
          // only via expensive cross-register-class copies. If it is null,
          // the value cannot be copied at all.
SUnit *NewDef = nullptr;
if (DestRC != RC) {
NewDef = CopyAndMoveSuccessors(LRDef);
if (!DestRC && !NewDef)
report_fatal_error("Can't handle live physical "
"register dependency!");
}
if (!NewDef) {
// Issue copies, these can be expensive cross register class copies.
SmallVector<SUnit*, 2> Copies;
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
<< " to SU #" << Copies.front()->NodeNum << "\n");
AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
NewDef = Copies.back();
}
DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
<< " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
AddPred(NewDef, SDep(TrySU, SDep::Artificial));
TrySU->isAvailable = false;
CurSU = NewDef;
        if (!CurSU) {
llvm_unreachable("Unable to resolve live physical register dependencies!");
}
}
// Add the nodes that aren't ready back onto the available list.
for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
NotReady[i]->isPending = false;
// May no longer be available due to backtracking.
if (NotReady[i]->isAvailable)
AvailableQueue.push(NotReady[i]);
}
NotReady.clear();
if (CurSU)
ScheduleNodeBottomUp(CurSU, CurCycle);
++CurCycle;
}
// Reverse the order since it is bottom up.
std::reverse(Sequence.begin(), Sequence.end());
#ifndef NDEBUG
VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}
namespace {
//===----------------------------------------------------------------------===//
// ScheduleDAGLinearize - A no-op "scheduler" that simply linearizes the
// DAG in topological order.
// IMPORTANT: this may not work for targets with physreg dependencies.
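// The linearization is a use-count topological sort: Schedule() initializes
// each node's id to its number of uses, and ScheduleNode() emits a node,
// decrements each operand's count, and recurses once a count reaches zero.
// Glue operands are forced to schedule immediately above their glued user,
// and the root-first Sequence is emitted in reverse by EmitSchedule().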
//
class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
public:
ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}
void Schedule() override;
MachineBasicBlock *
EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;
private:
std::vector<SDNode*> Sequence;
DenseMap<SDNode*, SDNode*> GluedMap; // Cache glue to its user
void ScheduleNode(SDNode *N);
};
} // end anonymous namespace
void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
if (N->getNodeId() != 0)
llvm_unreachable(nullptr);
if (!N->isMachineOpcode() &&
(N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
// These nodes do not need to be translated into MIs.
return;
DEBUG(dbgs() << "\n*** Scheduling: ");
DEBUG(N->dump(DAG));
Sequence.push_back(N);
unsigned NumOps = N->getNumOperands();
if (unsigned NumLeft = NumOps) {
SDNode *GluedOpN = nullptr;
do {
const SDValue &Op = N->getOperand(NumLeft-1);
SDNode *OpN = Op.getNode();
if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
// Schedule glue operand right above N.
GluedOpN = OpN;
assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
OpN->setNodeId(0);
ScheduleNode(OpN);
continue;
}
if (OpN == GluedOpN)
// Glue operand is already scheduled.
continue;
DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
if (DI != GluedMap.end() && DI->second != N)
// Users of glues are counted against the glued users.
OpN = DI->second;
unsigned Degree = OpN->getNodeId();
assert(Degree > 0 && "Predecessor over-released!");
OpN->setNodeId(--Degree);
if (Degree == 0)
ScheduleNode(OpN);
} while (--NumLeft);
}
}
/// findGluedUser - Find the representative use of a glue value by walking
/// the use chain.
static SDNode *findGluedUser(SDNode *N) {
while (SDNode *Glued = N->getGluedUser())
N = Glued;
return N;
}
void ScheduleDAGLinearize::Schedule() {
DEBUG(dbgs() << "********** DAG Linearization **********\n");
SmallVector<SDNode*, 8> Glues;
unsigned DAGSize = 0;
for (SDNode &Node : DAG->allnodes()) {
SDNode *N = &Node;
// Use node id to record degree.
unsigned Degree = N->use_size();
N->setNodeId(Degree);
unsigned NumVals = N->getNumValues();
if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
N->hasAnyUseOfValue(NumVals-1)) {
SDNode *User = findGluedUser(N);
if (User) {
Glues.push_back(N);
GluedMap.insert(std::make_pair(N, User));
}
}
if (N->isMachineOpcode() ||
(N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
++DAGSize;
}
for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
SDNode *Glue = Glues[i];
SDNode *GUser = GluedMap[Glue];
unsigned Degree = Glue->getNodeId();
unsigned UDegree = GUser->getNodeId();
// Glue user must be scheduled together with the glue operand. So other
// users of the glue operand must be treated as its users.
SDNode *ImmGUser = Glue->getGluedUser();
for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end();
ui != ue; ++ui)
if (*ui == ImmGUser)
--Degree;
GUser->setNodeId(UDegree + Degree);
Glue->setNodeId(1);
}
Sequence.reserve(DAGSize);
ScheduleNode(DAG->getRoot().getNode());
}
MachineBasicBlock*
ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
InstrEmitter Emitter(BB, InsertPos);
DenseMap<SDValue, unsigned> VRBaseMap;
DEBUG({
dbgs() << "\n*** Final schedule ***\n";
});
// FIXME: Handle dbg_values.
unsigned NumNodes = Sequence.size();
for (unsigned i = 0; i != NumNodes; ++i) {
SDNode *N = Sequence[NumNodes-i-1];
DEBUG(N->dump(DAG));
Emitter.EmitNode(N, false, false, VRBaseMap);
}
DEBUG(dbgs() << '\n');
InsertPos = Emitter.getInsertPos();
return Emitter.getBlock();
}
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
llvm::ScheduleDAGSDNodes *
llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
return new ScheduleDAGFast(*IS->MF);
}
llvm::ScheduleDAGSDNodes *
llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
return new ScheduleDAGLinearize(*IS->MF);
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | //===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCStrategy.h"
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "isel"
STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
STATISTIC(NumFastIselFailLowerArguments,
"Number of entry blocks where fast isel failed to lower arguments");
#ifndef NDEBUG
static cl::opt<bool>
EnableFastISelVerbose2("fast-isel-verbose2", cl::Hidden,
cl::desc("Enable extra verbose messages in the \"fast\" "
"instruction selector"));
// Terminators
STATISTIC(NumFastIselFailRet,"Fast isel fails on Ret");
STATISTIC(NumFastIselFailBr,"Fast isel fails on Br");
STATISTIC(NumFastIselFailSwitch,"Fast isel fails on Switch");
STATISTIC(NumFastIselFailIndirectBr,"Fast isel fails on IndirectBr");
STATISTIC(NumFastIselFailInvoke,"Fast isel fails on Invoke");
STATISTIC(NumFastIselFailResume,"Fast isel fails on Resume");
STATISTIC(NumFastIselFailUnreachable,"Fast isel fails on Unreachable");
// Standard binary operators...
STATISTIC(NumFastIselFailAdd,"Fast isel fails on Add");
STATISTIC(NumFastIselFailFAdd,"Fast isel fails on FAdd");
STATISTIC(NumFastIselFailSub,"Fast isel fails on Sub");
STATISTIC(NumFastIselFailFSub,"Fast isel fails on FSub");
STATISTIC(NumFastIselFailMul,"Fast isel fails on Mul");
STATISTIC(NumFastIselFailFMul,"Fast isel fails on FMul");
STATISTIC(NumFastIselFailUDiv,"Fast isel fails on UDiv");
STATISTIC(NumFastIselFailSDiv,"Fast isel fails on SDiv");
STATISTIC(NumFastIselFailFDiv,"Fast isel fails on FDiv");
STATISTIC(NumFastIselFailURem,"Fast isel fails on URem");
STATISTIC(NumFastIselFailSRem,"Fast isel fails on SRem");
STATISTIC(NumFastIselFailFRem,"Fast isel fails on FRem");
// Logical operators...
STATISTIC(NumFastIselFailAnd,"Fast isel fails on And");
STATISTIC(NumFastIselFailOr,"Fast isel fails on Or");
STATISTIC(NumFastIselFailXor,"Fast isel fails on Xor");
// Memory instructions...
STATISTIC(NumFastIselFailAlloca,"Fast isel fails on Alloca");
STATISTIC(NumFastIselFailLoad,"Fast isel fails on Load");
STATISTIC(NumFastIselFailStore,"Fast isel fails on Store");
STATISTIC(NumFastIselFailAtomicCmpXchg,"Fast isel fails on AtomicCmpXchg");
STATISTIC(NumFastIselFailAtomicRMW,"Fast isel fails on AtomicRMW");
STATISTIC(NumFastIselFailFence,"Fast isel fails on Fence");
STATISTIC(NumFastIselFailGetElementPtr,"Fast isel fails on GetElementPtr");
// Convert instructions...
STATISTIC(NumFastIselFailTrunc,"Fast isel fails on Trunc");
STATISTIC(NumFastIselFailZExt,"Fast isel fails on ZExt");
STATISTIC(NumFastIselFailSExt,"Fast isel fails on SExt");
STATISTIC(NumFastIselFailFPTrunc,"Fast isel fails on FPTrunc");
STATISTIC(NumFastIselFailFPExt,"Fast isel fails on FPExt");
STATISTIC(NumFastIselFailFPToUI,"Fast isel fails on FPToUI");
STATISTIC(NumFastIselFailFPToSI,"Fast isel fails on FPToSI");
STATISTIC(NumFastIselFailUIToFP,"Fast isel fails on UIToFP");
STATISTIC(NumFastIselFailSIToFP,"Fast isel fails on SIToFP");
STATISTIC(NumFastIselFailIntToPtr,"Fast isel fails on IntToPtr");
STATISTIC(NumFastIselFailPtrToInt,"Fast isel fails on PtrToInt");
STATISTIC(NumFastIselFailBitCast,"Fast isel fails on BitCast");
// Other instructions...
STATISTIC(NumFastIselFailICmp,"Fast isel fails on ICmp");
STATISTIC(NumFastIselFailFCmp,"Fast isel fails on FCmp");
STATISTIC(NumFastIselFailPHI,"Fast isel fails on PHI");
STATISTIC(NumFastIselFailSelect,"Fast isel fails on Select");
STATISTIC(NumFastIselFailCall,"Fast isel fails on Call");
STATISTIC(NumFastIselFailShl,"Fast isel fails on Shl");
STATISTIC(NumFastIselFailLShr,"Fast isel fails on LShr");
STATISTIC(NumFastIselFailAShr,"Fast isel fails on AShr");
STATISTIC(NumFastIselFailVAArg,"Fast isel fails on VAArg");
STATISTIC(NumFastIselFailExtractElement,"Fast isel fails on ExtractElement");
STATISTIC(NumFastIselFailInsertElement,"Fast isel fails on InsertElement");
STATISTIC(NumFastIselFailShuffleVector,"Fast isel fails on ShuffleVector");
STATISTIC(NumFastIselFailExtractValue,"Fast isel fails on ExtractValue");
STATISTIC(NumFastIselFailInsertValue,"Fast isel fails on InsertValue");
STATISTIC(NumFastIselFailLandingPad,"Fast isel fails on LandingPad");
// Intrinsic instructions...
STATISTIC(NumFastIselFailIntrinsicCall, "Fast isel fails on Intrinsic call");
STATISTIC(NumFastIselFailSAddWithOverflow,
"Fast isel fails on sadd.with.overflow");
STATISTIC(NumFastIselFailUAddWithOverflow,
"Fast isel fails on uadd.with.overflow");
STATISTIC(NumFastIselFailSSubWithOverflow,
"Fast isel fails on ssub.with.overflow");
STATISTIC(NumFastIselFailUSubWithOverflow,
"Fast isel fails on usub.with.overflow");
STATISTIC(NumFastIselFailSMulWithOverflow,
"Fast isel fails on smul.with.overflow");
STATISTIC(NumFastIselFailUMulWithOverflow,
"Fast isel fails on umul.with.overflow");
STATISTIC(NumFastIselFailFrameaddress, "Fast isel fails on Frameaddress");
STATISTIC(NumFastIselFailSqrt, "Fast isel fails on sqrt call");
STATISTIC(NumFastIselFailStackMap, "Fast isel fails on StackMap call");
STATISTIC(NumFastIselFailPatchPoint, "Fast isel fails on PatchPoint call");
#endif
static cl::opt<bool>
EnableFastISelVerbose("fast-isel-verbose", cl::Hidden,
cl::desc("Enable verbose messages in the \"fast\" "
"instruction selector"));
static cl::opt<int> EnableFastISelAbort(
"fast-isel-abort", cl::Hidden,
cl::desc("Enable abort calls when \"fast\" instruction selection "
"fails to lower an instruction: 0 disable the abort, 1 will "
"abort but for args, calls and terminators, 2 will also "
"abort for argument lowering, and 3 will never fallback "
"to SelectionDAG."));
static cl::opt<bool>
UseMBPI("use-mbpi",
cl::desc("use Machine Branch Probability Info"),
cl::init(true), cl::Hidden);
#ifndef NDEBUG
static cl::opt<std::string>
FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
cl::desc("Only display the basic block whose name "
"matches this for all view-*-dags options"));
static cl::opt<bool>
ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before the first "
"dag combine pass"));
static cl::opt<bool>
ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before legalize types"));
static cl::opt<bool>
ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before legalize"));
static cl::opt<bool>
ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before the second "
"dag combine pass"));
static cl::opt<bool>
ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before the post legalize types"
" dag combine pass"));
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
cl::desc("Pop up a window to show sched dags as they are processed"));
static cl::opt<bool>
ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
cl::desc("Pop up a window to show SUnit dags after they are processed"));
#else
static const bool ViewDAGCombine1 = false,
ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
ViewDAGCombine2 = false,
ViewDAGCombineLT = false,
ViewISelDAGs = false, ViewSchedDAGs = false,
ViewSUnitDAGs = false;
#endif
//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterScheduler::Registry;
//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
static cl::opt<RegisterScheduler::FunctionPassCtor, false,
RegisterPassParser<RegisterScheduler> >
ISHeuristic("pre-RA-sched",
cl::init(&createDefaultScheduler), cl::Hidden,
cl::desc("Instruction schedulers available (before register"
" allocation):"));
static RegisterScheduler
defaultListDAGScheduler("default", "Best scheduler for the target",
createDefaultScheduler);
namespace llvm {
//===--------------------------------------------------------------------===//
/// \brief This class is used by SelectionDAGISel to temporarily override
/// the optimization level on a per-function basis.
class OptLevelChanger {
SelectionDAGISel &IS;
CodeGenOpt::Level SavedOptLevel;
bool SavedFastISel;
public:
OptLevelChanger(SelectionDAGISel &ISel,
CodeGenOpt::Level NewOptLevel) : IS(ISel) {
SavedOptLevel = IS.OptLevel;
if (NewOptLevel == SavedOptLevel)
return;
IS.OptLevel = NewOptLevel;
IS.TM.setOptLevel(NewOptLevel);
SavedFastISel = IS.TM.Options.EnableFastISel;
if (NewOptLevel == CodeGenOpt::None)
IS.TM.setFastISel(true);
DEBUG(dbgs() << "\nChanging optimization level for Function "
<< IS.MF->getFunction()->getName() << "\n");
DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel
<< " ; After: -O" << NewOptLevel << "\n");
}
~OptLevelChanger() {
if (IS.OptLevel == SavedOptLevel)
return;
DEBUG(dbgs() << "\nRestoring optimization level for Function "
<< IS.MF->getFunction()->getName() << "\n");
DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel
<< " ; After: -O" << SavedOptLevel << "\n");
IS.OptLevel = SavedOptLevel;
IS.TM.setOptLevel(SavedOptLevel);
IS.TM.setFastISel(SavedFastISel);
}
};
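// Usage sketch: OptLevelChanger is deliberately RAII-style, so a caller
// only needs a scoped object, as in the
//   OptLevelChanger OLC(*this, NewOptLevel);
// in runOnMachineFunction() below; the saved optimization level and
// fast-isel setting are restored automatically when OLC is destroyed.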
//===--------------------------------------------------------------------===//
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
ScheduleDAGSDNodes* createDefaultScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetLowering *TLI = IS->TLI;
const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
if (OptLevel == CodeGenOpt::None ||
(ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
TLI->getSchedulingPreference() == Sched::Source)
return createSourceListDAGScheduler(IS, OptLevel);
if (TLI->getSchedulingPreference() == Sched::RegPressure)
return createBURRListDAGScheduler(IS, OptLevel);
if (TLI->getSchedulingPreference() == Sched::Hybrid)
return createHybridListDAGScheduler(IS, OptLevel);
if (TLI->getSchedulingPreference() == Sched::VLIW)
return createVLIWDAGScheduler(IS, OptLevel);
assert(TLI->getSchedulingPreference() == Sched::ILP &&
"Unknown sched type!");
return createILPListDAGScheduler(IS, OptLevel);
}
}
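// Note: the scheduler can also be forced from the command line through the
// cl::opt above, e.g. 'llc -pre-RA-sched=source' or '-pre-RA-sched=list-burr'
// (names registered by the individual schedulers); createDefaultScheduler()
// is only consulted when no explicit choice has been made.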
// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag. These
// instructions are special in various ways, which require special support to
// insert. The specified MachineInstr is created but not inserted into any
// basic blocks, and this method is called to expand it into a sequence of
// instructions, potentially also creating new basic blocks and control flow.
// When new basic blocks are inserted and the edges from MBB to its successors
// are modified, the method should insert pairs of <OldSucc, NewSucc> into the
// DenseMap.
MachineBasicBlock *
TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const {
#ifndef NDEBUG
dbgs() << "If a target marks an instruction with "
"'usesCustomInserter', it must implement "
"TargetLowering::EmitInstrWithCustomInserter!";
#endif
llvm_unreachable(nullptr);
}
void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
SDNode *Node) const {
assert(!MI->hasPostISelHook() &&
"If a target marks an instruction with 'hasPostISelHook', "
"it must implement TargetLowering::AdjustInstrPostInstrSelection!");
}
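// A minimal sketch of a target override of this hook (TargetFoo and its
// FLAGS register are hypothetical, shown only to illustrate the contract of
// patching MI in place right after selection):
//   void FooTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
//                                                         SDNode *N) const {
//     MI->addOperand(MachineOperand::CreateReg(Foo::FLAGS, /*isDef=*/true,
//                                              /*isImp=*/true));
//   }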
//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//
SelectionDAGISel::SelectionDAGISel(TargetMachine &tm,
CodeGenOpt::Level OL) :
MachineFunctionPass(ID), TM(tm),
FuncInfo(new FunctionLoweringInfo()),
CurDAG(new SelectionDAG(tm, OL)),
SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
GFI(),
OptLevel(OL),
DAGSize(0) {
initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
initializeAliasAnalysisAnalysisGroup(*PassRegistry::getPassRegistry());
initializeBranchProbabilityInfoPass(*PassRegistry::getPassRegistry());
initializeTargetLibraryInfoWrapperPassPass(
*PassRegistry::getPassRegistry());
}
SelectionDAGISel::~SelectionDAGISel() {
delete SDB;
delete CurDAG;
delete FuncInfo;
}
void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addRequired<GCModuleInfo>();
AU.addPreserved<GCModuleInfo>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
if (UseMBPI && OptLevel != CodeGenOpt::None)
AU.addRequired<BranchProbabilityInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
/// SplitCriticalSideEffectEdges - Look for critical edges carrying a PHI
/// value that may trap. In that case we have to split the edge so that the
/// path through the predecessor block that doesn't go to the phi block
/// doesn't execute the possibly trapping instruction.
///
/// This is required for correctness, so it must be done at -O0.
///
static void SplitCriticalSideEffectEdges(Function &Fn, AliasAnalysis *AA) {
// Loop for blocks with phi nodes.
for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
PHINode *PN = dyn_cast<PHINode>(BB->begin());
if (!PN) continue;
ReprocessBlock:
// For each block with a PHI node, check to see if any of the input values
// are potentially trapping constant expressions. Constant expressions are
// the only potentially trapping values that can occur as arguments to a
// PHI.
for (BasicBlock::iterator I = BB->begin(); (PN = dyn_cast<PHINode>(I)); ++I)
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
if (!CE || !CE->canTrap()) continue;
// The only case we have to worry about is when the edge is critical.
// Since this block has a PHI Node, we assume it has multiple input
// edges: check to see if the pred has multiple successors.
BasicBlock *Pred = PN->getIncomingBlock(i);
if (Pred->getTerminator()->getNumSuccessors() == 1)
continue;
// Okay, we have to split this edge.
SplitCriticalEdge(
Pred->getTerminator(), GetSuccessorNumber(Pred, BB),
CriticalEdgeSplittingOptions(AA).setMergeIdenticalEdges());
goto ReprocessBlock;
}
}
}
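// For illustration, the hazard being fixed looks like this in IR (the
// global @g is hypothetical):
//   %p = phi i32 [ udiv (i32 1, i32 ptrtoint (i32* @g to i32)), %pred ], ...
// The udiv constant expression may trap, so if the %pred edge is critical it
// is split, giving the trapping expression a block that is only reached when
// the PHI's value is actually needed.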
bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Do some sanity-checking on the command-line options.
assert((!EnableFastISelVerbose || TM.Options.EnableFastISel) &&
"-fast-isel-verbose requires -fast-isel");
assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
"-fast-isel-abort > 0 requires -fast-isel");
const Function &Fn = *mf.getFunction();
MF = &mf;
// Reset the target options before resetting the optimization
// level below.
// FIXME: This is a horrible hack and should be processed via
// codegen looking at the optimization level explicitly when
// it wants to look at it.
TM.resetTargetOptions(Fn);
// Reset OptLevel to None for optnone functions.
CodeGenOpt::Level NewOptLevel = OptLevel;
if (Fn.hasFnAttribute(Attribute::OptimizeNone))
NewOptLevel = CodeGenOpt::None;
OptLevelChanger OLC(*this, NewOptLevel);
TII = MF->getSubtarget().getInstrInfo();
TLI = MF->getSubtarget().getTargetLowering();
RegInfo = &MF->getRegInfo();
AA = &getAnalysis<AliasAnalysis>();
LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
SplitCriticalSideEffectEdges(const_cast<Function&>(Fn), AA);
CurDAG->init(*MF);
FuncInfo->set(Fn, *MF, CurDAG);
if (UseMBPI && OptLevel != CodeGenOpt::None)
FuncInfo->BPI = &getAnalysis<BranchProbabilityInfo>();
else
FuncInfo->BPI = nullptr;
SDB->init(GFI, *AA, LibInfo);
MF->setHasInlineAsm(false);
SelectAllBasicBlocks(Fn);
// If the first basic block in the function has live ins that need to be
// copied into vregs, emit the copies into the top of the block before
// emitting the code for the block.
MachineBasicBlock *EntryMBB = MF->begin();
const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);
DenseMap<unsigned, unsigned> LiveInMap;
if (!FuncInfo->ArgDbgValues.empty())
for (MachineRegisterInfo::livein_iterator LI = RegInfo->livein_begin(),
E = RegInfo->livein_end(); LI != E; ++LI)
if (LI->second)
LiveInMap.insert(std::make_pair(LI->first, LI->second));
// Insert DBG_VALUE instructions for function arguments to the entry block.
for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
bool hasFI = MI->getOperand(0).isFI();
unsigned Reg =
hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
EntryMBB->insert(EntryMBB->begin(), MI);
else {
MachineInstr *Def = RegInfo->getVRegDef(Reg);
if (Def) {
MachineBasicBlock::iterator InsertPos = Def;
// FIXME: VR def may not be in entry block.
Def->getParent()->insert(std::next(InsertPos), MI);
} else
DEBUG(dbgs() << "Dropping debug info for dead vreg"
<< TargetRegisterInfo::virtReg2Index(Reg) << "\n");
}
// If Reg is live-in then update debug info to track its copy in a vreg.
DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
if (LDI != LiveInMap.end()) {
assert(!hasFI && "There's no handling of frame pointer updating here yet "
"- add if needed");
MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
MachineBasicBlock::iterator InsertPos = Def;
const MDNode *Variable = MI->getDebugVariable();
const MDNode *Expr = MI->getDebugExpression();
DebugLoc DL = MI->getDebugLoc();
bool IsIndirect = MI->isIndirectDebugValue();
unsigned Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
// Def is never a terminator here, so it is ok to increment InsertPos.
BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
IsIndirect, LDI->second, Offset, Variable, Expr);
// If this vreg is directly copied into an exported register then
// that COPY instruction also needs a DBG_VALUE, if it is the only
// user of LDI->second.
MachineInstr *CopyUseMI = nullptr;
for (MachineRegisterInfo::use_instr_iterator
UI = RegInfo->use_instr_begin(LDI->second),
E = RegInfo->use_instr_end(); UI != E; ) {
MachineInstr *UseMI = &*(UI++);
if (UseMI->isDebugValue()) continue;
if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
CopyUseMI = UseMI; continue;
}
// Otherwise this is another use or a second copy use.
CopyUseMI = nullptr; break;
}
if (CopyUseMI) {
// Use MI's debug location, which describes where Variable was
// declared, rather than whatever is attached to CopyUseMI.
MachineInstr *NewMI =
BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
CopyUseMI->getOperand(0).getReg(), Offset, Variable, Expr);
MachineBasicBlock::iterator Pos = CopyUseMI;
EntryMBB->insertAfter(Pos, NewMI);
}
}
}
// Determine if there are any calls in this machine function.
MachineFrameInfo *MFI = MF->getFrameInfo();
for (const auto &MBB : *MF) {
if (MFI->hasCalls() && MF->hasInlineAsm())
break;
for (const auto &MI : MBB) {
const MCInstrDesc &MCID = TII->get(MI.getOpcode());
if ((MCID.isCall() && !MCID.isReturn()) ||
MI.isStackAligningInlineAsm()) {
MFI->setHasCalls(true);
}
if (MI.isInlineAsm()) {
MF->setHasInlineAsm(true);
}
}
}
// Determine if there is a call to setjmp in the machine function.
MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
// Replace forward-declared registers with the registers containing
// the desired value.
MachineRegisterInfo &MRI = MF->getRegInfo();
for (DenseMap<unsigned, unsigned>::iterator
I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
I != E; ++I) {
unsigned From = I->first;
unsigned To = I->second;
// If To is also scheduled to be replaced, find what its ultimate
// replacement is.
for (;;) {
DenseMap<unsigned, unsigned>::iterator J = FuncInfo->RegFixups.find(To);
if (J == E) break;
To = J->second;
}
// Make sure the new register has a sufficiently constrained register class.
if (TargetRegisterInfo::isVirtualRegister(From) &&
TargetRegisterInfo::isVirtualRegister(To))
MRI.constrainRegClass(To, MRI.getRegClass(From));
// Replace it.
// Replacing one register with another won't touch the kill flags.
// We need to conservatively clear the kill flags as a kill on the old
// register might dominate existing uses of the new register.
if (!MRI.use_empty(To))
MRI.clearKillFlags(From);
MRI.replaceRegWith(From, To);
}
// Freeze the set of reserved registers now that MachineFrameInfo has been
// set up. All the information required by getReservedRegs() should be
// available now.
MRI.freezeReservedRegs(*MF);
// Release function-specific state. SDB and CurDAG are already cleared
// at this point.
FuncInfo->clear();
DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
DEBUG(MF->print(dbgs()));
return true;
}
void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End,
bool &HadTailCall) {
// Lower the instructions. If a call is emitted as a tail call, cease emitting
// nodes for this block.
for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I)
SDB->visit(*I);
// Make sure the root of the DAG is up-to-date.
CurDAG->setRoot(SDB->getControlRoot());
HadTailCall = SDB->HasTailCall;
SDB->clear();
// Final step, emit the lowered DAG as machine code.
CodeGenAndEmitDAG();
}
void SelectionDAGISel::ComputeLiveOutVRegInfo() {
SmallPtrSet<SDNode*, 128> VisitedNodes;
SmallVector<SDNode*, 128> Worklist;
Worklist.push_back(CurDAG->getRoot().getNode());
APInt KnownZero;
APInt KnownOne;
do {
SDNode *N = Worklist.pop_back_val();
// If we've already seen this node, ignore it.
if (!VisitedNodes.insert(N).second)
continue;
// Otherwise, add all chain operands to the worklist.
for (const SDValue &Op : N->op_values())
if (Op.getValueType() == MVT::Other)
Worklist.push_back(Op.getNode());
// If this is a CopyToReg with a vreg dest, process it.
if (N->getOpcode() != ISD::CopyToReg)
continue;
unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
if (!TargetRegisterInfo::isVirtualRegister(DestReg))
continue;
// Ignore non-scalar or non-integer values.
SDValue Src = N->getOperand(2);
EVT SrcVT = Src.getValueType();
if (!SrcVT.isInteger() || SrcVT.isVector())
continue;
unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
CurDAG->computeKnownBits(Src, KnownZero, KnownOne);
FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, KnownZero, KnownOne);
} while (!Worklist.empty());
}
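// The phases below run in a fixed order: DAG combine 1, type legalization,
// a post-type-legalization combine, vector legalization (possibly followed
// by a second round of type legalization and combining), DAG legalization,
// DAG combine 2, instruction selection, scheduling, and finally instruction
// emission. Each phase is wrapped in a NamedRegionTimer so that -time-passes
// can attribute compile time to it.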
void SelectionDAGISel::CodeGenAndEmitDAG() {
std::string GroupName;
if (TimePassesIsEnabled)
GroupName = "Instruction Selection and Scheduling";
std::string BlockName;
int BlockNumber = -1;
(void)BlockNumber;
bool MatchFilterBB = false; (void)MatchFilterBB;
#ifndef NDEBUG
MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
FilterDAGBasicBlockName ==
FuncInfo->MBB->getBasicBlock()->getName().str());
#endif
#ifdef NDEBUG
if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
ViewSUnitDAGs)
#endif
{
BlockNumber = FuncInfo->MBB->getNumber();
BlockName =
(MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
}
DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (ViewDAGCombine1 && MatchFilterBB)
CurDAG->viewGraph("dag-combine1 input for " + BlockName);
// Run the DAG combiner in pre-legalize mode.
{
NamedRegionTimer T("DAG Combining 1", GroupName, TimePassesIsEnabled);
CurDAG->Combine(BeforeLegalizeTypes, *AA, OptLevel);
}
DEBUG(dbgs() << "Optimized lowered selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
if (ViewLegalizeTypesDAGs && MatchFilterBB)
CurDAG->viewGraph("legalize-types input for " + BlockName);
bool Changed;
{
NamedRegionTimer T("Type Legalization", GroupName, TimePassesIsEnabled);
Changed = CurDAG->LegalizeTypes();
}
DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
CurDAG->NewNodesMustHaveLegalTypes = true;
if (Changed) {
if (ViewDAGCombineLT && MatchFilterBB)
CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
{
NamedRegionTimer T("DAG Combining after legalize types", GroupName,
TimePassesIsEnabled);
CurDAG->Combine(AfterLegalizeTypes, *AA, OptLevel);
}
DEBUG(dbgs() << "Optimized type-legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
}
{
NamedRegionTimer T("Vector Legalization", GroupName, TimePassesIsEnabled);
Changed = CurDAG->LegalizeVectors();
}
if (Changed) {
{
NamedRegionTimer T("Type Legalization 2", GroupName, TimePassesIsEnabled);
CurDAG->LegalizeTypes();
}
if (ViewDAGCombineLT && MatchFilterBB)
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
{
NamedRegionTimer T("DAG Combining after legalize vectors", GroupName,
TimePassesIsEnabled);
CurDAG->Combine(AfterLegalizeVectorOps, *AA, OptLevel);
}
DEBUG(dbgs() << "Optimized vector-legalized selection DAG: BB#"
<< BlockNumber << " '" << BlockName << "'\n"; CurDAG->dump());
}
if (ViewLegalizeDAGs && MatchFilterBB)
CurDAG->viewGraph("legalize input for " + BlockName);
{
NamedRegionTimer T("DAG Legalization", GroupName, TimePassesIsEnabled);
CurDAG->Legalize();
}
DEBUG(dbgs() << "Legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (ViewDAGCombine2 && MatchFilterBB)
CurDAG->viewGraph("dag-combine2 input for " + BlockName);
// Run the DAG combiner in post-legalize mode.
{
NamedRegionTimer T("DAG Combining 2", GroupName, TimePassesIsEnabled);
CurDAG->Combine(AfterLegalizeDAG, *AA, OptLevel);
}
DEBUG(dbgs() << "Optimized legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (OptLevel != CodeGenOpt::None)
ComputeLiveOutVRegInfo();
if (ViewISelDAGs && MatchFilterBB)
CurDAG->viewGraph("isel input for " + BlockName);
// Third, instruction select all of the operations to machine code, adding the
// code to the MachineBasicBlock.
{
NamedRegionTimer T("Instruction Selection", GroupName, TimePassesIsEnabled);
DoInstructionSelection();
}
DEBUG(dbgs() << "Selected selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (ViewSchedDAGs && MatchFilterBB)
CurDAG->viewGraph("scheduler input for " + BlockName);
// Schedule machine code.
ScheduleDAGSDNodes *Scheduler = CreateScheduler();
{
NamedRegionTimer T("Instruction Scheduling", GroupName,
TimePassesIsEnabled);
Scheduler->Run(CurDAG, FuncInfo->MBB);
}
if (ViewSUnitDAGs && MatchFilterBB) Scheduler->viewGraph();
// Emit machine code to BB. This can change 'BB' to the last block being
// inserted into.
MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
{
NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
// FuncInfo->InsertPt is passed by reference and set to the end of the
// scheduled instructions.
LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
}
// If the block was split, make sure we update any references that are used to
// update PHI nodes later on.
if (FirstMBB != LastMBB)
SDB->UpdateSplitBlock(FirstMBB, LastMBB);
// Free the scheduler state.
{
NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName,
TimePassesIsEnabled);
delete Scheduler;
}
// Free the SelectionDAG state, now that we're finished with it.
CurDAG->clear();
}
namespace {
/// ISelUpdater - helper class to handle updates of the instruction selection
/// graph.
class ISelUpdater : public SelectionDAG::DAGUpdateListener {
SelectionDAG::allnodes_iterator &ISelPosition;
public:
ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
: SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
/// NodeDeleted - Handle nodes deleted from the graph. If the node being
/// deleted is the current ISelPosition node, update ISelPosition.
///
void NodeDeleted(SDNode *N, SDNode *E) override {
if (ISelPosition == SelectionDAG::allnodes_iterator(N))
++ISelPosition;
}
};
} // end anonymous namespace
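// DoInstructionSelection() below installs an ISelUpdater on CurDAG so that,
// if Select() happens to delete the node ISelPosition currently points at,
// the iterator is advanced past it instead of being left dangling.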
void SelectionDAGISel::DoInstructionSelection() {
DEBUG(dbgs() << "===== Instruction selection begins: BB#"
<< FuncInfo->MBB->getNumber()
<< " '" << FuncInfo->MBB->getName() << "'\n");
PreprocessISelDAG();
// Select target instructions for the DAG.
{
// Number all nodes with a topological order and set DAGSize.
DAGSize = CurDAG->AssignTopologicalOrder();
// Create a dummy node (which is not added to allnodes), that adds
// a reference to the root node, preventing it from being deleted,
// and tracking any changes of the root.
HandleSDNode Dummy(CurDAG->getRoot());
SelectionDAG::allnodes_iterator ISelPosition (CurDAG->getRoot().getNode());
++ISelPosition;
// Make sure that ISelPosition gets properly updated when nodes are deleted
// in calls made from this function.
ISelUpdater ISU(*CurDAG, ISelPosition);
// The AllNodes list is now topological-sorted. Visit the
// nodes by starting at the end of the list (the root of the
// graph) and proceeding back toward the beginning (the entry
// node).
while (ISelPosition != CurDAG->allnodes_begin()) {
SDNode *Node = --ISelPosition;
// Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
// but there are currently some corner cases that it misses. Also, this
// makes it theoretically possible to disable the DAGCombiner.
if (Node->use_empty())
continue;
SDNode *ResNode = Select(Node);
// FIXME: This is pretty gross. 'Select' should be changed to not return
// anything at all and this code should be nuked with a tactical strike.
// If node should not be replaced, continue with the next one.
if (ResNode == Node || Node->getOpcode() == ISD::DELETED_NODE)
continue;
// Replace node.
if (ResNode) {
ReplaceUses(Node, ResNode);
}
// If after the replacement this node is not used any more,
// remove this dead node.
if (Node->use_empty()) // Don't delete EntryToken, etc.
CurDAG->RemoveDeadNode(Node);
}
CurDAG->setRoot(Dummy.getValue());
}
DEBUG(dbgs() << "===== Instruction selection ends:\n");
PostprocessISelDAG();
}
/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
/// do other setup for EH landing-pad blocks.
bool SelectionDAGISel::PrepareEHLandingPad() {
MachineBasicBlock *MBB = FuncInfo->MBB;
const TargetRegisterClass *PtrRC =
TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));
// Add a label to mark the beginning of the landing pad. Deletion of the
// landing pad can thus be detected via the MachineModuleInfo.
MCSymbol *Label = MF->getMMI().addLandingPad(MBB);
// Assign the call site to the landing pad's begin label.
MF->getMMI().setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
.addSym(Label);
// If this is an MSVC-style personality function, we need to split the landing
// pad into several BBs.
const BasicBlock *LLVMBB = MBB->getBasicBlock();
const LandingPadInst *LPadInst = LLVMBB->getLandingPadInst();
MF->getMMI().addPersonality(MBB, cast<Function>(LPadInst->getParent()
->getParent()
->getPersonalityFn()
->stripPointerCasts()));
EHPersonality Personality = MF->getMMI().getPersonalityType();
if (isMSVCEHPersonality(Personality)) {
SmallVector<MachineBasicBlock *, 4> ClauseBBs;
const IntrinsicInst *ActionsCall =
dyn_cast<IntrinsicInst>(LLVMBB->getFirstInsertionPt());
// Get all invoke BBs that unwind to this landingpad.
SmallVector<MachineBasicBlock *, 4> InvokeBBs(MBB->pred_begin(),
MBB->pred_end());
if (ActionsCall && ActionsCall->getIntrinsicID() == Intrinsic::eh_actions) {
// If this is a call to llvm.eh.actions followed by indirectbr, then we've
// run WinEHPrepare, and we should remove this block from the machine CFG.
// Mark the targets of the indirectbr as landingpads instead.
for (const BasicBlock *LLVMSucc : successors(LLVMBB)) {
MachineBasicBlock *ClauseBB = FuncInfo->MBBMap[LLVMSucc];
// Add the edge from the invoke to the clause.
for (MachineBasicBlock *InvokeBB : InvokeBBs)
InvokeBB->addSuccessor(ClauseBB);
// Mark the clause as a landing pad or MI passes will delete it.
ClauseBB->setIsLandingPad();
}
}
// Remove the edge from the invoke to the lpad.
for (MachineBasicBlock *InvokeBB : InvokeBBs)
InvokeBB->removeSuccessor(MBB);
// Don't select instructions for the landingpad.
return false;
}
// Mark exception register as live in.
if (unsigned Reg = TLI->getExceptionPointerRegister())
FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);
// Mark exception selector register as live in.
if (unsigned Reg = TLI->getExceptionSelectorRegister())
FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);
return true;
}
/// isFoldedOrDeadInstruction - Return true if the specified instruction is
/// side-effect free and is either dead or folded into a generated instruction.
/// Return false if it needs to be emitted.
static bool isFoldedOrDeadInstruction(const Instruction *I,
FunctionLoweringInfo *FuncInfo) {
return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
!isa<TerminatorInst>(I) && // Terminators aren't folded.
!isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
!isa<LandingPadInst>(I) && // Landingpad instructions aren't folded.
!FuncInfo->isExportedInst(I); // Exported instrs must be computed.
}
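// Example (illustrative): a 'getelementptr' whose only use is a load that
// fast-isel folds into the load's addressing mode passes every predicate
// above (provided it is not exported), so the GEP itself is never emitted
// and is simply skipped by the fast-isel loop in SelectAllBasicBlocks().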
#ifndef NDEBUG
// Collect per-instruction statistics for fast-isel misses. Only the
// instruction that causes the bail is accounted for; instructions higher in
// the block are not. Thus, summing the per-instruction stats will not add up
// to what is reported by NumFastIselFailures.
static void collectFailStats(const Instruction *I) {
switch (I->getOpcode()) {
default: assert (0 && "<Invalid operator> ");
// Terminators
case Instruction::Ret: NumFastIselFailRet++; return;
case Instruction::Br: NumFastIselFailBr++; return;
case Instruction::Switch: NumFastIselFailSwitch++; return;
case Instruction::IndirectBr: NumFastIselFailIndirectBr++; return;
case Instruction::Invoke: NumFastIselFailInvoke++; return;
case Instruction::Resume: NumFastIselFailResume++; return;
case Instruction::Unreachable: NumFastIselFailUnreachable++; return;
// Standard binary operators...
case Instruction::Add: NumFastIselFailAdd++; return;
case Instruction::FAdd: NumFastIselFailFAdd++; return;
case Instruction::Sub: NumFastIselFailSub++; return;
case Instruction::FSub: NumFastIselFailFSub++; return;
case Instruction::Mul: NumFastIselFailMul++; return;
case Instruction::FMul: NumFastIselFailFMul++; return;
case Instruction::UDiv: NumFastIselFailUDiv++; return;
case Instruction::SDiv: NumFastIselFailSDiv++; return;
case Instruction::FDiv: NumFastIselFailFDiv++; return;
case Instruction::URem: NumFastIselFailURem++; return;
case Instruction::SRem: NumFastIselFailSRem++; return;
case Instruction::FRem: NumFastIselFailFRem++; return;
// Logical operators...
case Instruction::And: NumFastIselFailAnd++; return;
case Instruction::Or: NumFastIselFailOr++; return;
case Instruction::Xor: NumFastIselFailXor++; return;
// Memory instructions...
case Instruction::Alloca: NumFastIselFailAlloca++; return;
case Instruction::Load: NumFastIselFailLoad++; return;
case Instruction::Store: NumFastIselFailStore++; return;
case Instruction::AtomicCmpXchg: NumFastIselFailAtomicCmpXchg++; return;
case Instruction::AtomicRMW: NumFastIselFailAtomicRMW++; return;
case Instruction::Fence: NumFastIselFailFence++; return;
case Instruction::GetElementPtr: NumFastIselFailGetElementPtr++; return;
// Convert instructions...
case Instruction::Trunc: NumFastIselFailTrunc++; return;
case Instruction::ZExt: NumFastIselFailZExt++; return;
case Instruction::SExt: NumFastIselFailSExt++; return;
case Instruction::FPTrunc: NumFastIselFailFPTrunc++; return;
case Instruction::FPExt: NumFastIselFailFPExt++; return;
case Instruction::FPToUI: NumFastIselFailFPToUI++; return;
case Instruction::FPToSI: NumFastIselFailFPToSI++; return;
case Instruction::UIToFP: NumFastIselFailUIToFP++; return;
case Instruction::SIToFP: NumFastIselFailSIToFP++; return;
case Instruction::IntToPtr: NumFastIselFailIntToPtr++; return;
case Instruction::PtrToInt: NumFastIselFailPtrToInt++; return;
case Instruction::BitCast: NumFastIselFailBitCast++; return;
// Other instructions...
case Instruction::ICmp: NumFastIselFailICmp++; return;
case Instruction::FCmp: NumFastIselFailFCmp++; return;
case Instruction::PHI: NumFastIselFailPHI++; return;
case Instruction::Select: NumFastIselFailSelect++; return;
case Instruction::Call: {
if (auto const *Intrinsic = dyn_cast<IntrinsicInst>(I)) {
switch (Intrinsic->getIntrinsicID()) {
default:
NumFastIselFailIntrinsicCall++; return;
case Intrinsic::sadd_with_overflow:
NumFastIselFailSAddWithOverflow++; return;
case Intrinsic::uadd_with_overflow:
NumFastIselFailUAddWithOverflow++; return;
case Intrinsic::ssub_with_overflow:
NumFastIselFailSSubWithOverflow++; return;
case Intrinsic::usub_with_overflow:
NumFastIselFailUSubWithOverflow++; return;
case Intrinsic::smul_with_overflow:
NumFastIselFailSMulWithOverflow++; return;
case Intrinsic::umul_with_overflow:
NumFastIselFailUMulWithOverflow++; return;
case Intrinsic::frameaddress:
NumFastIselFailFrameaddress++; return;
case Intrinsic::sqrt:
NumFastIselFailSqrt++; return;
case Intrinsic::experimental_stackmap:
NumFastIselFailStackMap++; return;
case Intrinsic::experimental_patchpoint_void: // fall-through
case Intrinsic::experimental_patchpoint_i64:
NumFastIselFailPatchPoint++; return;
}
}
NumFastIselFailCall++;
return;
}
case Instruction::Shl: NumFastIselFailShl++; return;
case Instruction::LShr: NumFastIselFailLShr++; return;
case Instruction::AShr: NumFastIselFailAShr++; return;
case Instruction::VAArg: NumFastIselFailVAArg++; return;
case Instruction::ExtractElement: NumFastIselFailExtractElement++; return;
case Instruction::InsertElement: NumFastIselFailInsertElement++; return;
case Instruction::ShuffleVector: NumFastIselFailShuffleVector++; return;
case Instruction::ExtractValue: NumFastIselFailExtractValue++; return;
case Instruction::InsertValue: NumFastIselFailInsertValue++; return;
case Instruction::LandingPad: NumFastIselFailLandingPad++; return;
}
}
#endif
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = nullptr;
if (TM.Options.EnableFastISel)
FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
// Iterate over all basic blocks in the function.
ReversePostOrderTraversal<const Function*> RPOT(&Fn);
for (ReversePostOrderTraversal<const Function*>::rpo_iterator
I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
const BasicBlock *LLVMBB = *I;
if (OptLevel != CodeGenOpt::None) {
bool AllPredsVisited = true;
for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB);
PI != PE; ++PI) {
if (!FuncInfo->VisitedBBs.count(*PI)) {
AllPredsVisited = false;
break;
}
}
if (AllPredsVisited) {
for (BasicBlock::const_iterator I = LLVMBB->begin();
const PHINode *PN = dyn_cast<PHINode>(I); ++I)
FuncInfo->ComputePHILiveOutRegInfo(PN);
} else {
for (BasicBlock::const_iterator I = LLVMBB->begin();
const PHINode *PN = dyn_cast<PHINode>(I); ++I)
FuncInfo->InvalidatePHILiveOutRegInfo(PN);
}
FuncInfo->VisitedBBs.insert(LLVMBB);
}
BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI();
BasicBlock::const_iterator const End = LLVMBB->end();
BasicBlock::const_iterator BI = End;
FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
// Setup an EH landing-pad block.
FuncInfo->ExceptionPointerVirtReg = 0;
FuncInfo->ExceptionSelectorVirtReg = 0;
if (LLVMBB->isLandingPad())
if (!PrepareEHLandingPad())
continue;
// Before doing SelectionDAG ISel, see if FastISel has been requested.
if (FastIS) {
FastIS->startNewBlock();
// Emit code for any incoming arguments. This must happen before
// beginning FastISel on the entry block.
if (LLVMBB == &Fn.getEntryBlock()) {
++NumEntryBlocks;
// Lower any arguments needed in this block if this is the entry block.
if (!FastIS->lowerArguments()) {
// Fast isel failed to lower these arguments
++NumFastIselFailLowerArguments;
if (EnableFastISelAbort > 1)
report_fatal_error("FastISel didn't lower all arguments");
// Use SelectionDAG argument lowering
LowerArguments(Fn);
CurDAG->setRoot(SDB->getControlRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
// If we inserted any instructions at the beginning, make a note of
// where they are, so we can be sure to emit subsequent instructions
// after them.
if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
FastIS->setLastLocalValue(std::prev(FuncInfo->InsertPt));
else
FastIS->setLastLocalValue(nullptr);
}
unsigned NumFastIselRemaining = std::distance(Begin, End);
// Do FastISel on as many instructions as possible.
for (; BI != Begin; --BI) {
const Instruction *Inst = std::prev(BI);
// If we no longer require this instruction, skip it.
if (isFoldedOrDeadInstruction(Inst, FuncInfo)) {
--NumFastIselRemaining;
continue;
}
// Bottom-up: reset the insert pos at the top, after any local-value
// instructions.
FastIS->recomputeInsertPt();
// Try to select the instruction with FastISel.
if (FastIS->selectInstruction(Inst)) {
--NumFastIselRemaining;
++NumFastIselSuccess;
// If fast isel succeeded, skip over all the folded instructions, and
// then see if there is a load right before the selected instructions.
// Try to fold the load if so.
const Instruction *BeforeInst = Inst;
while (BeforeInst != Begin) {
BeforeInst = std::prev(BasicBlock::const_iterator(BeforeInst));
if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
break;
}
if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
BeforeInst->hasOneUse() &&
FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
// If we succeeded, don't re-select the load.
BI = std::next(BasicBlock::const_iterator(BeforeInst));
--NumFastIselRemaining;
++NumFastIselSuccess;
}
continue;
}
#ifndef NDEBUG
if (EnableFastISelVerbose2)
collectFailStats(Inst);
#endif
// Then handle certain instructions as single-LLVM-Instruction blocks.
if (isa<CallInst>(Inst)) {
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel missed call: ";
Inst->dump();
}
if (EnableFastISelAbort > 2)
// FastISel selector couldn't handle something and bailed.
// For the purpose of debugging, just abort.
report_fatal_error("FastISel didn't select the entire block");
if (!Inst->getType()->isVoidTy() && !Inst->use_empty()) {
unsigned &R = FuncInfo->ValueMap[Inst];
if (!R)
R = FuncInfo->CreateRegs(Inst->getType());
}
bool HadTailCall = false;
MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
SelectBasicBlock(Inst, BI, HadTailCall);
// If the call was emitted as a tail call, we're done with the block.
// We also need to delete any previously emitted instructions.
if (HadTailCall) {
FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end());
--BI;
break;
}
// Recompute NumFastIselRemaining as Selection DAG instruction
// selection may have handled the call, input args, etc.
unsigned RemainingNow = std::distance(Begin, BI);
NumFastIselFailures += NumFastIselRemaining - RemainingNow;
NumFastIselRemaining = RemainingNow;
continue;
}
bool ShouldAbort = EnableFastISelAbort;
if (EnableFastISelVerbose || EnableFastISelAbort) {
if (isa<TerminatorInst>(Inst)) {
// Use a different message for terminator misses.
dbgs() << "FastISel missed terminator: ";
// Don't abort for terminator misses unless the abort level is really high
ShouldAbort = (EnableFastISelAbort > 2);
} else {
dbgs() << "FastISel miss: ";
}
Inst->dump();
}
if (ShouldAbort)
// FastISel selector couldn't handle something and bailed.
// For the purpose of debugging, just abort.
report_fatal_error("FastISel didn't select the entire block");
NumFastIselFailures += NumFastIselRemaining;
break;
}
FastIS->recomputeInsertPt();
} else {
// Lower any arguments needed in this block if this is the entry block.
if (LLVMBB == &Fn.getEntryBlock()) {
++NumEntryBlocks;
LowerArguments(Fn);
}
}
if (Begin != BI)
++NumDAGBlocks;
else
++NumFastIselBlocks;
if (Begin != BI) {
// Run SelectionDAG instruction selection on the remainder of the block
// not handled by FastISel. If FastISel is not run, this is the entire
// block.
bool HadTailCall;
SelectBasicBlock(Begin, BI, HadTailCall);
}
FinishBasicBlock();
FuncInfo->PHINodesToUpdate.clear();
}
delete FastIS;
SDB->clearDanglingDebugInfo();
SDB->SPDescriptor.resetPerFunctionState();
}
/// Given that the input MI is before a partial terminator sequence TSeq, return
/// true if MI + TSeq is also a partial terminator sequence.
///
/// A Terminator sequence is a sequence of MachineInstrs which at this point in
/// lowering copy vregs into physical registers, which are then passed into
/// terminator instructions so we can satisfy ABI constraints. A partial
/// terminator sequence is an improper subset of a terminator sequence (i.e. it
/// may be the whole terminator sequence).
static bool MIIsInTerminatorSequence(const MachineInstr *MI) {
// If we do not have a copy or an implicit def, we return true if and only if
// MI is a debug value.
if (!MI->isCopy() && !MI->isImplicitDef())
// Sometimes DBG_VALUE MIs sneak in between the copies from the vregs to the
// physical registers if there is debug info associated with the terminator
// of our mbb. We want to include said debug info in our terminator
// sequence, so we return true in that case.
return MI->isDebugValue();
// We have left the terminator sequence if we are not doing one of the
// following:
//
// 1. Copying a vreg into a physical register.
// 2. Copying a vreg into a vreg.
// 3. Defining a register via an implicit def.
// OPI should always be a register definition...
MachineInstr::const_mop_iterator OPI = MI->operands_begin();
if (!OPI->isReg() || !OPI->isDef())
return false;
// Defining any register via an implicit def is always ok.
if (MI->isImplicitDef())
return true;
// Grab the copy source...
MachineInstr::const_mop_iterator OPI2 = OPI;
++OPI2;
assert(OPI2 != MI->operands_end()
&& "Should have a copy implying we should have 2 arguments.");
// Make sure that the copy dest is not a vreg when the copy source is a
// physical register.
if (!OPI2->isReg() ||
(!TargetRegisterInfo::isPhysicalRegister(OPI->getReg()) &&
TargetRegisterInfo::isPhysicalRegister(OPI2->getReg())))
return false;
return true;
}
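// Illustrative terminator sequence (hypothetical MIR) that this predicate
// accepts, walking upward from the terminator:
//   %v0 = COPY %retval        ; vreg-to-vreg copy      -> in sequence
//   $physreg0 = COPY %v0      ; vreg-to-physreg copy   -> in sequence
//   DBG_VALUE ...             ; debug info for the ret -> in sequence
//   RET ...
// whereas a copy like '%v1 = COPY $physreg7' (physical source, virtual
// destination) terminates the walk.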
/// Find the split point at which to splice the end of BB into the success
/// machine basic block of its stack protector check.
///
/// On many platforms, due to ABI constraints, terminators, even before register
/// allocation, use physical registers. This creates an issue for us since
/// physical registers at this point can not travel across basic
/// blocks. Luckily, selectiondag always moves physical registers into vregs
/// when they enter functions and moves them through a sequence of copies back
/// into the physical registers right before the terminator creating a
/// ``Terminator Sequence''. This function is searching for the beginning of the
/// terminator sequence so that we can ensure that we splice off not just the
/// terminator, but additionally the copies that move the vregs into the
/// physical registers.
static MachineBasicBlock::iterator
FindSplitPointForStackProtector(MachineBasicBlock *BB, DebugLoc DL) {
MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
if (SplitPoint == BB->begin())
return SplitPoint;
MachineBasicBlock::iterator Start = BB->begin();
MachineBasicBlock::iterator Previous = SplitPoint;
--Previous;
while (MIIsInTerminatorSequence(Previous)) {
SplitPoint = Previous;
if (Previous == Start)
break;
--Previous;
}
return SplitPoint;
}
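// Sketch of the effect (hypothetical MIR): for a parent block ending in
//   %v = COPY %w
//   $physreg0 = COPY %v
//   JMP %succ
// the split point returned is the first COPY, so FinishBasicBlock() below
// splices the copies together with the jump into the success MBB.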
void
SelectionDAGISel::FinishBasicBlock() {
DEBUG(dbgs() << "Total amount of phi nodes to update: "
<< FuncInfo->PHINodesToUpdate.size() << "\n";
for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
dbgs() << "Node " << i << " : ("
<< FuncInfo->PHINodesToUpdate[i].first
<< ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
// Next, now that we know what the last MBB the LLVM BB expanded is, update
// PHI nodes in successors.
for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
continue;
PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
}
// Handle stack protector.
if (SDB->SPDescriptor.shouldEmitStackProtector()) {
MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
// Find the split point to split the parent mbb. At the same time copy all
// physical registers used in the tail of parent mbb into virtual registers
// before the split point and back into physical registers after the split
// point. This prevents us needing to deal with Live-ins and many other
// register allocation issues caused by us splitting the parent mbb. The
// register allocator will clean up said virtual copies later on.
MachineBasicBlock::iterator SplitPoint =
FindSplitPointForStackProtector(ParentMBB, SDB->getCurDebugLoc());
// Splice the terminator of ParentMBB into SuccessMBB.
SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
SplitPoint,
ParentMBB->end());
// Add a compare and a jump-on-not-equal to the parent BB.
FuncInfo->MBB = ParentMBB;
FuncInfo->InsertPt = ParentMBB->end();
SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
// Codegen the Failure MBB if we have not codegened it yet.
MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
if (!FailureMBB->size()) {
FuncInfo->MBB = FailureMBB;
FuncInfo->InsertPt = FailureMBB->end();
SDB->visitSPDescriptorFailure(SDB->SPDescriptor);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
// Clear the Per-BB State.
SDB->SPDescriptor.resetPerBBState();
}
for (unsigned i = 0, e = SDB->BitTestCases.size(); i != e; ++i) {
// Lower header first, if it wasn't already lowered
if (!SDB->BitTestCases[i].Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->BitTestCases[i].Parent;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
uint32_t UnhandledWeight = 0;
for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j)
UnhandledWeight += SDB->BitTestCases[i].Cases[j].ExtraWeight;
for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
UnhandledWeight -= SDB->BitTestCases[i].Cases[j].ExtraWeight;
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
if (j+1 != ej)
SDB->visitBitTestCase(SDB->BitTestCases[i],
SDB->BitTestCases[i].Cases[j+1].ThisBB,
UnhandledWeight,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
FuncInfo->MBB);
else
SDB->visitBitTestCase(SDB->BitTestCases[i],
SDB->BitTestCases[i].Default,
UnhandledWeight,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
// Update PHI Nodes
for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
pi != pe; ++pi) {
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
MachineBasicBlock *PHIBB = PHI->getParent();
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
// This is "default" BB. We have two jumps to it. From "header" BB and
// from last "case" BB.
if (PHIBB == SDB->BitTestCases[i].Default)
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
.addMBB(SDB->BitTestCases[i].Parent)
.addReg(FuncInfo->PHINodesToUpdate[pi].second)
.addMBB(SDB->BitTestCases[i].Cases.back().ThisBB);
// One of "cases" BB.
for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size();
j != ej; ++j) {
MachineBasicBlock* cBB = SDB->BitTestCases[i].Cases[j].ThisBB;
if (cBB->isSuccessor(PHIBB))
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(cBB);
}
}
}
SDB->BitTestCases.clear();
// If the JumpTable record is filled in, then we need to emit a jump table.
// Updating the PHI nodes is tricky in this case, since we need to determine
// whether the PHI is a successor of the range check MBB or the jump table MBB
for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) {
// Lower header first, if it wasn't already lowered
if (!SDB->JTCases[i].first.Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->JTCases[i].second.MBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitJumpTable(SDB->JTCases[i].second);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
// Update PHI Nodes
for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
pi != pe; ++pi) {
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
MachineBasicBlock *PHIBB = PHI->getParent();
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
// "default" BB. We can go there only from header BB.
if (PHIBB == SDB->JTCases[i].second.Default)
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
.addMBB(SDB->JTCases[i].first.HeaderBB);
// JT BB. Just iterate over successors here
if (FuncInfo->MBB->isSuccessor(PHIBB))
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB);
}
}
SDB->JTCases.clear();
// If we generated any switch lowering information, build and codegen any
// additional DAGs necessary.
for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Determine the unique successors.
SmallVector<MachineBasicBlock *, 2> Succs;
Succs.push_back(SDB->SwitchCases[i].TrueBB);
if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
Succs.push_back(SDB->SwitchCases[i].FalseBB);
// Emit the code. Note that this could result in FuncInfo->MBB being split.
SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
// Remember the last block, now that any splitting is done, for use in
// populating PHI nodes in successors.
MachineBasicBlock *ThisBB = FuncInfo->MBB;
// Handle any PHI nodes in successors of this chunk, as if we were coming
// from the original BB before switch expansion. Note that PHI nodes can
// occur multiple times in PHINodesToUpdate. We have to be very careful to
// handle them the right number of times.
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
FuncInfo->MBB = Succs[i];
FuncInfo->InsertPt = FuncInfo->MBB->end();
// FuncInfo->MBB may have been removed from the CFG if a branch was
// constant folded.
if (ThisBB->isSuccessor(FuncInfo->MBB)) {
for (MachineBasicBlock::iterator
MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
MachineInstrBuilder PHI(*MF, MBBI);
// This value for this PHI node is recorded in PHINodesToUpdate.
for (unsigned pn = 0; ; ++pn) {
assert(pn != FuncInfo->PHINodesToUpdate.size() &&
"Didn't find PHI entry!");
if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB);
break;
}
}
}
}
}
}
SDB->SwitchCases.clear();
}
/// Create the scheduler. If a specific scheduler was specified
/// via the SchedulerRegistry, use it, otherwise select the
/// one preferred by the target.
///
ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();
if (!Ctor) {
Ctor = ISHeuristic;
RegisterScheduler::setDefault(Ctor);
}
return Ctor(this, OptLevel);
}
//===----------------------------------------------------------------------===//
// Helper functions used by the generated instruction selector.
//===----------------------------------------------------------------------===//
// Calls to these methods are generated by tblgen.
/// CheckAndMask - The isel is trying to match something like (and X, 255). If
/// the dag combiner simplified the 255, we still want to match. RHS is the
/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const {
const APInt &ActualMask = RHS->getAPIntValue();
const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
// If the actual mask exactly matches, success!
if (ActualMask == DesiredMask)
return true;
// If the actual AND mask allows disallowed bits, this doesn't match.
if (ActualMask.intersects(~DesiredMask))
return false;
// Otherwise, the DAG Combiner may have proven that the value coming in is
// either already zero or is not demanded. Check for known zero input bits.
APInt NeededMask = DesiredMask & ~ActualMask;
if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
return true;
// TODO: check to see if missing bits are just not demanded.
// Otherwise, this pattern doesn't match.
return false;
}
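// An illustrative sketch (values assumed for exposition): suppose the .td
// pattern asks for (and X, 255) but the DAG combiner shrank the mask to 15
// after proving bits 7..4 of X are zero. Then ActualMask == 0x0F and
// DesiredMask == 0xFF, the intersects() test passes, NeededMask == 0xF0,
// and MaskedValueIsZero(LHS, 0xF0) lets the pattern match anyway.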
/// CheckOrMask - The isel is trying to match something like (or X, 255). If
/// the dag combiner simplified the 255, we still want to match. RHS is the
/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const {
const APInt &ActualMask = RHS->getAPIntValue();
const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
// If the actual mask exactly matches, success!
if (ActualMask == DesiredMask)
return true;
// If the actual OR mask sets bits outside the desired mask, this doesn't match.
if (ActualMask.intersects(~DesiredMask))
return false;
// Otherwise, the DAG Combiner may have proven that the value coming in is
// either already zero or is not demanded. Check for known zero input bits.
APInt NeededMask = DesiredMask & ~ActualMask;
APInt KnownZero, KnownOne;
CurDAG->computeKnownBits(LHS, KnownZero, KnownOne);
// If all the missing bits in the or are already known to be set, match!
if ((NeededMask & KnownOne) == NeededMask)
return true;
// TODO: check to see if missing bits are just not demanded.
// Otherwise, this pattern doesn't match.
return false;
}
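// An illustrative sketch (values assumed for exposition): for a desired
// (or X, 255) where the combiner reduced the constant to 0xF0, NeededMask
// == 0x0F; if computeKnownBits proves the low four bits of X are already
// one, the pattern still matches.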
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen. Others should not call it.
void SelectionDAGISel::
SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, SDLoc DL) {
std::vector<SDValue> InOps;
std::swap(InOps, Ops);
Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
if (InOps[e-1].getValueType() == MVT::Glue)
--e; // Don't process a glue operand if it is here.
while (i != e) {
unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
if (!InlineAsm::isMemKind(Flags)) {
// Just skip over this operand, copying the operands verbatim.
Ops.insert(Ops.end(), InOps.begin()+i,
InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
i += InlineAsm::getNumOperandRegisters(Flags) + 1;
} else {
assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
"Memory operand with multiple values?");
unsigned TiedToOperand;
if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) {
// We need the constraint ID from the operand this is tied to.
unsigned CurOp = InlineAsm::Op_FirstOperand;
Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
for (; TiedToOperand; --TiedToOperand) {
CurOp += InlineAsm::getNumOperandRegisters(Flags)+1;
Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
}
}
// Otherwise, this is a memory operand. Ask the target to select it.
std::vector<SDValue> SelOps;
if (SelectInlineAsmMemoryOperand(InOps[i+1],
InlineAsm::getMemoryConstraintID(Flags),
SelOps))
report_fatal_error("Could not match memory address. Inline asm"
" failure!");
// Add this to the output node.
unsigned NewFlags =
InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
i += 2;
}
}
// Add the glue input back if present.
if (e != InOps.size())
Ops.push_back(InOps.back());
}
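// A sketch of the flag-word encoding relied on above, assuming the layout
// used by InlineAsm::getFlagWord (the kind in the low 3 bits, the operand
// count shifted left by 3): a memory operand that the target lowered to a
// single SDValue is emitted as Kind_Mem | (1 << 3), so a decoder walking
// Ops can skip exactly one value for it.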
/// findGlueUse - Return the user of the MVT::Glue value produced by the
/// specified SDNode, or null if there is none.
///
static SDNode *findGlueUse(SDNode *N) {
unsigned FlagResNo = N->getNumValues()-1;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
SDUse &Use = I.getUse();
if (Use.getResNo() == FlagResNo)
return Use.getUser();
}
return nullptr;
}
/// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
/// This function recursively traverses up the operand chain, ignoring
/// certain nodes.
static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
SDNode *Root, SmallPtrSetImpl<SDNode*> &Visited,
bool IgnoreChains) {
// Node IDs are assigned so that a node's ID is guaranteed to be greater
// than the IDs of all of its (recursive) operands. If we scan to a point
// where 'Use' has a smaller ID than the node we're scanning for, then we
// know we will never find it.
//
// The ID of Use may be -1 (unassigned) if it is a newly allocated node.
// This can happen because we scan down to newly selected nodes in the case
// of glue uses.
if ((Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1))
return false;
// Don't revisit a node if we have already scanned it without failing; we
// know we won't fail if we scan it again.
if (!Visited.insert(Use).second)
return false;
for (const SDValue &Op : Use->op_values()) {
// Ignore chain uses, they are validated by HandleMergeInputChains.
if (Op.getValueType() == MVT::Other && IgnoreChains)
continue;
SDNode *N = Op.getNode();
if (N == Def) {
if (Use == ImmedUse || Use == Root)
continue; // We are not looking for immediate use.
assert(N != Root);
return true;
}
// Traverse up the operand chain.
if (findNonImmUse(N, Def, ImmedUse, Root, Visited, IgnoreChains))
return true;
}
return false;
}
/// IsProfitableToFold - Returns true if it's profitable to fold the specific
/// operand node N of U during instruction selection that starts at Root.
bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
SDNode *Root) const {
if (OptLevel == CodeGenOpt::None) return false;
return N.hasOneUse();
}
/// IsLegalToFold - Returns true if the specific operand node N of
/// U can be folded during instruction selection that starts at Root.
bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
CodeGenOpt::Level OptLevel,
bool IgnoreChains) {
if (OptLevel == CodeGenOpt::None) return false;
// If a use of Root can somehow reach N through a path that doesn't contain
// U, then folding N would create a cycle. E.g., in the following
// diagram, Root can reach N through X. If N is folded into Root, then
// X is both a predecessor and a successor of U.
//
// [N*] //
// ^ ^ //
// / \ //
// [U*] [X]? //
// ^ ^ //
// \ / //
// \ / //
// [Root*] //
//
// * indicates nodes to be folded together.
//
// If Root produces glue, then it gets (even more) interesting. Since it
// will be "glued" together with its glue use in the scheduler, we need to
// check if it might reach N.
//
// [N*] //
// ^ ^ //
// / \ //
// [U*] [X]? //
// ^ ^ //
// \ \ //
// \ | //
// [Root*] | //
// ^ | //
// f | //
// | / //
// [Y] / //
// ^ / //
// f / //
// | / //
// [GU] //
//
// If GU (glue use) indirectly reaches N (the load), and Root folds N
// (call it Fold), then X is a predecessor of GU and a successor of
// Fold. But since Fold and GU are glued together, this will create
// a cycle in the scheduling graph.
// If the node has glue, walk down the graph to the "lowest" node in the
// glued set.
EVT VT = Root->getValueType(Root->getNumValues()-1);
while (VT == MVT::Glue) {
SDNode *GU = findGlueUse(Root);
if (!GU)
break;
Root = GU;
VT = Root->getValueType(Root->getNumValues()-1);
// If our query node has a glue result with a use, we've walked up it. If
// the user (which has already been selected) has a chain or indirectly uses
// the chain, our WalkChainUsers predicate will not consider it. Because of
// this, we cannot ignore chains in this predicate.
IgnoreChains = false;
}
SmallPtrSet<SDNode*, 16> Visited;
return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
}
SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) {
SDLoc DL(N);
std::vector<SDValue> Ops(N->op_begin(), N->op_end());
SelectInlineAsmMemoryOperands(Ops, DL);
const EVT VTs[] = {MVT::Other, MVT::Glue};
SDValue New = CurDAG->getNode(ISD::INLINEASM, DL, VTs, Ops);
New->setNodeId(-1);
return New.getNode();
}
SDNode *SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
SDLoc dl(Op);
MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
unsigned Reg =
TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0),
*CurDAG);
SDValue New = CurDAG->getCopyFromReg(
Op->getOperand(0), dl, Reg, Op->getValueType(0));
New->setNodeId(-1);
return New.getNode();
}
SDNode *SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
SDLoc dl(Op);
MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(),
Op->getOperand(2).getValueType(),
*CurDAG);
SDValue New = CurDAG->getCopyToReg(
Op->getOperand(0), dl, Reg, Op->getOperand(2));
New->setNodeId(-1);
return New.getNode();
}
SDNode *SelectionDAGISel::Select_UNDEF(SDNode *N) {
return CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF,N->getValueType(0));
}
/// GetVBR - Decode a VBR-encoded value whose top bit is set.
LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
assert(Val >= 128 && "Not a VBR");
Val &= 127; // Remove first vbr bit.
unsigned Shift = 7;
uint64_t NextBits;
do {
NextBits = MatcherTable[Idx++];
Val |= (NextBits&127) << Shift;
Shift += 7;
} while (NextBits & 128);
return Val;
}
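// An illustrative, compiled-out usage sketch (byte values and the helper
// name are assumed for exposition): VBR stores 7 payload bits per byte,
// with bit 7 acting as a continuation flag on every byte but the last, so
// the pair {0xAC, 0x02} decodes to 44 | (2 << 7) == 300.
#if 0
static uint64_t decodeVBRExample() {
static const unsigned char Table[] = { 0xAC, 0x02 };
unsigned Idx = 0;
uint64_t Val = Table[Idx++];     // 0xAC: top bit set, so VBR-encoded.
if (Val & 128)
Val = GetVBR(Val, Table, Idx);   // Consumes 0x02 and returns 300.
return Val;
}
#endif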
/// UpdateChainsAndGlue - When a match is complete, this method updates uses of
/// interior glue and chain results to use the new glue and chain results.
void SelectionDAGISel::
UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
const SmallVectorImpl<SDNode*> &ChainNodesMatched,
SDValue InputGlue,
const SmallVectorImpl<SDNode*> &GlueResultNodesMatched,
bool isMorphNodeTo) {
SmallVector<SDNode*, 4> NowDeadNodes;
// Now that all the normal results are replaced, we replace the chain and
// glue results if present.
if (!ChainNodesMatched.empty()) {
assert(InputChain.getNode() &&
"Matched input chains but didn't produce a chain");
// Loop over all of the nodes we matched that produced a chain result.
// Replace all the chain results with the final chain we ended up with.
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
SDNode *ChainNode = ChainNodesMatched[i];
// If this node was already deleted, don't look at it.
if (ChainNode->getOpcode() == ISD::DELETED_NODE)
continue;
// Don't replace the results of the root node if we're doing a
// MorphNodeTo.
if (ChainNode == NodeToMatch && isMorphNodeTo)
continue;
SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
if (ChainVal.getValueType() == MVT::Glue)
ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain);
// If the node became dead and we haven't already seen it, delete it.
if (ChainNode->use_empty() &&
!std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
NowDeadNodes.push_back(ChainNode);
}
}
// If the result produces glue, update any glue results in the matched
// pattern with the glue result.
if (InputGlue.getNode()) {
// Handle any interior nodes explicitly marked.
for (unsigned i = 0, e = GlueResultNodesMatched.size(); i != e; ++i) {
SDNode *FRN = GlueResultNodesMatched[i];
// If this node was already deleted, don't look at it.
if (FRN->getOpcode() == ISD::DELETED_NODE)
continue;
assert(FRN->getValueType(FRN->getNumValues()-1) == MVT::Glue &&
"Doesn't have a glue result");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(FRN, FRN->getNumValues()-1),
InputGlue);
// If the node became dead and we haven't already seen it, delete it.
if (FRN->use_empty() &&
!std::count(NowDeadNodes.begin(), NowDeadNodes.end(), FRN))
NowDeadNodes.push_back(FRN);
}
}
if (!NowDeadNodes.empty())
CurDAG->RemoveDeadNodes(NowDeadNodes);
DEBUG(dbgs() << "ISEL: Match complete!\n");
}
enum ChainResult {
CR_Simple,              // Users are all already-selected or trivial nodes.
CR_InducesCycle,        // Folding here would induce a cycle; give up.
CR_LeadsToInteriorNode  // A user is an interior node of the pattern.
};
/// WalkChainUsers - Walk down the users of the specified chained node that is
/// part of the pattern we're matching, looking at all of the users we find.
/// This determines whether something is an interior node, whether we have a
/// non-pattern node in between two pattern nodes (which prevents folding because
/// it would induce a cycle) and whether we have a TokenFactor node sandwiched
/// between pattern nodes (in which case the TF becomes part of the pattern).
///
/// The walk we do here is guaranteed to be small because we quickly get down to
/// already selected nodes "below" us.
static ChainResult
WalkChainUsers(const SDNode *ChainedNode,
SmallVectorImpl<SDNode*> &ChainedNodesInPattern,
SmallVectorImpl<SDNode*> &InteriorChainedNodes) {
ChainResult Result = CR_Simple;
for (SDNode::use_iterator UI = ChainedNode->use_begin(),
E = ChainedNode->use_end(); UI != E; ++UI) {
// Make sure the use is of the chain, not some other value we produce.
if (UI.getUse().getValueType() != MVT::Other) continue;
SDNode *User = *UI;
if (User->getOpcode() == ISD::HANDLENODE) // Root of the graph.
continue;
// If we see an already-selected machine node, then we've gone beyond the
// pattern that we're selecting down into the already selected chunk of the
// DAG.
unsigned UserOpcode = User->getOpcode();
if (User->isMachineOpcode() ||
UserOpcode == ISD::CopyToReg ||
UserOpcode == ISD::CopyFromReg ||
UserOpcode == ISD::INLINEASM ||
UserOpcode == ISD::EH_LABEL ||
UserOpcode == ISD::LIFETIME_START ||
UserOpcode == ISD::LIFETIME_END) {
// If their node ID got reset to -1 then they've already been selected.
// Treat them like a MachineOpcode.
if (User->getNodeId() == -1)
continue;
}
// If we have a TokenFactor, we handle it specially.
if (User->getOpcode() != ISD::TokenFactor) {
// If the node isn't a token factor and isn't part of our pattern, then it
// must be a random chained node in between two nodes we're selecting.
// This happens when we have something like:
// x = load ptr
// call
// y = x+4
// store y -> ptr
// Because we structurally match the load/store as a read/modify/write,
// but the call is chained between them. We cannot fold in this case
// because it would induce a cycle in the graph.
if (!std::count(ChainedNodesInPattern.begin(),
ChainedNodesInPattern.end(), User))
return CR_InducesCycle;
// Otherwise we found a node that is part of our pattern. For example in:
// x = load ptr
// y = x+4
// store y -> ptr
// This would happen when we're scanning down from the load and see the
// store as a user. Record that there is a use of ChainedNode that is
// part of the pattern and keep scanning uses.
Result = CR_LeadsToInteriorNode;
InteriorChainedNodes.push_back(User);
continue;
}
// If we found a TokenFactor, there are two cases to consider: first if the
// TokenFactor is just hanging "below" the pattern we're matching (i.e. no
// uses of the TF are in our pattern) we just want to ignore it. Second,
// the TokenFactor can be sandwiched in between two chained nodes, like so:
// [Load chain]
// ^
// |
// [Load]
// ^ ^
// | \ DAG's like cheese
// / \ do you?
// / |
// [TokenFactor] [Op]
// ^ ^
// | |
// \ /
// \ /
// [Store]
//
// In this case, the TokenFactor becomes part of our match and we rewrite it
// as a new TokenFactor.
//
// To distinguish these two cases, do a recursive walk down the uses.
switch (WalkChainUsers(User, ChainedNodesInPattern, InteriorChainedNodes)) {
case CR_Simple:
// If the uses of the TokenFactor are just already-selected nodes, ignore
// it, it is "below" our pattern.
continue;
case CR_InducesCycle:
// If the uses of the TokenFactor lead to nodes that are not part of our
// pattern that are not selected, folding would turn this into a cycle,
// bail out now.
return CR_InducesCycle;
case CR_LeadsToInteriorNode:
break; // Otherwise, keep processing.
}
// Okay, we know we're in the interesting interior case. The TokenFactor
// is now going to be considered part of the pattern so that we rewrite its
// uses (it may have uses that are not part of the pattern) with the
// ultimate chain result of the generated code. We will also add its chain
// inputs as inputs to the ultimate TokenFactor we create.
Result = CR_LeadsToInteriorNode;
ChainedNodesInPattern.push_back(User);
InteriorChainedNodes.push_back(User);
continue;
}
return Result;
}
/// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
/// operation for when the pattern matched at least one node with a chain. The
/// input vector contains a list of all of the chained nodes that we match. We
/// must determine if this is a valid thing to cover (i.e. matching it won't
/// induce cycles in the DAG) and if so, create a TokenFactor node that will
/// be used as the input chain for the generated nodes.
static SDValue
HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
SelectionDAG *CurDAG) {
// Walk all of the chained nodes we've matched, recursively scanning down the
// users of the chain result. This adds any TokenFactor nodes that are caught
// in between chained nodes to the chained and interior nodes list.
SmallVector<SDNode*, 3> InteriorChainedNodes;
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
if (WalkChainUsers(ChainNodesMatched[i], ChainNodesMatched,
InteriorChainedNodes) == CR_InducesCycle)
return SDValue(); // Would induce a cycle.
}
// Okay, we have walked all the matched nodes and collected TokenFactor nodes
// that we are interested in. Form our input TokenFactor node.
SmallVector<SDValue, 3> InputChains;
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
// Add the input chain of this node to the InputChains list (which will be
// the operands of the generated TokenFactor) if it's not an interior node.
SDNode *N = ChainNodesMatched[i];
if (N->getOpcode() != ISD::TokenFactor) {
if (std::count(InteriorChainedNodes.begin(),InteriorChainedNodes.end(),N))
continue;
// Otherwise, add the input chain.
SDValue InChain = ChainNodesMatched[i]->getOperand(0);
assert(InChain.getValueType() == MVT::Other && "Not a chain");
InputChains.push_back(InChain);
continue;
}
// If we have a token factor, we want to add all inputs of the token factor
// that are not part of the pattern we're matching.
for (const SDValue &Op : N->op_values()) {
if (!std::count(ChainNodesMatched.begin(), ChainNodesMatched.end(),
Op.getNode()))
InputChains.push_back(Op);
}
}
if (InputChains.size() == 1)
return InputChains[0];
return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
MVT::Other, InputChains);
}
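// An illustrative sketch (a typical read/modify/write match is assumed):
// when a pattern matches both a load and a store, ChainNodesMatched holds
// both nodes and WalkChainUsers records the store as an interior node. The
// loop above then keeps only the load's input chain, InputChains ends up
// with one entry, and that chain is returned directly with no TokenFactor.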
/// MorphNode - Handle morphing a node in place for the selector.
SDNode *SelectionDAGISel::
MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
// It is possible we're using MorphNodeTo to replace a node with no
// normal results with one that has a normal result (or we could be
// adding a chain) and the input could have glue and chains as well.
// In this case we need to shift the operands down.
// FIXME: This is a horrible hack and broken in obscure cases, no worse
// than the old isel though.
int OldGlueResultNo = -1, OldChainResultNo = -1;
unsigned NTMNumResults = Node->getNumValues();
if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
OldGlueResultNo = NTMNumResults-1;
if (NTMNumResults != 1 &&
Node->getValueType(NTMNumResults-2) == MVT::Other)
OldChainResultNo = NTMNumResults-2;
} else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
OldChainResultNo = NTMNumResults-1;
// Call the underlying SelectionDAG routine to do the transmogrification. Note
// that this deletes operands of the old node that become dead.
SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);
// MorphNodeTo can operate in two ways: if an existing node with the
// specified operands exists, it can just return it. Otherwise, it
// updates the node in place to have the requested operands.
if (Res == Node) {
// If we updated the node in place, reset the node ID. To the isel,
// this should be just like a newly allocated machine node.
Res->setNodeId(-1);
}
unsigned ResNumResults = Res->getNumValues();
// Move the glue if needed.
if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
(unsigned)OldGlueResultNo != ResNumResults-1)
CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldGlueResultNo),
SDValue(Res, ResNumResults-1));
if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
--ResNumResults;
// Move the chain reference if needed.
if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
(unsigned)OldChainResultNo != ResNumResults-1)
CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
SDValue(Res, ResNumResults-1));
// Otherwise, no in-place update happened because an equivalent node already
// existed. Replace uses of the old node with the new one.
if (Res != Node)
CurDAG->ReplaceAllUsesWith(Node, Res);
return Res;
}
/// CheckSame - Implements OP_CheckSame.
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N,
const SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes) {
// Accept if it is exactly the same as a previously recorded node.
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
return N == RecordedNodes[RecNo].first;
}
/// CheckChildSame - Implements OP_CheckChildXSame.
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N,
const SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes,
unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
RecordedNodes);
}
/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
const SelectionDAGISel &SDISel) {
return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
}
/// CheckNodePredicate - Implements OP_CheckNodePredicate.
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
const SelectionDAGISel &SDISel, SDNode *N) {
return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDNode *N) {
uint16_t Opc = MatcherTable[MatcherIndex++];
Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
return N->getOpcode() == Opc;
}
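// The two opcode bytes above are read little-endian; as an assumed example,
// the byte pair {0x34, 0x12} in the matcher table encodes opcode 0x1234.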
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
const TargetLowering *TLI, const DataLayout &DL) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (N.getValueType() == VT) return true;
// Handle the case when VT is iPTR.
return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering *TLI, const DataLayout &DL,
unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI,
DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
return cast<CondCodeSDNode>(N)->get() ==
(ISD::CondCode)MatcherTable[MatcherIndex++];
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (cast<VTSDNode>(N)->getVT() == VT)
return true;
// Handle the case when VT is iPTR.
return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
return C && C->getSExtValue() == Val;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
if (N->getOpcode() != ISD::AND) return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
if (N->getOpcode() != ISD::OR) return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
}
/// IsPredicateKnownToFail - If we know how and can do so without pushing a
/// scope, evaluate the current node. If the current predicate is known to
/// fail, set Result=true and return anything (the caller ignores the returned
/// index in that case). If the current predicate is known to pass or cannot
/// be evaluated here, set Result=false and return the MatcherIndex to
/// continue with.
static unsigned IsPredicateKnownToFail(const unsigned char *Table,
unsigned Index, SDValue N,
bool &Result,
const SelectionDAGISel &SDISel,
SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes) {
switch (Table[Index++]) {
default:
Result = false;
return Index-1; // Could not evaluate this predicate.
case SelectionDAGISel::OPC_CheckSame:
Result = !::CheckSame(Table, Index, N, RecordedNodes);
return Index;
case SelectionDAGISel::OPC_CheckChild0Same:
case SelectionDAGISel::OPC_CheckChild1Same:
case SelectionDAGISel::OPC_CheckChild2Same:
case SelectionDAGISel::OPC_CheckChild3Same:
Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
return Index;
case SelectionDAGISel::OPC_CheckPatternPredicate:
Result = !::CheckPatternPredicate(Table, Index, SDISel);
return Index;
case SelectionDAGISel::OPC_CheckPredicate:
Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
return Index;
case SelectionDAGISel::OPC_CheckOpcode:
Result = !::CheckOpcode(Table, Index, N.getNode());
return Index;
case SelectionDAGISel::OPC_CheckType:
Result = !::CheckType(Table, Index, N, SDISel.TLI,
SDISel.CurDAG->getDataLayout());
return Index;
case SelectionDAGISel::OPC_CheckChild0Type:
case SelectionDAGISel::OPC_CheckChild1Type:
case SelectionDAGISel::OPC_CheckChild2Type:
case SelectionDAGISel::OPC_CheckChild3Type:
case SelectionDAGISel::OPC_CheckChild4Type:
case SelectionDAGISel::OPC_CheckChild5Type:
case SelectionDAGISel::OPC_CheckChild6Type:
case SelectionDAGISel::OPC_CheckChild7Type:
Result = !::CheckChildType(
Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(),
Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type);
return Index;
case SelectionDAGISel::OPC_CheckCondCode:
Result = !::CheckCondCode(Table, Index, N);
return Index;
case SelectionDAGISel::OPC_CheckValueType:
Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
SDISel.CurDAG->getDataLayout());
return Index;
case SelectionDAGISel::OPC_CheckInteger:
Result = !::CheckInteger(Table, Index, N);
return Index;
case SelectionDAGISel::OPC_CheckChild0Integer:
case SelectionDAGISel::OPC_CheckChild1Integer:
case SelectionDAGISel::OPC_CheckChild2Integer:
case SelectionDAGISel::OPC_CheckChild3Integer:
case SelectionDAGISel::OPC_CheckChild4Integer:
Result = !::CheckChildInteger(Table, Index, N,
Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
return Index;
case SelectionDAGISel::OPC_CheckAndImm:
Result = !::CheckAndImm(Table, Index, N, SDISel);
return Index;
case SelectionDAGISel::OPC_CheckOrImm:
Result = !::CheckOrImm(Table, Index, N, SDISel);
return Index;
}
}
namespace {
struct MatchScope {
/// FailIndex - If this match fails, this is the index to continue with.
unsigned FailIndex;
/// NodeStack - The node stack when the scope was formed.
SmallVector<SDValue, 4> NodeStack;
/// NumRecordedNodes - The number of recorded nodes when the scope was formed.
unsigned NumRecordedNodes;
/// NumMatchedMemRefs - The number of matched memref entries.
unsigned NumMatchedMemRefs;
/// InputChain/InputGlue - The current chain/glue
SDValue InputChain, InputGlue;
/// HasChainNodesMatched/HasGlueResultNodesMatched - True if the
/// corresponding matched-nodes list is non-empty.
bool HasChainNodesMatched, HasGlueResultNodesMatched;
};
/// \brief A DAG update listener to keep the matching state
/// (i.e. RecordedNodes and MatchScope) up to date if the target is allowed to
/// change the DAG while matching. The X86 addressing mode matcher is an
/// example of this.
class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
{
SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes;
SmallVectorImpl<MatchScope> &MatchScopes;
public:
MatchStateUpdater(SelectionDAG &DAG,
SmallVectorImpl<std::pair<SDValue, SDNode*> > &RN,
SmallVectorImpl<MatchScope> &MS) :
SelectionDAG::DAGUpdateListener(DAG),
RecordedNodes(RN), MatchScopes(MS) { }
void NodeDeleted(SDNode *N, SDNode *E) override {
// Some early-returns here to avoid the search if we deleted the node or
// if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
// do, so it's unnecessary to update matching state at that point).
// Neither of these can occur currently because we only install this
// update listener while matching complex patterns.
if (!E || E->isMachineOpcode())
return;
// Performing a linear search here does not matter because we almost never
// run this code. You'd have to have CSE occur during complex pattern
// matching.
for (auto &I : RecordedNodes)
if (I.first.getNode() == N)
I.first.setNode(E);
for (auto &I : MatchScopes)
for (auto &J : I.NodeStack)
if (J.getNode() == N)
J.setNode(E);
}
};
}
SDNode *SelectionDAGISel::
SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned TableSize) {
// FIXME: Should these even be selected? Handle these cases in the caller?
switch (NodeToMatch->getOpcode()) {
default:
break;
case ISD::EntryToken: // These nodes remain the same.
case ISD::BasicBlock:
case ISD::Register:
case ISD::RegisterMask:
case ISD::HANDLENODE:
case ISD::MDNODE_SDNODE:
case ISD::TargetConstant:
case ISD::TargetConstantFP:
case ISD::TargetConstantPool:
case ISD::TargetFrameIndex:
case ISD::TargetExternalSymbol:
case ISD::MCSymbol:
case ISD::TargetBlockAddress:
case ISD::TargetJumpTable:
case ISD::TargetGlobalTLSAddress:
case ISD::TargetGlobalAddress:
case ISD::TokenFactor:
case ISD::CopyFromReg:
case ISD::CopyToReg:
case ISD::EH_LABEL:
case ISD::LIFETIME_START:
case ISD::LIFETIME_END:
NodeToMatch->setNodeId(-1); // Mark selected.
return nullptr;
case ISD::AssertSext:
case ISD::AssertZext:
CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, 0),
NodeToMatch->getOperand(0));
return nullptr;
case ISD::INLINEASM: return Select_INLINEASM(NodeToMatch);
case ISD::READ_REGISTER: return Select_READ_REGISTER(NodeToMatch);
case ISD::WRITE_REGISTER: return Select_WRITE_REGISTER(NodeToMatch);
case ISD::UNDEF: return Select_UNDEF(NodeToMatch);
}
assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
// Set up the node stack with NodeToMatch as the only node on the stack.
SmallVector<SDValue, 8> NodeStack;
SDValue N = SDValue(NodeToMatch, 0);
NodeStack.push_back(N);
// MatchScopes - Scopes used when matching, if a match failure happens, this
// indicates where to continue checking.
SmallVector<MatchScope, 8> MatchScopes;
// RecordedNodes - This is the set of nodes that have been recorded by the
// state machine. The second value is the parent of the node, or null if the
// root is recorded.
SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
// MatchedMemRefs - This is the set of MemRef's we've seen in the input
// pattern.
SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
// These are the current input chain and glue for use when generating nodes.
// Various Emit operations change these. For example, emitting a copytoreg
// uses and updates these.
SDValue InputChain, InputGlue;
// ChainNodesMatched - If a pattern matches nodes that have input/output
// chains, the OPC_EmitMergeInputChains operation is emitted which indicates
// which ones they are. The result is captured into this list so that we can
// update the chain results when the pattern is complete.
SmallVector<SDNode*, 3> ChainNodesMatched;
SmallVector<SDNode*, 3> GlueResultNodesMatched;
DEBUG(dbgs() << "ISEL: Starting pattern match on root node: ";
NodeToMatch->dump(CurDAG);
dbgs() << '\n');
// Determine where to start the interpreter. Normally we start at opcode #0,
// but if the state machine starts with an OPC_SwitchOpcode, then we
// accelerate the first lookup (which is guaranteed to be hot) with the
// OpcodeOffset table.
unsigned MatcherIndex = 0;
if (!OpcodeOffset.empty()) {
// Already computed the OpcodeOffset table, just index into it.
if (N.getOpcode() < OpcodeOffset.size())
MatcherIndex = OpcodeOffset[N.getOpcode()];
DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
} else if (MatcherTable[0] == OPC_SwitchOpcode) {
// Otherwise, the table isn't computed, but the state machine does start
// with an OPC_SwitchOpcode instruction. Populate the table now, since this
// is the first time we're selecting an instruction.
unsigned Idx = 1;
while (1) {
// Get the size of this case.
unsigned CaseSize = MatcherTable[Idx++];
if (CaseSize & 128)
CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
if (CaseSize == 0) break;
// Get the opcode, add the index to the table.
uint16_t Opc = MatcherTable[Idx++];
Opc |= (unsigned short)MatcherTable[Idx++] << 8;
if (Opc >= OpcodeOffset.size())
OpcodeOffset.resize((Opc+1)*2);
OpcodeOffset[Opc] = Idx;
Idx += CaseSize;
}
// Okay, do the lookup for the first opcode.
if (N.getOpcode() < OpcodeOffset.size())
MatcherIndex = OpcodeOffset[N.getOpcode()];
}
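// A sketch of the table layout walked above (byte values and case count are
// assumed for exposition):
//   OPC_SwitchOpcode,
//     CaseSize0 (VBR), OpcLo0, OpcHi0, <CaseSize0 bytes of matcher code>,
//     CaseSize1 (VBR), OpcLo1, OpcHi1, <CaseSize1 bytes of matcher code>,
//     0   // a CaseSize of zero terminates the switch.
// OpcodeOffset[Opc] records the index of the first matcher byte of each
// case, so later selections of the same opcode jump straight to it.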
while (1) {
assert(MatcherIndex < TableSize && "Invalid index");
#ifndef NDEBUG
unsigned CurrentOpcodeIndex = MatcherIndex;
#endif
BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
switch (Opcode) {
case OPC_Scope: {
// Okay, the semantics of this operation are that we should push a scope
// then evaluate the first child. However, pushing a scope only to have
// the first check fail (which then pops it) is inefficient. If we can
// determine immediately that the first check (or first several) will
// immediately fail, don't even bother pushing a scope for them.
unsigned FailIndex;
while (1) {
unsigned NumToSkip = MatcherTable[MatcherIndex++];
if (NumToSkip & 128)
NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
// Found the end of the scope with no match.
if (NumToSkip == 0) {
FailIndex = 0;
break;
}
FailIndex = MatcherIndex+NumToSkip;
unsigned MatcherIndexOfPredicate = MatcherIndex;
(void)MatcherIndexOfPredicate; // silence warning.
// If we can't evaluate this predicate without pushing a scope (e.g. if
// it is a 'MoveParent') or if the predicate succeeds on this node, we
// push the scope and evaluate the full predicate chain.
bool Result;
MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
Result, *this, RecordedNodes);
if (!Result)
break;
DEBUG(dbgs() << " Skipped scope entry (due to false predicate) at "
<< "index " << MatcherIndexOfPredicate
<< ", continuing at " << FailIndex << "\n");
++NumDAGIselRetries;
// Otherwise, we know that this case of the Scope is guaranteed to fail,
// so move on to the next case.
MatcherIndex = FailIndex;
}
// If the whole scope failed to match, bail.
if (FailIndex == 0) break;
// Push a MatchScope which indicates where to go if the first child fails
// to match.
MatchScope NewEntry;
NewEntry.FailIndex = FailIndex;
NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
NewEntry.NumRecordedNodes = RecordedNodes.size();
NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
NewEntry.InputChain = InputChain;
NewEntry.InputGlue = InputGlue;
NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
NewEntry.HasGlueResultNodesMatched = !GlueResultNodesMatched.empty();
MatchScopes.push_back(NewEntry);
continue;
}
case OPC_RecordNode: {
// Remember this node, it may end up being an operand in the pattern.
SDNode *Parent = nullptr;
if (NodeStack.size() > 1)
Parent = NodeStack[NodeStack.size()-2].getNode();
RecordedNodes.push_back(std::make_pair(N, Parent));
continue;
}
case OPC_RecordChild0: case OPC_RecordChild1:
case OPC_RecordChild2: case OPC_RecordChild3:
case OPC_RecordChild4: case OPC_RecordChild5:
case OPC_RecordChild6: case OPC_RecordChild7: {
unsigned ChildNo = Opcode-OPC_RecordChild0;
if (ChildNo >= N.getNumOperands())
break; // Match fails if out of range child #.
RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
N.getNode()));
continue;
}
case OPC_RecordMemRef:
MatchedMemRefs.push_back(cast<MemSDNode>(N)->getMemOperand());
continue;
case OPC_CaptureGlueInput:
// If the current node has an input glue, capture it in InputGlue.
if (N->getNumOperands() != 0 &&
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
InputGlue = N->getOperand(N->getNumOperands()-1);
continue;
case OPC_MoveChild: {
unsigned ChildNo = MatcherTable[MatcherIndex++];
if (ChildNo >= N.getNumOperands())
break; // Match fails if out of range child #.
N = N.getOperand(ChildNo);
NodeStack.push_back(N);
continue;
}
case OPC_MoveParent:
// Pop the current node off the NodeStack.
NodeStack.pop_back();
assert(!NodeStack.empty() && "Node stack imbalance!");
N = NodeStack.back();
continue;
case OPC_CheckSame:
if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
continue;
case OPC_CheckChild0Same: case OPC_CheckChild1Same:
case OPC_CheckChild2Same: case OPC_CheckChild3Same:
if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
Opcode-OPC_CheckChild0Same))
break;
continue;
case OPC_CheckPatternPredicate:
if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
continue;
case OPC_CheckPredicate:
if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
N.getNode()))
break;
continue;
case OPC_CheckComplexPat: {
unsigned CPNum = MatcherTable[MatcherIndex++];
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
// If target can modify DAG during matching, keep the matching state
// consistent.
std::unique_ptr<MatchStateUpdater> MSU;
if (ComplexPatternFuncMutatesDAG())
MSU.reset(new MatchStateUpdater(*CurDAG, RecordedNodes,
MatchScopes));
if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
RecordedNodes[RecNo].first, CPNum,
RecordedNodes))
break;
continue;
}
case OPC_CheckOpcode:
if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
continue;
case OPC_CheckType:
if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
CurDAG->getDataLayout()))
break;
continue;
case OPC_SwitchOpcode: {
unsigned CurNodeOpcode = N.getOpcode();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
unsigned CaseSize;
while (1) {
// Get the size of this case.
CaseSize = MatcherTable[MatcherIndex++];
if (CaseSize & 128)
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
if (CaseSize == 0) break;
uint16_t Opc = MatcherTable[MatcherIndex++];
Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
// If the opcode matches, then we will execute this case.
if (CurNodeOpcode == Opc)
break;
// Otherwise, skip over this case.
MatcherIndex += CaseSize;
}
// If no cases matched, bail out.
if (CaseSize == 0) break;
// Otherwise, execute the case we found.
DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart
<< " to " << MatcherIndex << "\n");
continue;
}
case OPC_SwitchType: {
MVT CurNodeVT = N.getSimpleValueType();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
unsigned CaseSize;
while (1) {
// Get the size of this case.
CaseSize = MatcherTable[MatcherIndex++];
if (CaseSize & 128)
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
if (CaseSize == 0) break;
MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (CaseVT == MVT::iPTR)
CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
// If the VT matches, then we will execute this case.
if (CurNodeVT == CaseVT)
break;
// Otherwise, skip over this case.
MatcherIndex += CaseSize;
}
// If no cases matched, bail out.
if (CaseSize == 0) break;
// Otherwise, execute the case we found.
DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
<< "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
continue;
}
case OPC_CheckChild0Type: case OPC_CheckChild1Type:
case OPC_CheckChild2Type: case OPC_CheckChild3Type:
case OPC_CheckChild4Type: case OPC_CheckChild5Type:
case OPC_CheckChild6Type: case OPC_CheckChild7Type:
if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
CurDAG->getDataLayout(),
Opcode - OPC_CheckChild0Type))
break;
continue;
case OPC_CheckCondCode:
if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
continue;
case OPC_CheckValueType:
if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
CurDAG->getDataLayout()))
break;
continue;
case OPC_CheckInteger:
if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
continue;
case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
case OPC_CheckChild4Integer:
if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
Opcode-OPC_CheckChild0Integer)) break;
continue;
case OPC_CheckAndImm:
if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
continue;
case OPC_CheckOrImm:
if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
continue;
case OPC_CheckFoldableChainNode: {
assert(NodeStack.size() != 1 && "No parent node");
// Verify that all intermediate nodes between the root and this one have
// a single use.
bool HasMultipleUses = false;
for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
if (!NodeStack[i].hasOneUse()) {
HasMultipleUses = true;
break;
}
if (HasMultipleUses) break;
// Check to see that the target thinks this is profitable to fold and that
// we can fold it without inducing cycles in the graph.
if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
NodeToMatch) ||
!IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
NodeToMatch, OptLevel,
true/*We validate our own chains*/))
break;
continue;
}
case OPC_EmitInteger: {
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch),
VT), nullptr));
continue;
}
case OPC_EmitRegister: {
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
unsigned RegNo = MatcherTable[MatcherIndex++];
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
CurDAG->getRegister(RegNo, VT), nullptr));
continue;
}
case OPC_EmitRegister2: {
// For targets w/ more than 256 register names, the register enum
// values are stored in two bytes in the matcher table (just like
// opcodes).
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
unsigned RegNo = MatcherTable[MatcherIndex++];
RegNo |= MatcherTable[MatcherIndex++] << 8;
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
CurDAG->getRegister(RegNo, VT), nullptr));
continue;
}
case OPC_EmitConvertToTarget: {
// Convert from IMM/FPIMM to target version.
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
SDValue Imm = RecordedNodes[RecNo].first;
if (Imm->getOpcode() == ISD::Constant) {
const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
Imm = CurDAG->getConstant(*Val, SDLoc(NodeToMatch), Imm.getValueType(),
true);
} else if (Imm->getOpcode() == ISD::ConstantFP) {
const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
Imm = CurDAG->getConstantFP(*Val, SDLoc(NodeToMatch),
Imm.getValueType(), true);
}
RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
continue;
}
case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
case OPC_EmitMergeInputChains1_1: { // OPC_EmitMergeInputChains, 1, 1
// These are space-optimized forms of OPC_EmitMergeInputChains.
assert(!InputChain.getNode() &&
"EmitMergeInputChains should be the first chain producing node");
assert(ChainNodesMatched.empty() &&
"Should only have one EmitMergeInputChains per match");
// Read the chained node; its record number (0 or 1) is encoded in the
// opcode itself.
unsigned RecNo = Opcode == OPC_EmitMergeInputChains1_1;
assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
// FIXME: What if other value results of the node have uses not matched
// by this pattern?
if (ChainNodesMatched.back() != NodeToMatch &&
!RecordedNodes[RecNo].first.hasOneUse()) {
ChainNodesMatched.clear();
break;
}
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
if (!InputChain.getNode())
break; // Failed to merge.
continue;
}
case OPC_EmitMergeInputChains: {
assert(!InputChain.getNode() &&
"EmitMergeInputChains should be the first chain producing node");
// This node gets a list of nodes we matched in the input that have
// chains. We want to token factor all of the input chains to these nodes
// together. However, if any of the input chains is actually one of the
// nodes matched in this pattern, then we have an intra-match reference.
// Ignore these because the newly token factored chain should not refer to
// the old nodes.
unsigned NumChains = MatcherTable[MatcherIndex++];
assert(NumChains != 0 && "Can't TF zero chains");
assert(ChainNodesMatched.empty() &&
"Should only have one EmitMergeInputChains per match");
// Read all of the chained nodes.
for (unsigned i = 0; i != NumChains; ++i) {
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
// FIXME: What if other value results of the node have uses not matched
// by this pattern?
if (ChainNodesMatched.back() != NodeToMatch &&
!RecordedNodes[RecNo].first.hasOneUse()) {
ChainNodesMatched.clear();
break;
}
}
// If the inner loop broke out, the match fails.
if (ChainNodesMatched.empty())
break;
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
if (!InputChain.getNode())
break; // Failed to merge.
continue;
}
case OPC_EmitCopyToReg: {
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
unsigned DestPhysReg = MatcherTable[MatcherIndex++];
if (!InputChain.getNode())
InputChain = CurDAG->getEntryNode();
InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
DestPhysReg, RecordedNodes[RecNo].first,
InputGlue);
InputGlue = InputChain.getValue(1);
continue;
}
case OPC_EmitNodeXForm: {
unsigned XFormNo = MatcherTable[MatcherIndex++];
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
continue;
}
case OPC_EmitNode:
case OPC_MorphNodeTo: {
uint16_t TargetOpc = MatcherTable[MatcherIndex++];
TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
// Get the result VT list.
unsigned NumVTs = MatcherTable[MatcherIndex++];
SmallVector<EVT, 4> VTs;
for (unsigned i = 0; i != NumVTs; ++i) {
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (VT == MVT::iPTR)
VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
VTs.push_back(VT);
}
if (EmitNodeInfo & OPFL_Chain)
VTs.push_back(MVT::Other);
if (EmitNodeInfo & OPFL_GlueOutput)
VTs.push_back(MVT::Glue);
// This is hot code, so optimize the two most common cases of 1 and 2
// results.
SDVTList VTList;
if (VTs.size() == 1)
VTList = CurDAG->getVTList(VTs[0]);
else if (VTs.size() == 2)
VTList = CurDAG->getVTList(VTs[0], VTs[1]);
else
VTList = CurDAG->getVTList(VTs);
// Get the operand list.
unsigned NumOps = MatcherTable[MatcherIndex++];
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i != NumOps; ++i) {
unsigned RecNo = MatcherTable[MatcherIndex++];
if (RecNo & 128)
RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
Ops.push_back(RecordedNodes[RecNo].first);
}
// If there are variadic operands to add, handle them now.
if (EmitNodeInfo & OPFL_VariadicInfo) {
// Determine the start index to copy from.
unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
"Invalid variadic node");
// Copy all of the variadic operands, not including a potential glue
// input.
for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
i != e; ++i) {
SDValue V = NodeToMatch->getOperand(i);
if (V.getValueType() == MVT::Glue) break;
Ops.push_back(V);
}
}
// If this has chain/glue inputs, add them.
if (EmitNodeInfo & OPFL_Chain)
Ops.push_back(InputChain);
if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
Ops.push_back(InputGlue);
// Create the node.
SDNode *Res = nullptr;
if (Opcode != OPC_MorphNodeTo) {
// If this is a normal EmitNode command, just create the new node and
// add the results to the RecordedNodes list.
Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
VTList, Ops);
// Add all the non-glue/non-chain results to the RecordedNodes list.
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
nullptr));
}
} else if (NodeToMatch->getOpcode() != ISD::DELETED_NODE) {
Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops, EmitNodeInfo);
} else {
// NodeToMatch was eliminated by CSE when the target changed the DAG.
// We will visit the equivalent node later.
DEBUG(dbgs() << "Node was eliminated by CSE\n");
return nullptr;
}
// If the node had chain/glue results, update our notion of the current
// chain and glue.
if (EmitNodeInfo & OPFL_GlueOutput) {
InputGlue = SDValue(Res, VTs.size()-1);
if (EmitNodeInfo & OPFL_Chain)
InputChain = SDValue(Res, VTs.size()-2);
} else if (EmitNodeInfo & OPFL_Chain)
InputChain = SDValue(Res, VTs.size()-1);
// If the OPFL_MemRefs glue is set on this node, slap all of the
// accumulated memrefs onto it.
//
// FIXME: This is vastly incorrect for patterns with multiple-output
// instructions that access memory and for ComplexPatterns that match
// loads.
if (EmitNodeInfo & OPFL_MemRefs) {
// Only attach load or store memory operands if the generated
// instruction may load or store.
const MCInstrDesc &MCID = TII->get(TargetOpc);
bool mayLoad = MCID.mayLoad();
bool mayStore = MCID.mayStore();
unsigned NumMemRefs = 0;
for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
if ((*I)->isLoad()) {
if (mayLoad)
++NumMemRefs;
} else if ((*I)->isStore()) {
if (mayStore)
++NumMemRefs;
} else {
++NumMemRefs;
}
}
MachineSDNode::mmo_iterator MemRefs =
MF->allocateMemRefsArray(NumMemRefs);
MachineSDNode::mmo_iterator MemRefsPos = MemRefs;
for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
if ((*I)->isLoad()) {
if (mayLoad)
*MemRefsPos++ = *I;
} else if ((*I)->isStore()) {
if (mayStore)
*MemRefsPos++ = *I;
} else {
*MemRefsPos++ = *I;
}
}
cast<MachineSDNode>(Res)
->setMemRefs(MemRefs, MemRefs + NumMemRefs);
}
DEBUG(dbgs() << " "
<< (Opcode == OPC_MorphNodeTo ? "Morphed" : "Created")
<< " node: "; Res->dump(CurDAG); dbgs() << "\n");
// If this was a MorphNodeTo then we're completely done!
if (Opcode == OPC_MorphNodeTo) {
// Update chain and glue uses.
UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched,
InputGlue, GlueResultNodesMatched, true);
return Res;
}
continue;
}
case OPC_MarkGlueResults: {
unsigned NumNodes = MatcherTable[MatcherIndex++];
// Read and remember all the glue-result nodes.
for (unsigned i = 0; i != NumNodes; ++i) {
unsigned RecNo = MatcherTable[MatcherIndex++];
if (RecNo & 128)
RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
assert(RecNo < RecordedNodes.size() && "Invalid MarkGlueResults");
GlueResultNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
}
continue;
}
case OPC_CompleteMatch: {
// The match has been completed, and any new nodes (if any) have been
// created. Patch up references to the matched dag to use the newly
// created nodes.
unsigned NumResults = MatcherTable[MatcherIndex++];
for (unsigned i = 0; i != NumResults; ++i) {
unsigned ResSlot = MatcherTable[MatcherIndex++];
if (ResSlot & 128)
ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
SDValue Res = RecordedNodes[ResSlot].first;
assert(i < NodeToMatch->getNumValues() &&
NodeToMatch->getValueType(i) != MVT::Other &&
NodeToMatch->getValueType(i) != MVT::Glue &&
"Invalid number of results to complete!");
assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
NodeToMatch->getValueType(i) == MVT::iPTR ||
Res.getValueType() == MVT::iPTR ||
NodeToMatch->getValueType(i).getSizeInBits() ==
Res.getValueType().getSizeInBits()) &&
"invalid replacement");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);
}
// If the root node defines glue, add it to the glue nodes to update list.
if (NodeToMatch->getValueType(NodeToMatch->getNumValues()-1) == MVT::Glue)
GlueResultNodesMatched.push_back(NodeToMatch);
// Update chain and glue uses.
UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched,
InputGlue, GlueResultNodesMatched, false);
assert(NodeToMatch->use_empty() &&
"Didn't replace all uses of the node?");
// FIXME: We just return here, which interacts correctly with SelectRoot
// above. We should fix this to not return an SDNode* anymore.
return nullptr;
}
}
// If the code reached this point, then the match failed. See if there is
// another child to try in the current 'Scope', otherwise pop it until we
// find a case to check.
DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex << "\n");
++NumDAGIselRetries;
while (1) {
if (MatchScopes.empty()) {
CannotYetSelect(NodeToMatch);
return nullptr;
}
// Restore the interpreter state back to the point where the scope was
// formed.
MatchScope &LastScope = MatchScopes.back();
RecordedNodes.resize(LastScope.NumRecordedNodes);
NodeStack.clear();
NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
N = NodeStack.back();
if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
MatcherIndex = LastScope.FailIndex;
DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
InputChain = LastScope.InputChain;
InputGlue = LastScope.InputGlue;
if (!LastScope.HasChainNodesMatched)
ChainNodesMatched.clear();
if (!LastScope.HasGlueResultNodesMatched)
GlueResultNodesMatched.clear();
// Check to see what the offset is at the new MatcherIndex. If it is zero
// we have reached the end of this scope, otherwise we have another child
// in the current scope to try.
unsigned NumToSkip = MatcherTable[MatcherIndex++];
if (NumToSkip & 128)
NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
// If we have another child in this scope to match, update FailIndex and
// try it.
if (NumToSkip != 0) {
LastScope.FailIndex = MatcherIndex+NumToSkip;
break;
}
// End of this scope, pop it and try the next child in the containing
// scope.
MatchScopes.pop_back();
}
}
}
void SelectionDAGISel::CannotYetSelect(SDNode *N) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "Cannot select: ";
if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_VOID) {
N->printrFull(Msg, CurDAG);
Msg << "\nIn function: " << MF->getName();
} else {
bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
unsigned iid =
cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
if (iid < Intrinsic::num_intrinsics)
Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid);
else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
Msg << "target intrinsic %" << TII->getName(iid);
else
Msg << "unknown intrinsic #" << iid;
}
report_fatal_error(Msg.str());
}
char SelectionDAGISel::ID = 0;
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp | //===-- SelectionDAGDumper.cpp - Implement SelectionDAG::dump() -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG::dump method and friends.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
std::string SDNode::getOperationName(const SelectionDAG *G) const {
switch (getOpcode()) {
default:
if (getOpcode() < ISD::BUILTIN_OP_END)
return "<<Unknown DAG Node>>";
if (isMachineOpcode()) {
if (G)
if (const TargetInstrInfo *TII = G->getSubtarget().getInstrInfo())
if (getMachineOpcode() < TII->getNumOpcodes())
return TII->getName(getMachineOpcode());
return "<<Unknown Machine Node #" + utostr(getOpcode()) + ">>";
}
if (G) {
const TargetLowering &TLI = G->getTargetLoweringInfo();
const char *Name = TLI.getTargetNodeName(getOpcode());
if (Name) return Name;
return "<<Unknown Target Node #" + utostr(getOpcode()) + ">>";
}
return "<<Unknown Node #" + utostr(getOpcode()) + ">>";
#ifndef NDEBUG
case ISD::DELETED_NODE: return "<<Deleted Node!>>";
#endif
case ISD::PREFETCH: return "Prefetch";
case ISD::ATOMIC_FENCE: return "AtomicFence";
case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap";
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: return "AtomicCmpSwapWithSuccess";
case ISD::ATOMIC_SWAP: return "AtomicSwap";
case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd";
case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub";
case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd";
case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr";
case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor";
case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand";
case ISD::ATOMIC_LOAD_MIN: return "AtomicLoadMin";
case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax";
case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
case ISD::ATOMIC_LOAD: return "AtomicLoad";
case ISD::ATOMIC_STORE: return "AtomicStore";
case ISD::PCMARKER: return "PCMarker";
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
case ISD::SRCVALUE: return "SrcValue";
case ISD::MDNODE_SDNODE: return "MDNode";
case ISD::EntryToken: return "EntryToken";
case ISD::TokenFactor: return "TokenFactor";
case ISD::AssertSext: return "AssertSext";
case ISD::AssertZext: return "AssertZext";
case ISD::BasicBlock: return "BasicBlock";
case ISD::VALUETYPE: return "ValueType";
case ISD::Register: return "Register";
case ISD::RegisterMask: return "RegisterMask";
case ISD::Constant:
if (cast<ConstantSDNode>(this)->isOpaque())
return "OpaqueConstant";
return "Constant";
case ISD::ConstantFP: return "ConstantFP";
case ISD::GlobalAddress: return "GlobalAddress";
case ISD::GlobalTLSAddress: return "GlobalTLSAddress";
case ISD::FrameIndex: return "FrameIndex";
case ISD::JumpTable: return "JumpTable";
case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE";
case ISD::RETURNADDR: return "RETURNADDR";
case ISD::FRAMEADDR: return "FRAMEADDR";
case ISD::LOCAL_RECOVER: return "LOCAL_RECOVER";
case ISD::READ_REGISTER: return "READ_REGISTER";
case ISD::WRITE_REGISTER: return "WRITE_REGISTER";
case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
case ISD::EH_RETURN: return "EH_RETURN";
case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
case ISD::ConstantPool: return "ConstantPool";
case ISD::TargetIndex: return "TargetIndex";
case ISD::ExternalSymbol: return "ExternalSymbol";
case ISD::BlockAddress: return "BlockAddress";
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
unsigned IID = cast<ConstantSDNode>(getOperand(OpNo))->getZExtValue();
if (IID < Intrinsic::num_intrinsics)
return Intrinsic::getName((Intrinsic::ID)IID);
else if (const TargetIntrinsicInfo *TII = G->getTarget().getIntrinsicInfo())
return TII->getName(IID);
llvm_unreachable("Invalid intrinsic ID");
}
case ISD::BUILD_VECTOR: return "BUILD_VECTOR";
case ISD::TargetConstant:
if (cast<ConstantSDNode>(this)->isOpaque())
return "OpaqueTargetConstant";
return "TargetConstant";
case ISD::TargetConstantFP: return "TargetConstantFP";
case ISD::TargetGlobalAddress: return "TargetGlobalAddress";
case ISD::TargetGlobalTLSAddress: return "TargetGlobalTLSAddress";
case ISD::TargetFrameIndex: return "TargetFrameIndex";
case ISD::TargetJumpTable: return "TargetJumpTable";
case ISD::TargetConstantPool: return "TargetConstantPool";
case ISD::TargetExternalSymbol: return "TargetExternalSymbol";
case ISD::MCSymbol: return "MCSymbol";
case ISD::TargetBlockAddress: return "TargetBlockAddress";
case ISD::CopyToReg: return "CopyToReg";
case ISD::CopyFromReg: return "CopyFromReg";
case ISD::UNDEF: return "undef";
case ISD::MERGE_VALUES: return "merge_values";
case ISD::INLINEASM: return "inlineasm";
case ISD::EH_LABEL: return "eh_label";
case ISD::HANDLENODE: return "handlenode";
// Unary operators
case ISD::FABS: return "fabs";
case ISD::FMINNUM: return "fminnum";
case ISD::FMAXNUM: return "fmaxnum";
case ISD::FNEG: return "fneg";
case ISD::FSQRT: return "fsqrt";
case ISD::FSIN: return "fsin";
case ISD::FCOS: return "fcos";
case ISD::FSINCOS: return "fsincos";
case ISD::FTRUNC: return "ftrunc";
case ISD::FFLOOR: return "ffloor";
case ISD::FCEIL: return "fceil";
case ISD::FRINT: return "frint";
case ISD::FNEARBYINT: return "fnearbyint";
case ISD::FROUND: return "fround";
case ISD::FEXP: return "fexp";
case ISD::FEXP2: return "fexp2";
case ISD::FLOG: return "flog";
case ISD::FLOG2: return "flog2";
case ISD::FLOG10: return "flog10";
// Binary operators
case ISD::ADD: return "add";
case ISD::SUB: return "sub";
case ISD::MUL: return "mul";
case ISD::MULHU: return "mulhu";
case ISD::MULHS: return "mulhs";
case ISD::SDIV: return "sdiv";
case ISD::UDIV: return "udiv";
case ISD::SREM: return "srem";
case ISD::UREM: return "urem";
case ISD::SMUL_LOHI: return "smul_lohi";
case ISD::UMUL_LOHI: return "umul_lohi";
case ISD::SDIVREM: return "sdivrem";
case ISD::UDIVREM: return "udivrem";
case ISD::AND: return "and";
case ISD::OR: return "or";
case ISD::XOR: return "xor";
case ISD::SHL: return "shl";
case ISD::SRA: return "sra";
case ISD::SRL: return "srl";
case ISD::ROTL: return "rotl";
case ISD::ROTR: return "rotr";
case ISD::FADD: return "fadd";
case ISD::FSUB: return "fsub";
case ISD::FMUL: return "fmul";
case ISD::FDIV: return "fdiv";
case ISD::FMA: return "fma";
case ISD::FMAD: return "fmad";
case ISD::FREM: return "frem";
case ISD::FCOPYSIGN: return "fcopysign";
case ISD::FGETSIGN: return "fgetsign";
case ISD::FPOW: return "fpow";
case ISD::SMIN: return "smin";
case ISD::SMAX: return "smax";
case ISD::UMIN: return "umin";
case ISD::UMAX: return "umax";
case ISD::FPOWI: return "fpowi";
case ISD::SETCC: return "setcc";
case ISD::SELECT: return "select";
case ISD::VSELECT: return "vselect";
case ISD::SELECT_CC: return "select_cc";
case ISD::INSERT_VECTOR_ELT: return "insert_vector_elt";
case ISD::EXTRACT_VECTOR_ELT: return "extract_vector_elt";
case ISD::CONCAT_VECTORS: return "concat_vectors";
case ISD::INSERT_SUBVECTOR: return "insert_subvector";
case ISD::EXTRACT_SUBVECTOR: return "extract_subvector";
case ISD::SCALAR_TO_VECTOR: return "scalar_to_vector";
case ISD::VECTOR_SHUFFLE: return "vector_shuffle";
case ISD::CARRY_FALSE: return "carry_false";
case ISD::ADDC: return "addc";
case ISD::ADDE: return "adde";
case ISD::SADDO: return "saddo";
case ISD::UADDO: return "uaddo";
case ISD::SSUBO: return "ssubo";
case ISD::USUBO: return "usubo";
case ISD::SMULO: return "smulo";
case ISD::UMULO: return "umulo";
case ISD::SUBC: return "subc";
case ISD::SUBE: return "sube";
case ISD::SHL_PARTS: return "shl_parts";
case ISD::SRA_PARTS: return "sra_parts";
case ISD::SRL_PARTS: return "srl_parts";
// Conversion operators.
case ISD::SIGN_EXTEND: return "sign_extend";
case ISD::ZERO_EXTEND: return "zero_extend";
case ISD::ANY_EXTEND: return "any_extend";
case ISD::SIGN_EXTEND_INREG: return "sign_extend_inreg";
case ISD::ANY_EXTEND_VECTOR_INREG: return "any_extend_vector_inreg";
case ISD::SIGN_EXTEND_VECTOR_INREG: return "sign_extend_vector_inreg";
case ISD::ZERO_EXTEND_VECTOR_INREG: return "zero_extend_vector_inreg";
case ISD::TRUNCATE: return "truncate";
case ISD::FP_ROUND: return "fp_round";
case ISD::FLT_ROUNDS_: return "flt_rounds";
case ISD::FP_ROUND_INREG: return "fp_round_inreg";
case ISD::FP_EXTEND: return "fp_extend";
case ISD::SINT_TO_FP: return "sint_to_fp";
case ISD::UINT_TO_FP: return "uint_to_fp";
case ISD::FP_TO_SINT: return "fp_to_sint";
case ISD::FP_TO_UINT: return "fp_to_uint";
case ISD::BITCAST: return "bitcast";
case ISD::ADDRSPACECAST: return "addrspacecast";
case ISD::FP16_TO_FP: return "fp16_to_fp";
case ISD::FP_TO_FP16: return "fp_to_fp16";
case ISD::CONVERT_RNDSAT: {
switch (cast<CvtRndSatSDNode>(this)->getCvtCode()) {
default: llvm_unreachable("Unknown cvt code!");
case ISD::CVT_FF: return "cvt_ff";
case ISD::CVT_FS: return "cvt_fs";
case ISD::CVT_FU: return "cvt_fu";
case ISD::CVT_SF: return "cvt_sf";
case ISD::CVT_UF: return "cvt_uf";
case ISD::CVT_SS: return "cvt_ss";
case ISD::CVT_SU: return "cvt_su";
case ISD::CVT_US: return "cvt_us";
case ISD::CVT_UU: return "cvt_uu";
}
}
// Control flow instructions
case ISD::BR: return "br";
case ISD::BRIND: return "brind";
case ISD::BR_JT: return "br_jt";
case ISD::BRCOND: return "brcond";
case ISD::BR_CC: return "br_cc";
case ISD::CALLSEQ_START: return "callseq_start";
case ISD::CALLSEQ_END: return "callseq_end";
// Other operators
case ISD::LOAD: return "load";
case ISD::STORE: return "store";
case ISD::MLOAD: return "masked_load";
case ISD::MSTORE: return "masked_store";
case ISD::MGATHER: return "masked_gather";
case ISD::MSCATTER: return "masked_scatter";
case ISD::VAARG: return "vaarg";
case ISD::VACOPY: return "vacopy";
case ISD::VAEND: return "vaend";
case ISD::VASTART: return "vastart";
case ISD::DYNAMIC_STACKALLOC: return "dynamic_stackalloc";
case ISD::EXTRACT_ELEMENT: return "extract_element";
case ISD::BUILD_PAIR: return "build_pair";
case ISD::STACKSAVE: return "stacksave";
case ISD::STACKRESTORE: return "stackrestore";
case ISD::TRAP: return "trap";
case ISD::DEBUGTRAP: return "debugtrap";
case ISD::LIFETIME_START: return "lifetime.start";
case ISD::LIFETIME_END: return "lifetime.end";
case ISD::GC_TRANSITION_START: return "gc_transition.start";
case ISD::GC_TRANSITION_END: return "gc_transition.end";
// Bit manipulation
case ISD::BSWAP: return "bswap";
case ISD::CTPOP: return "ctpop";
case ISD::CTTZ: return "cttz";
case ISD::CTTZ_ZERO_UNDEF: return "cttz_zero_undef";
case ISD::CTLZ: return "ctlz";
case ISD::CTLZ_ZERO_UNDEF: return "ctlz_zero_undef";
// Trampolines
case ISD::INIT_TRAMPOLINE: return "init_trampoline";
case ISD::ADJUST_TRAMPOLINE: return "adjust_trampoline";
case ISD::CONDCODE:
switch (cast<CondCodeSDNode>(this)->get()) {
default: llvm_unreachable("Unknown setcc condition!");
case ISD::SETOEQ: return "setoeq";
case ISD::SETOGT: return "setogt";
case ISD::SETOGE: return "setoge";
case ISD::SETOLT: return "setolt";
case ISD::SETOLE: return "setole";
case ISD::SETONE: return "setone";
case ISD::SETO: return "seto";
case ISD::SETUO: return "setuo";
  case ISD::SETUEQ:                 return "setueq";
case ISD::SETUGT: return "setugt";
case ISD::SETUGE: return "setuge";
case ISD::SETULT: return "setult";
case ISD::SETULE: return "setule";
case ISD::SETUNE: return "setune";
case ISD::SETEQ: return "seteq";
case ISD::SETGT: return "setgt";
case ISD::SETGE: return "setge";
case ISD::SETLT: return "setlt";
case ISD::SETLE: return "setle";
case ISD::SETNE: return "setne";
case ISD::SETTRUE: return "settrue";
case ISD::SETTRUE2: return "settrue2";
case ISD::SETFALSE: return "setfalse";
case ISD::SETFALSE2: return "setfalse2";
}
}
}
const char *SDNode::getIndexedModeName(ISD::MemIndexedMode AM) {
switch (AM) {
default: return "";
case ISD::PRE_INC: return "<pre-inc>";
case ISD::PRE_DEC: return "<pre-dec>";
case ISD::POST_INC: return "<post-inc>";
case ISD::POST_DEC: return "<post-dec>";
}
}
void SDNode::dump() const { dump(nullptr); }
void SDNode::dump(const SelectionDAG *G) const {
print(dbgs(), G);
dbgs() << '\n';
}
void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
OS << (const void*)this << ": ";
for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
if (i) OS << ",";
if (getValueType(i) == MVT::Other)
OS << "ch";
else
OS << getValueType(i).getEVTString();
}
OS << " = " << getOperationName(G);
}
void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
if (const MachineSDNode *MN = dyn_cast<MachineSDNode>(this)) {
if (!MN->memoperands_empty()) {
OS << "<";
OS << "Mem:";
for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
e = MN->memoperands_end(); i != e; ++i) {
OS << **i;
if (std::next(i) != e)
OS << " ";
}
OS << ">";
}
} else if (const ShuffleVectorSDNode *SVN =
dyn_cast<ShuffleVectorSDNode>(this)) {
OS << "<";
for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
int Idx = SVN->getMaskElt(i);
if (i) OS << ",";
if (Idx < 0)
OS << "u";
else
OS << Idx;
}
OS << ">";
} else if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
OS << '<' << CSDN->getAPIntValue() << '>';
} else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEsingle)
OS << '<' << CSDN->getValueAPF().convertToFloat() << '>';
else if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEdouble)
OS << '<' << CSDN->getValueAPF().convertToDouble() << '>';
else {
OS << "<APFloat(";
CSDN->getValueAPF().bitcastToAPInt().dump();
OS << ")>";
}
} else if (const GlobalAddressSDNode *GADN =
dyn_cast<GlobalAddressSDNode>(this)) {
int64_t offset = GADN->getOffset();
OS << '<';
GADN->getGlobal()->printAsOperand(OS);
OS << '>';
if (offset > 0)
OS << " + " << offset;
else
OS << " " << offset;
if (unsigned int TF = GADN->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const FrameIndexSDNode *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
OS << "<" << FIDN->getIndex() << ">";
} else if (const JumpTableSDNode *JTDN = dyn_cast<JumpTableSDNode>(this)) {
OS << "<" << JTDN->getIndex() << ">";
if (unsigned int TF = JTDN->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(this)){
int offset = CP->getOffset();
if (CP->isMachineConstantPoolEntry())
OS << "<" << *CP->getMachineCPVal() << ">";
else
OS << "<" << *CP->getConstVal() << ">";
if (offset > 0)
OS << " + " << offset;
else
OS << " " << offset;
if (unsigned int TF = CP->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(this)) {
OS << "<" << TI->getIndex() << '+' << TI->getOffset() << ">";
if (unsigned TF = TI->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
OS << "<";
const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock();
if (LBB)
OS << LBB->getName() << " ";
OS << (const void*)BBDN->getBasicBlock() << ">";
} else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
OS << ' ' << PrintReg(R->getReg(),
G ? G->getSubtarget().getRegisterInfo() : nullptr);
} else if (const ExternalSymbolSDNode *ES =
dyn_cast<ExternalSymbolSDNode>(this)) {
OS << "'" << ES->getSymbol() << "'";
if (unsigned int TF = ES->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const SrcValueSDNode *M = dyn_cast<SrcValueSDNode>(this)) {
if (M->getValue())
OS << "<" << M->getValue() << ">";
else
OS << "<null>";
} else if (const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(this)) {
if (MD->getMD())
OS << "<" << MD->getMD() << ">";
else
OS << "<null>";
} else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
OS << ":" << N->getVT().getEVTString();
  } else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
OS << "<" << *LD->getMemOperand();
bool doExt = true;
switch (LD->getExtensionType()) {
default: doExt = false; break;
case ISD::EXTLOAD: OS << ", anyext"; break;
case ISD::SEXTLOAD: OS << ", sext"; break;
case ISD::ZEXTLOAD: OS << ", zext"; break;
}
if (doExt)
OS << " from " << LD->getMemoryVT().getEVTString();
const char *AM = getIndexedModeName(LD->getAddressingMode());
if (*AM)
OS << ", " << AM;
OS << ">";
} else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
OS << "<" << *ST->getMemOperand();
if (ST->isTruncatingStore())
OS << ", trunc to " << ST->getMemoryVT().getEVTString();
const char *AM = getIndexedModeName(ST->getAddressingMode());
if (*AM)
OS << ", " << AM;
OS << ">";
} else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
OS << "<" << *M->getMemOperand() << ">";
} else if (const BlockAddressSDNode *BA =
dyn_cast<BlockAddressSDNode>(this)) {
int64_t offset = BA->getOffset();
OS << "<";
BA->getBlockAddress()->getFunction()->printAsOperand(OS, false);
OS << ", ";
BA->getBlockAddress()->getBasicBlock()->printAsOperand(OS, false);
OS << ">";
if (offset > 0)
OS << " + " << offset;
else
OS << " " << offset;
if (unsigned int TF = BA->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const AddrSpaceCastSDNode *ASC =
dyn_cast<AddrSpaceCastSDNode>(this)) {
OS << '['
<< ASC->getSrcAddressSpace()
<< " -> "
<< ASC->getDestAddressSpace()
<< ']';
}
if (unsigned Order = getIROrder())
OS << " [ORD=" << Order << ']';
if (getNodeId() != -1)
OS << " [ID=" << getNodeId() << ']';
if (!G)
return;
DILocation *L = getDebugLoc();
if (!L)
return;
if (auto *Scope = L->getScope())
OS << Scope->getFilename();
else
OS << "<unknown>";
OS << ':' << L->getLine();
if (unsigned C = L->getColumn())
OS << ':' << C;
}
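// DumpNodes prints each single-use operand recursively above its user, so the
// output reads roughly bottom-up; operands with multiple uses are shown as
// "<multiple use>" placeholders and dumped separately.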
static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
for (const SDValue &Op : N->op_values())
if (Op.getNode()->hasOneUse())
DumpNodes(Op.getNode(), indent+2, G);
else
dbgs() << "\n" << std::string(indent+2, ' ')
<< (void*)Op.getNode() << ": <multiple use>";
dbgs() << '\n';
dbgs().indent(indent);
N->dump(G);
}
void SelectionDAG::dump() const {
dbgs() << "SelectionDAG has " << AllNodes.size() << " nodes:";
for (allnodes_const_iterator I = allnodes_begin(), E = allnodes_end();
I != E; ++I) {
const SDNode *N = I;
if (!N->hasOneUse() && N != getRoot().getNode())
DumpNodes(N, 2, this);
}
if (getRoot().getNode()) DumpNodes(getRoot().getNode(), 2, this);
dbgs() << "\n\n";
}
void SDNode::printr(raw_ostream &OS, const SelectionDAG *G) const {
print_types(OS, G);
print_details(OS, G);
}
typedef SmallPtrSet<const SDNode *, 128> VisitedSDNodeSet;
static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
const SelectionDAG *G, VisitedSDNodeSet &once) {
if (!once.insert(N).second) // If we've been here before, return now.
return;
// Dump the current SDNode, but don't end the line yet.
OS.indent(indent);
N->printr(OS, G);
// Having printed this SDNode, walk the children:
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
const SDNode *child = N->getOperand(i).getNode();
if (i) OS << ",";
OS << " ";
if (child->getNumOperands() == 0) {
// This child has no grandchildren; print it inline right here.
child->printr(OS, G);
once.insert(child);
} else { // Just the address. FIXME: also print the child's opcode.
OS << (const void*)child;
if (unsigned RN = N->getOperand(i).getResNo())
OS << ":" << RN;
}
}
OS << "\n";
// Dump children that have grandchildren on their own line(s).
for (const SDValue &Op : N->op_values())
DumpNodesr(OS, Op.getNode(), indent+2, G, once);
}
void SDNode::dumpr() const {
VisitedSDNodeSet once;
DumpNodesr(dbgs(), this, 0, nullptr, once);
}
void SDNode::dumpr(const SelectionDAG *G) const {
VisitedSDNodeSet once;
DumpNodesr(dbgs(), this, 0, G, once);
}
static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
const SelectionDAG *G, unsigned depth,
unsigned indent) {
if (depth == 0)
return;
OS.indent(indent);
N->print(OS, G);
for (const SDValue &Op : N->op_values()) {
// Don't follow chain operands.
if (Op.getValueType() == MVT::Other)
continue;
OS << '\n';
printrWithDepthHelper(OS, Op.getNode(), G, depth-1, indent+2);
}
}
void SDNode::printrWithDepth(raw_ostream &OS, const SelectionDAG *G,
unsigned depth) const {
printrWithDepthHelper(OS, this, G, depth, 0);
}
void SDNode::printrFull(raw_ostream &OS, const SelectionDAG *G) const {
// Don't print impossibly deep things.
printrWithDepth(OS, G, 10);
}
void SDNode::dumprWithDepth(const SelectionDAG *G, unsigned depth) const {
printrWithDepth(dbgs(), G, depth);
}
void SDNode::dumprFull(const SelectionDAG *G) const {
// Don't print impossibly deep things.
dumprWithDepth(G, 10);
}
void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
print_types(OS, G);
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
if (i) OS << ", "; else OS << " ";
OS << (void*)getOperand(i).getNode();
if (unsigned RN = getOperand(i).getResNo())
OS << ":" << RN;
}
print_details(OS, G);
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | //===-------- LegalizeFloatTypes.cpp - Legalization of float types --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements float type expansion and softening for LegalizeTypes.
// Softening is the act of turning a computation in an illegal floating point
// type into a computation in an integer type of the same size; also known as
// "soft float". For example, turning f32 arithmetic into operations using i32.
// The resulting integer value is the same as what you would get by performing
// the floating point operation and bitcasting the result to the integer type.
// Expansion is the act of changing a computation in an illegal type to be a
// computation in two identical registers of a smaller type. For example,
// implementing ppcf128 arithmetic in two f64 registers.
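//
// As an illustrative sketch (not specific to any one target): on a soft-float
// target an f32 addition
//     %r = fadd f32 %a, %b
// is softened into an i32 computation via a runtime call such as __addsf3
// operating on the bitcast integer values, while a ppcf128 operation becomes
// arithmetic on a Hi/Lo pair of f64 values.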
//
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "legalize-types"
/// GetFPLibCall - Return the right libcall for the given floating point type.
static RTLIB::Libcall GetFPLibCall(EVT VT,
RTLIB::Libcall Call_F32,
RTLIB::Libcall Call_F64,
RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_F128,
RTLIB::Libcall Call_PPCF128) {
return
VT == MVT::f32 ? Call_F32 :
VT == MVT::f64 ? Call_F64 :
VT == MVT::f80 ? Call_F80 :
VT == MVT::f128 ? Call_F128 :
VT == MVT::ppcf128 ? Call_PPCF128 :
RTLIB::UNKNOWN_LIBCALL;
}
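// For example, GetFPLibCall(MVT::f64, RTLIB::ADD_F32, RTLIB::ADD_F64, ...)
// selects RTLIB::ADD_F64, which TLI.makeLibCall typically lowers to a call to
// the __adddf3 runtime routine.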
//===----------------------------------------------------------------------===//
// Result Float to Integer Conversion.
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
DEBUG(dbgs() << "Soften float result " << ResNo << ": "; N->dump(&DAG);
dbgs() << "\n");
SDValue R = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "SoftenFloatResult #" << ResNo << ": ";
N->dump(&DAG); dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to soften the result of this operator!");
case ISD::MERGE_VALUES:R = SoftenFloatRes_MERGE_VALUES(N, ResNo); break;
case ISD::BITCAST: R = SoftenFloatRes_BITCAST(N); break;
case ISD::BUILD_PAIR: R = SoftenFloatRes_BUILD_PAIR(N); break;
case ISD::ConstantFP:
R = SoftenFloatRes_ConstantFP(cast<ConstantFPSDNode>(N));
break;
case ISD::EXTRACT_VECTOR_ELT:
R = SoftenFloatRes_EXTRACT_VECTOR_ELT(N); break;
case ISD::FABS: R = SoftenFloatRes_FABS(N); break;
case ISD::FMINNUM: R = SoftenFloatRes_FMINNUM(N); break;
case ISD::FMAXNUM: R = SoftenFloatRes_FMAXNUM(N); break;
case ISD::FADD: R = SoftenFloatRes_FADD(N); break;
case ISD::FCEIL: R = SoftenFloatRes_FCEIL(N); break;
case ISD::FCOPYSIGN: R = SoftenFloatRes_FCOPYSIGN(N); break;
case ISD::FCOS: R = SoftenFloatRes_FCOS(N); break;
case ISD::FDIV: R = SoftenFloatRes_FDIV(N); break;
case ISD::FEXP: R = SoftenFloatRes_FEXP(N); break;
case ISD::FEXP2: R = SoftenFloatRes_FEXP2(N); break;
case ISD::FFLOOR: R = SoftenFloatRes_FFLOOR(N); break;
case ISD::FLOG: R = SoftenFloatRes_FLOG(N); break;
case ISD::FLOG2: R = SoftenFloatRes_FLOG2(N); break;
case ISD::FLOG10: R = SoftenFloatRes_FLOG10(N); break;
case ISD::FMA: R = SoftenFloatRes_FMA(N); break;
case ISD::FMUL: R = SoftenFloatRes_FMUL(N); break;
case ISD::FNEARBYINT: R = SoftenFloatRes_FNEARBYINT(N); break;
case ISD::FNEG: R = SoftenFloatRes_FNEG(N); break;
case ISD::FP_EXTEND: R = SoftenFloatRes_FP_EXTEND(N); break;
case ISD::FP_ROUND: R = SoftenFloatRes_FP_ROUND(N); break;
case ISD::FP16_TO_FP: R = SoftenFloatRes_FP16_TO_FP(N); break;
case ISD::FPOW: R = SoftenFloatRes_FPOW(N); break;
case ISD::FPOWI: R = SoftenFloatRes_FPOWI(N); break;
case ISD::FREM: R = SoftenFloatRes_FREM(N); break;
case ISD::FRINT: R = SoftenFloatRes_FRINT(N); break;
case ISD::FROUND: R = SoftenFloatRes_FROUND(N); break;
case ISD::FSIN: R = SoftenFloatRes_FSIN(N); break;
case ISD::FSQRT: R = SoftenFloatRes_FSQRT(N); break;
case ISD::FSUB: R = SoftenFloatRes_FSUB(N); break;
case ISD::FTRUNC: R = SoftenFloatRes_FTRUNC(N); break;
case ISD::LOAD: R = SoftenFloatRes_LOAD(N); break;
case ISD::SELECT: R = SoftenFloatRes_SELECT(N); break;
case ISD::SELECT_CC: R = SoftenFloatRes_SELECT_CC(N); break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP: R = SoftenFloatRes_XINT_TO_FP(N); break;
case ISD::UNDEF: R = SoftenFloatRes_UNDEF(N); break;
case ISD::VAARG: R = SoftenFloatRes_VAARG(N); break;
}
// If R is null, the sub-method took care of registering the result.
if (R.getNode())
SetSoftenedFloat(SDValue(N, ResNo), R);
}
SDValue DAGTypeLegalizer::SoftenFloatRes_BITCAST(SDNode *N) {
return BitConvertToInteger(N->getOperand(0));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_MERGE_VALUES(SDNode *N,
unsigned ResNo) {
SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
return BitConvertToInteger(Op);
}
SDValue DAGTypeLegalizer::SoftenFloatRes_BUILD_PAIR(SDNode *N) {
// Convert the inputs to integers, and build a new pair out of them.
return DAG.getNode(ISD::BUILD_PAIR, SDLoc(N),
TLI.getTypeToTransformTo(*DAG.getContext(),
N->getValueType(0)),
BitConvertToInteger(N->getOperand(0)),
BitConvertToInteger(N->getOperand(1)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(ConstantFPSDNode *N) {
return DAG.getConstant(N->getValueAPF().bitcastToAPInt(), SDLoc(N),
TLI.getTypeToTransformTo(*DAG.getContext(),
N->getValueType(0)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
SDValue NewOp = BitConvertVectorToIntegerVector(N->getOperand(0));
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
NewOp.getValueType().getVectorElementType(),
NewOp, N->getOperand(1));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FABS(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned Size = NVT.getSizeInBits();
// Mask = ~(1 << (Size-1))
APInt API = APInt::getAllOnesValue(Size);
API.clearBit(Size - 1);
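  // (For f32 this yields the i32 mask 0x7FFFFFFF, so the AND below clears
  // only the IEEE sign bit.)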
SDValue Mask = DAG.getConstant(API, SDLoc(N), NVT);
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return DAG.getNode(ISD::AND, SDLoc(N), NVT, Op, Mask);
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FMINNUM(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::FMIN_F32,
RTLIB::FMIN_F64,
RTLIB::FMIN_F80,
RTLIB::FMIN_F128,
RTLIB::FMIN_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FMAXNUM(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::FMAX_F32,
RTLIB::FMAX_F64,
RTLIB::FMAX_F80,
RTLIB::FMAX_F128,
RTLIB::FMAX_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FADD(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::ADD_F32,
RTLIB::ADD_F64,
RTLIB::ADD_F80,
RTLIB::ADD_F128,
RTLIB::ADD_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FCEIL(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::CEIL_F32,
RTLIB::CEIL_F64,
RTLIB::CEIL_F80,
RTLIB::CEIL_F128,
RTLIB::CEIL_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) {
SDValue LHS = GetSoftenedFloat(N->getOperand(0));
SDValue RHS = BitConvertToInteger(N->getOperand(1));
SDLoc dl(N);
EVT LVT = LHS.getValueType();
EVT RVT = RHS.getValueType();
unsigned LSize = LVT.getSizeInBits();
unsigned RSize = RVT.getSizeInBits();
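  // E.g. for copysign(f32 %x, f64 %y) the sign bit sits at bit 63 of the i64
  // RHS and must travel down to bit 31 of the i32 LHS; the SizeDiff logic
  // below handles shifts in either direction.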
// First get the sign bit of second operand.
SDValue SignBit = DAG.getNode(
ISD::SHL, dl, RVT, DAG.getConstant(1, dl, RVT),
DAG.getConstant(RSize - 1, dl,
TLI.getShiftAmountTy(RVT, DAG.getDataLayout())));
SignBit = DAG.getNode(ISD::AND, dl, RVT, RHS, SignBit);
// Shift right or sign-extend it if the two operands have different types.
int SizeDiff = RVT.getSizeInBits() - LVT.getSizeInBits();
if (SizeDiff > 0) {
SignBit =
DAG.getNode(ISD::SRL, dl, RVT, SignBit,
DAG.getConstant(SizeDiff, dl,
TLI.getShiftAmountTy(SignBit.getValueType(),
DAG.getDataLayout())));
SignBit = DAG.getNode(ISD::TRUNCATE, dl, LVT, SignBit);
} else if (SizeDiff < 0) {
SignBit = DAG.getNode(ISD::ANY_EXTEND, dl, LVT, SignBit);
SignBit =
DAG.getNode(ISD::SHL, dl, LVT, SignBit,
DAG.getConstant(-SizeDiff, dl,
TLI.getShiftAmountTy(SignBit.getValueType(),
DAG.getDataLayout())));
}
// Clear the sign bit of the first operand.
SDValue Mask = DAG.getNode(
ISD::SHL, dl, LVT, DAG.getConstant(1, dl, LVT),
DAG.getConstant(LSize - 1, dl,
TLI.getShiftAmountTy(LVT, DAG.getDataLayout())));
Mask = DAG.getNode(ISD::SUB, dl, LVT, Mask, DAG.getConstant(1, dl, LVT));
LHS = DAG.getNode(ISD::AND, dl, LVT, LHS, Mask);
// Or the value with the sign bit.
return DAG.getNode(ISD::OR, dl, LVT, LHS, SignBit);
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FCOS(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::COS_F32,
RTLIB::COS_F64,
RTLIB::COS_F80,
RTLIB::COS_F128,
RTLIB::COS_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FDIV(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::DIV_F32,
RTLIB::DIV_F64,
RTLIB::DIV_F80,
RTLIB::DIV_F128,
RTLIB::DIV_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FEXP(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::EXP_F32,
RTLIB::EXP_F64,
RTLIB::EXP_F80,
RTLIB::EXP_F128,
RTLIB::EXP_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FEXP2(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::EXP2_F32,
RTLIB::EXP2_F64,
RTLIB::EXP2_F80,
RTLIB::EXP2_F128,
RTLIB::EXP2_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FFLOOR(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::FLOOR_F32,
RTLIB::FLOOR_F64,
RTLIB::FLOOR_F80,
RTLIB::FLOOR_F128,
RTLIB::FLOOR_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::LOG_F32,
RTLIB::LOG_F64,
RTLIB::LOG_F80,
RTLIB::LOG_F128,
RTLIB::LOG_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG2(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::LOG2_F32,
RTLIB::LOG2_F64,
RTLIB::LOG2_F80,
RTLIB::LOG2_F128,
RTLIB::LOG2_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG10(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::LOG10_F32,
RTLIB::LOG10_F64,
RTLIB::LOG10_F80,
RTLIB::LOG10_F128,
RTLIB::LOG10_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FMA(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[3] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)),
GetSoftenedFloat(N->getOperand(2)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::FMA_F32,
RTLIB::FMA_F64,
RTLIB::FMA_F80,
RTLIB::FMA_F128,
RTLIB::FMA_PPCF128),
NVT, Ops, 3, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FMUL(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::MUL_F32,
RTLIB::MUL_F64,
RTLIB::MUL_F80,
RTLIB::MUL_F128,
RTLIB::MUL_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FNEARBYINT(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::NEARBYINT_F32,
RTLIB::NEARBYINT_F64,
RTLIB::NEARBYINT_F80,
RTLIB::NEARBYINT_F128,
RTLIB::NEARBYINT_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FNEG(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
// Expand Y = FNEG(X) -> Y = SUB -0.0, X
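  // (Subtracting from -0.0 rather than +0.0 preserves signed zero:
  // -0.0 - (+0.0) is -0.0, whereas 0.0 - 0.0 is +0.0.)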
SDValue Ops[2] = { DAG.getConstantFP(-0.0, dl, N->getValueType(0)),
GetSoftenedFloat(N->getOperand(0)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::SUB_F32,
RTLIB::SUB_F64,
RTLIB::SUB_F80,
RTLIB::SUB_F128,
RTLIB::SUB_PPCF128),
NVT, Ops, 2, false, dl).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = N->getOperand(0);
// There's only a libcall for f16 -> f32, so proceed in two stages. Also, it's
// entirely possible for both f16 and f32 to be legal, so use the fully
// hard-float FP_EXTEND rather than FP16_TO_FP.
if (Op.getValueType() == MVT::f16 && N->getValueType(0) != MVT::f32) {
Op = DAG.getNode(ISD::FP_EXTEND, SDLoc(N), MVT::f32, Op);
if (getTypeAction(MVT::f32) == TargetLowering::TypeSoftenFloat)
SoftenFloatResult(Op.getNode(), 0);
}
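  // E.g. an f16 -> f64 FP_EXTEND is emitted as f16 -> f32 (the only f16
  // extension libcall available) followed by an f32 -> f64 extension.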
RTLIB::Libcall LC = RTLIB::getFPEXT(Op.getValueType(), N->getValueType(0));
if (getTypeAction(Op.getValueType()) == TargetLowering::TypeSoftenFloat)
Op = GetSoftenedFloat(Op);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_EXTEND!");
return TLI.makeLibCall(DAG, LC, NVT, &Op, 1, false, SDLoc(N)).first;
}
// FIXME: Should we just use 'normal' FP_EXTEND / FP_TRUNC instead of special
// nodes?
SDValue DAGTypeLegalizer::SoftenFloatRes_FP16_TO_FP(SDNode *N) {
EVT MidVT = TLI.getTypeToTransformTo(*DAG.getContext(), MVT::f32);
SDValue Op = N->getOperand(0);
SDValue Res32 = TLI.makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MidVT, &Op, 1,
false, SDLoc(N)).first;
if (N->getValueType(0) == MVT::f32)
return Res32;
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
RTLIB::Libcall LC = RTLIB::getFPEXT(MVT::f32, N->getValueType(0));
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_EXTEND!");
return TLI.makeLibCall(DAG, LC, NVT, &Res32, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FP_ROUND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = N->getOperand(0);
if (N->getValueType(0) == MVT::f16) {
// Semi-soften first, to FP_TO_FP16, so that targets which support f16 as a
// storage-only type get a chance to select things.
return DAG.getNode(ISD::FP_TO_FP16, SDLoc(N), NVT, Op);
}
RTLIB::Libcall LC = RTLIB::getFPROUND(Op.getValueType(), N->getValueType(0));
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_ROUND!");
return TLI.makeLibCall(DAG, LC, NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FPOW(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::POW_F32,
RTLIB::POW_F64,
RTLIB::POW_F80,
RTLIB::POW_F128,
RTLIB::POW_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) {
assert(N->getOperand(1).getValueType() == MVT::i32 &&
"Unsupported power type!");
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), N->getOperand(1) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::POWI_F32,
RTLIB::POWI_F64,
RTLIB::POWI_F80,
RTLIB::POWI_F128,
RTLIB::POWI_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FREM(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::REM_F32,
RTLIB::REM_F64,
RTLIB::REM_F80,
RTLIB::REM_F128,
RTLIB::REM_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FRINT(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::RINT_F32,
RTLIB::RINT_F64,
RTLIB::RINT_F80,
RTLIB::RINT_F128,
RTLIB::RINT_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FROUND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::ROUND_F32,
RTLIB::ROUND_F64,
RTLIB::ROUND_F80,
RTLIB::ROUND_F128,
RTLIB::ROUND_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FSIN(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::SIN_F32,
RTLIB::SIN_F64,
RTLIB::SIN_F80,
RTLIB::SIN_F128,
RTLIB::SIN_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FSQRT(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::SQRT_F32,
RTLIB::SQRT_F64,
RTLIB::SQRT_F80,
RTLIB::SQRT_F128,
RTLIB::SQRT_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FSUB(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::SUB_F32,
RTLIB::SUB_F64,
RTLIB::SUB_F80,
RTLIB::SUB_F128,
RTLIB::SUB_PPCF128),
NVT, Ops, 2, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FTRUNC(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
if (N->getValueType(0) == MVT::f16)
return DAG.getNode(ISD::FP_TO_FP16, SDLoc(N), NVT, N->getOperand(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::TRUNC_F32,
RTLIB::TRUNC_F64,
RTLIB::TRUNC_F80,
RTLIB::TRUNC_F128,
RTLIB::TRUNC_PPCF128),
NVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
LoadSDNode *L = cast<LoadSDNode>(N);
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDLoc dl(N);
SDValue NewL;
if (L->getExtensionType() == ISD::NON_EXTLOAD) {
NewL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
NVT, dl, L->getChain(), L->getBasePtr(), L->getOffset(),
L->getPointerInfo(), NVT, L->isVolatile(),
L->isNonTemporal(), false, L->getAlignment(),
L->getAAInfo());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), NewL.getValue(1));
return NewL;
}
// Do a non-extending load followed by FP_EXTEND.
NewL = DAG.getLoad(L->getAddressingMode(), ISD::NON_EXTLOAD,
L->getMemoryVT(), dl, L->getChain(),
L->getBasePtr(), L->getOffset(), L->getPointerInfo(),
L->getMemoryVT(), L->isVolatile(),
L->isNonTemporal(), false, L->getAlignment(),
L->getAAInfo());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), NewL.getValue(1));
return BitConvertToInteger(DAG.getNode(ISD::FP_EXTEND, dl, VT, NewL));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_SELECT(SDNode *N) {
SDValue LHS = GetSoftenedFloat(N->getOperand(1));
SDValue RHS = GetSoftenedFloat(N->getOperand(2));
return DAG.getSelect(SDLoc(N),
LHS.getValueType(), N->getOperand(0), LHS, RHS);
}
SDValue DAGTypeLegalizer::SoftenFloatRes_SELECT_CC(SDNode *N) {
SDValue LHS = GetSoftenedFloat(N->getOperand(2));
SDValue RHS = GetSoftenedFloat(N->getOperand(3));
return DAG.getNode(ISD::SELECT_CC, SDLoc(N),
LHS.getValueType(), N->getOperand(0),
N->getOperand(1), LHS, RHS, N->getOperand(4));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_UNDEF(SDNode *N) {
return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(),
N->getValueType(0)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_VAARG(SDNode *N) {
SDValue Chain = N->getOperand(0); // Get the chain.
SDValue Ptr = N->getOperand(1); // Get the pointer.
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDLoc dl(N);
SDValue NewVAARG;
NewVAARG = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2),
N->getConstantOperandVal(3));
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), NewVAARG.getValue(1));
return NewVAARG;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
EVT SVT = N->getOperand(0).getValueType();
EVT RVT = N->getValueType(0);
EVT NVT = EVT();
SDLoc dl(N);
// If the input is not legal, eg: i1 -> fp, then it needs to be promoted to
// a larger type, eg: i8 -> fp. Even if it is legal, no libcall may exactly
// match. Look for an appropriate libcall.
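  // For instance, an i8 source on a typical soft-float target has no direct
  // entry, so the loop settles on i32 and the call becomes something like
  // __floatsisf (i32 -> f32) after the operand is extended below.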
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
for (unsigned t = MVT::FIRST_INTEGER_VALUETYPE;
t <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL; ++t) {
NVT = (MVT::SimpleValueType)t;
    // The source type needs to be big enough to hold the operand.
if (NVT.bitsGE(SVT))
      LC = Signed ? RTLIB::getSINTTOFP(NVT, RVT) : RTLIB::getUINTTOFP(NVT, RVT);
}
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported XINT_TO_FP!");
// Sign/zero extend the argument if the libcall takes a larger type.
SDValue Op = DAG.getNode(Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
NVT, N->getOperand(0));
return TLI.makeLibCall(DAG, LC,
TLI.getTypeToTransformTo(*DAG.getContext(), RVT),
&Op, 1, Signed, dl).first;
}
//===----------------------------------------------------------------------===//
//  Operand Float to Integer Conversion.
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
DEBUG(dbgs() << "Soften float operand " << OpNo << ": "; N->dump(&DAG);
dbgs() << "\n");
SDValue Res = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "SoftenFloatOperand Op #" << OpNo << ": ";
N->dump(&DAG); dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to soften this operator's operand!");
case ISD::BITCAST: Res = SoftenFloatOp_BITCAST(N); break;
case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break;
case ISD::FP_EXTEND: Res = SoftenFloatOp_FP_EXTEND(N); break;
case ISD::FP_TO_FP16: // Same as FP_ROUND for softening purposes
case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT: Res = SoftenFloatOp_FP_TO_SINT(N); break;
case ISD::FP_TO_UINT: Res = SoftenFloatOp_FP_TO_UINT(N); break;
case ISD::SELECT_CC: Res = SoftenFloatOp_SELECT_CC(N); break;
case ISD::SETCC: Res = SoftenFloatOp_SETCC(N); break;
case ISD::STORE: Res = SoftenFloatOp_STORE(N, OpNo); break;
}
// If the result is null, the sub-method took care of registering results etc.
if (!Res.getNode()) return false;
// If the result is N, the sub-method updated N in place. Tell the legalizer
// core about this.
if (Res.getNode() == N)
return true;
assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
"Invalid operand expansion");
ReplaceValueWith(SDValue(N, 0), Res);
return false;
}
SDValue DAGTypeLegalizer::SoftenFloatOp_BITCAST(SDNode *N) {
return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0),
GetSoftenedFloat(N->getOperand(0)));
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_EXTEND(SDNode *N) {
// If we get here, the result must be legal but the source illegal.
EVT SVT = N->getOperand(0).getValueType();
EVT RVT = N->getValueType(0);
SDValue Op = GetSoftenedFloat(N->getOperand(0));
if (SVT == MVT::f16)
return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), RVT, Op);
RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, RVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_EXTEND libcall");
return TLI.makeLibCall(DAG, LC, RVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_ROUND(SDNode *N) {
  // We actually deal with the partially-softened FP_TO_FP16 node too, which
  // returns an i16, so it doesn't meet the constraints necessary for FP_ROUND.
assert(N->getOpcode() == ISD::FP_ROUND || N->getOpcode() == ISD::FP_TO_FP16);
EVT SVT = N->getOperand(0).getValueType();
EVT RVT = N->getValueType(0);
EVT FloatRVT = N->getOpcode() == ISD::FP_TO_FP16 ? MVT::f16 : RVT;
RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, FloatRVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_ROUND libcall");
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, LC, RVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(2), NewRHS = N->getOperand(3);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get();
EVT VT = NewLHS.getValueType();
NewLHS = GetSoftenedFloat(NewLHS);
NewRHS = GetSoftenedFloat(NewRHS);
TLI.softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, SDLoc(N));
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, SDLoc(N), NewLHS.getValueType());
CCCode = ISD::SETNE;
}
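  // For example, on a soft-float target (br_cc setolt f32 %a, %b) becomes a
  // call to a routine such as __ltsf2 followed by (br_cc setlt %call, 0).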
// Update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
N->getOperand(4)),
0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_SINT(SDNode *N) {
EVT RVT = N->getValueType(0);
RTLIB::Libcall LC = RTLIB::getFPTOSINT(N->getOperand(0).getValueType(), RVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_SINT!");
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, LC, RVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_UINT(SDNode *N) {
EVT RVT = N->getValueType(0);
RTLIB::Libcall LC = RTLIB::getFPTOUINT(N->getOperand(0).getValueType(), RVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_UINT!");
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, LC, RVT, &Op, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
EVT VT = NewLHS.getValueType();
NewLHS = GetSoftenedFloat(NewLHS);
NewRHS = GetSoftenedFloat(NewRHS);
TLI.softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, SDLoc(N));
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, SDLoc(N), NewLHS.getValueType());
CCCode = ISD::SETNE;
}
// Update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
DAG.getCondCode(CCCode)),
0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
EVT VT = NewLHS.getValueType();
NewLHS = GetSoftenedFloat(NewLHS);
NewRHS = GetSoftenedFloat(NewRHS);
TLI.softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, SDLoc(N));
// If softenSetCCOperands returned a scalar, use it.
if (!NewRHS.getNode()) {
assert(NewLHS.getValueType() == N->getValueType(0) &&
"Unexpected setcc expansion!");
return NewLHS;
}
// Otherwise, update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
DAG.getCondCode(CCCode)),
0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) {
assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
assert(OpNo == 1 && "Can only soften the stored value!");
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Val = ST->getValue();
SDLoc dl(N);
if (ST->isTruncatingStore())
// Do an FP_ROUND followed by a non-truncating store.
Val = BitConvertToInteger(DAG.getNode(ISD::FP_ROUND, dl, ST->getMemoryVT(),
Val, DAG.getIntPtrConstant(0, dl)));
else
Val = GetSoftenedFloat(Val);
return DAG.getStore(ST->getChain(), dl, Val, ST->getBasePtr(),
ST->getMemOperand());
}
//===----------------------------------------------------------------------===//
// Float Result Expansion
//===----------------------------------------------------------------------===//
/// ExpandFloatResult - This method is called when the specified result of the
/// specified node is found to need expansion. At this point, the node may also
/// have invalid operands or may have other results that need promotion; we
/// just know that (at least) one result needs expansion.
void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
DEBUG(dbgs() << "Expand float result: "; N->dump(&DAG); dbgs() << "\n");
SDValue Lo, Hi;
Lo = Hi = SDValue();
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getValueType(ResNo), true))
return;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "ExpandFloatResult #" << ResNo << ": ";
N->dump(&DAG); dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to expand the result of this operator!");
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::MERGE_VALUES: ExpandRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
case ISD::BITCAST: ExpandRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
case ISD::VAARG: ExpandRes_VAARG(N, Lo, Hi); break;
case ISD::ConstantFP: ExpandFloatRes_ConstantFP(N, Lo, Hi); break;
case ISD::FABS: ExpandFloatRes_FABS(N, Lo, Hi); break;
case ISD::FMINNUM: ExpandFloatRes_FMINNUM(N, Lo, Hi); break;
case ISD::FMAXNUM: ExpandFloatRes_FMAXNUM(N, Lo, Hi); break;
case ISD::FADD: ExpandFloatRes_FADD(N, Lo, Hi); break;
case ISD::FCEIL: ExpandFloatRes_FCEIL(N, Lo, Hi); break;
case ISD::FCOPYSIGN: ExpandFloatRes_FCOPYSIGN(N, Lo, Hi); break;
case ISD::FCOS: ExpandFloatRes_FCOS(N, Lo, Hi); break;
case ISD::FDIV: ExpandFloatRes_FDIV(N, Lo, Hi); break;
case ISD::FEXP: ExpandFloatRes_FEXP(N, Lo, Hi); break;
case ISD::FEXP2: ExpandFloatRes_FEXP2(N, Lo, Hi); break;
case ISD::FFLOOR: ExpandFloatRes_FFLOOR(N, Lo, Hi); break;
case ISD::FLOG: ExpandFloatRes_FLOG(N, Lo, Hi); break;
case ISD::FLOG2: ExpandFloatRes_FLOG2(N, Lo, Hi); break;
case ISD::FLOG10: ExpandFloatRes_FLOG10(N, Lo, Hi); break;
case ISD::FMA: ExpandFloatRes_FMA(N, Lo, Hi); break;
case ISD::FMUL: ExpandFloatRes_FMUL(N, Lo, Hi); break;
case ISD::FNEARBYINT: ExpandFloatRes_FNEARBYINT(N, Lo, Hi); break;
case ISD::FNEG: ExpandFloatRes_FNEG(N, Lo, Hi); break;
case ISD::FP_EXTEND: ExpandFloatRes_FP_EXTEND(N, Lo, Hi); break;
case ISD::FPOW: ExpandFloatRes_FPOW(N, Lo, Hi); break;
case ISD::FPOWI: ExpandFloatRes_FPOWI(N, Lo, Hi); break;
case ISD::FRINT: ExpandFloatRes_FRINT(N, Lo, Hi); break;
case ISD::FROUND: ExpandFloatRes_FROUND(N, Lo, Hi); break;
case ISD::FSIN: ExpandFloatRes_FSIN(N, Lo, Hi); break;
case ISD::FSQRT: ExpandFloatRes_FSQRT(N, Lo, Hi); break;
case ISD::FSUB: ExpandFloatRes_FSUB(N, Lo, Hi); break;
case ISD::FTRUNC: ExpandFloatRes_FTRUNC(N, Lo, Hi); break;
case ISD::LOAD: ExpandFloatRes_LOAD(N, Lo, Hi); break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP: ExpandFloatRes_XINT_TO_FP(N, Lo, Hi); break;
case ISD::FREM: ExpandFloatRes_FREM(N, Lo, Hi); break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
if (Lo.getNode())
SetExpandedFloat(SDValue(N, ResNo), Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo,
SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
assert(NVT.getSizeInBits() == integerPartWidth &&
"Do not know how to expand this float constant!");
APInt C = cast<ConstantFPSDNode>(N)->getValueAPF().bitcastToAPInt();
SDLoc dl(N);
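// APInt stores its 64-bit words least-significant first, so word 1 of the
// bit-cast constant supplies the low-order double and word 0 the high-order
// double of the expanded pair.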
Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
APInt(integerPartWidth, C.getRawData()[1])),
dl, NVT);
Hi = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
APInt(integerPartWidth, C.getRawData()[0])),
dl, NVT);
}
void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDValue &Lo,
SDValue &Hi) {
assert(N->getValueType(0) == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
SDLoc dl(N);
SDValue Tmp;
GetExpandedFloat(N->getOperand(0), Lo, Tmp);
Hi = DAG.getNode(ISD::FABS, dl, Tmp.getValueType(), Tmp);
// Lo = Hi==fabs(Hi) ? Lo : -Lo;
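// If fabs changed the sign of the high double, the low double's sign must be
// flipped as well so that Hi + Lo still sums to the magnitude of the input.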
Lo = DAG.getSelectCC(dl, Tmp, Hi, Lo,
DAG.getNode(ISD::FNEG, dl, Lo.getValueType(), Lo),
ISD::SETEQ);
}
void DAGTypeLegalizer::ExpandFloatRes_FMINNUM(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::FMIN_F32, RTLIB::FMIN_F64,
RTLIB::FMIN_F80, RTLIB::FMIN_F128,
RTLIB::FMIN_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FMAXNUM(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::FMAX_F32, RTLIB::FMAX_F64,
RTLIB::FMAX_F80, RTLIB::FMAX_F128,
RTLIB::FMAX_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FADD(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::ADD_F32, RTLIB::ADD_F64,
RTLIB::ADD_F80, RTLIB::ADD_F128,
RTLIB::ADD_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FCEIL(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::CEIL_F32, RTLIB::CEIL_F64,
RTLIB::CEIL_F80, RTLIB::CEIL_F128,
RTLIB::CEIL_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FCOPYSIGN(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::COPYSIGN_F32,
RTLIB::COPYSIGN_F64,
RTLIB::COPYSIGN_F80,
RTLIB::COPYSIGN_F128,
RTLIB::COPYSIGN_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FCOS(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::COS_F32, RTLIB::COS_F64,
RTLIB::COS_F80, RTLIB::COS_F128,
RTLIB::COS_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FDIV(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
SDValue Call = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::DIV_F32,
RTLIB::DIV_F64,
RTLIB::DIV_F80,
RTLIB::DIV_F128,
RTLIB::DIV_PPCF128),
N->getValueType(0), Ops, 2, false,
SDLoc(N)).first;
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FEXP(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::EXP_F32, RTLIB::EXP_F64,
RTLIB::EXP_F80, RTLIB::EXP_F128,
RTLIB::EXP_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FEXP2(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::EXP2_F32, RTLIB::EXP2_F64,
RTLIB::EXP2_F80, RTLIB::EXP2_F128,
RTLIB::EXP2_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FFLOOR(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
RTLIB::FLOOR_F80, RTLIB::FLOOR_F128,
RTLIB::FLOOR_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FLOG(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::LOG_F32, RTLIB::LOG_F64,
RTLIB::LOG_F80, RTLIB::LOG_F128,
RTLIB::LOG_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FLOG2(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::LOG2_F32, RTLIB::LOG2_F64,
RTLIB::LOG2_F80, RTLIB::LOG2_F128,
RTLIB::LOG2_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FLOG10(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::LOG10_F32, RTLIB::LOG10_F64,
RTLIB::LOG10_F80, RTLIB::LOG10_F128,
RTLIB::LOG10_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FMA(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Ops[3] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) };
SDValue Call = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::FMA_F32,
RTLIB::FMA_F64,
RTLIB::FMA_F80,
RTLIB::FMA_F128,
RTLIB::FMA_PPCF128),
N->getValueType(0), Ops, 3, false,
SDLoc(N)).first;
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FMUL(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
SDValue Call = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::MUL_F32,
RTLIB::MUL_F64,
RTLIB::MUL_F80,
RTLIB::MUL_F128,
RTLIB::MUL_PPCF128),
N->getValueType(0), Ops, 2, false,
SDLoc(N)).first;
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FNEARBYINT(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::NEARBYINT_F32,
RTLIB::NEARBYINT_F64,
RTLIB::NEARBYINT_F80,
RTLIB::NEARBYINT_F128,
RTLIB::NEARBYINT_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FNEG(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDLoc dl(N);
GetExpandedFloat(N->getOperand(0), Lo, Hi);
Lo = DAG.getNode(ISD::FNEG, dl, Lo.getValueType(), Lo);
Hi = DAG.getNode(ISD::FNEG, dl, Hi.getValueType(), Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FP_EXTEND(SDNode *N, SDValue &Lo,
SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
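// The extended value fits entirely in the high double of the expanded pair;
// the low double is simply zero.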
Hi = DAG.getNode(ISD::FP_EXTEND, dl, NVT, N->getOperand(0));
Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
APInt(NVT.getSizeInBits(), 0)), dl, NVT);
}
void DAGTypeLegalizer::ExpandFloatRes_FPOW(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::POW_F32, RTLIB::POW_F64,
RTLIB::POW_F80, RTLIB::POW_F128,
RTLIB::POW_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FPOWI(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::POWI_F32, RTLIB::POWI_F64,
RTLIB::POWI_F80, RTLIB::POWI_F128,
RTLIB::POWI_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FREM(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::REM_F32, RTLIB::REM_F64,
RTLIB::REM_F80, RTLIB::REM_F128,
RTLIB::REM_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FRINT(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::RINT_F32, RTLIB::RINT_F64,
RTLIB::RINT_F80, RTLIB::RINT_F128,
RTLIB::RINT_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FROUND(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::ROUND_F32,
RTLIB::ROUND_F64,
RTLIB::ROUND_F80,
RTLIB::ROUND_F128,
RTLIB::ROUND_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FSIN(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::SIN_F32, RTLIB::SIN_F64,
RTLIB::SIN_F80, RTLIB::SIN_F128,
RTLIB::SIN_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FSQRT(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::SQRT_F32, RTLIB::SQRT_F64,
RTLIB::SQRT_F80, RTLIB::SQRT_F128,
RTLIB::SQRT_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FSUB(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
SDValue Call = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::SUB_F32,
RTLIB::SUB_F64,
RTLIB::SUB_F80,
RTLIB::SUB_F128,
RTLIB::SUB_PPCF128),
N->getValueType(0), Ops, 2, false,
SDLoc(N)).first;
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_FTRUNC(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
RTLIB::TRUNC_F80, RTLIB::TRUNC_F128,
RTLIB::TRUNC_PPCF128),
N, false);
GetPairElements(Call, Lo, Hi);
}
void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo,
SDValue &Hi) {
if (ISD::isNormalLoad(N)) {
ExpandRes_NormalLoad(N, Lo, Hi);
return;
}
assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
LoadSDNode *LD = cast<LoadSDNode>(N);
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
SDLoc dl(N);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
assert(NVT.isByteSized() && "Expanded type not byte sized!");
assert(LD->getMemoryVT().bitsLE(NVT) && "Float type not round?");
Hi = DAG.getExtLoad(LD->getExtensionType(), dl, NVT, Chain, Ptr,
LD->getMemoryVT(), LD->getMemOperand());
// Remember the chain.
Chain = Hi.getValue(1);
// The low part is zero.
Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
APInt(NVT.getSizeInBits(), 0)), dl, NVT);
// Modified the chain - switch anything that used the old chain to use the
// new one.
ReplaceValueWith(SDValue(LD, 1), Chain);
}
void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
SDValue &Hi) {
assert(N->getValueType(0) == MVT::ppcf128 && "Unsupported XINT_TO_FP!");
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Src = N->getOperand(0);
EVT SrcVT = Src.getValueType();
bool isSigned = N->getOpcode() == ISD::SINT_TO_FP;
SDLoc dl(N);
// First do an SINT_TO_FP, whether the original was signed or unsigned.
// When promoting partial word types to i32 we must honor the signedness,
// though.
if (SrcVT.bitsLE(MVT::i32)) {
// The integer can be represented exactly in an f64.
Src = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
MVT::i32, Src);
Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
APInt(NVT.getSizeInBits(), 0)), dl, NVT);
Hi = DAG.getNode(ISD::SINT_TO_FP, dl, NVT, Src);
} else {
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (SrcVT.bitsLE(MVT::i64)) {
Src = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
MVT::i64, Src);
LC = RTLIB::SINTTOFP_I64_PPCF128;
} else if (SrcVT.bitsLE(MVT::i128)) {
Src = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i128, Src);
LC = RTLIB::SINTTOFP_I128_PPCF128;
}
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported XINT_TO_FP!");
Hi = TLI.makeLibCall(DAG, LC, VT, &Src, 1, true, dl).first;
GetPairElements(Hi, Lo, Hi);
}
if (isSigned)
return;
// Unsigned - fix up the SINT_TO_FP value just calculated.
Hi = DAG.getNode(ISD::BUILD_PAIR, dl, VT, Lo, Hi);
SrcVT = Src.getValueType();
// x>=0 ? (ppcf128)(iN)x : (ppcf128)(iN)x + 2^N; N=32,64,128.
static const uint64_t TwoE32[] = { 0x41f0000000000000LL, 0 };
static const uint64_t TwoE64[] = { 0x43f0000000000000LL, 0 };
static const uint64_t TwoE128[] = { 0x47f0000000000000LL, 0 };
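// The bit patterns above are 2^32, 2^64 and 2^128 encoded as IEEE-754 doubles
// (biased exponents 1055, 1087 and 1151 with a zero mantissa).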
ArrayRef<uint64_t> Parts;
switch (SrcVT.getSimpleVT().SimpleTy) {
default:
llvm_unreachable("Unsupported UINT_TO_FP!");
case MVT::i32:
Parts = TwoE32;
break;
case MVT::i64:
Parts = TwoE64;
break;
case MVT::i128:
Parts = TwoE128;
break;
}
Lo = DAG.getNode(ISD::FADD, dl, VT, Hi,
DAG.getConstantFP(APFloat(APFloat::PPCDoubleDouble,
APInt(128, Parts)),
dl, MVT::ppcf128));
Lo = DAG.getSelectCC(dl, Src, DAG.getConstant(0, dl, SrcVT),
Lo, Hi, ISD::SETLT);
GetPairElements(Lo, Lo, Hi);
}
//===----------------------------------------------------------------------===//
// Float Operand Expansion
//===----------------------------------------------------------------------===//
/// ExpandFloatOperand - This method is called when the specified operand of the
/// specified node is found to need expansion. At this point, all of the result
/// types of the node are known to be legal, but other operands of the node may
/// need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
DEBUG(dbgs() << "Expand float operand: "; N->dump(&DAG); dbgs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
return false;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "ExpandFloatOperand Op #" << OpNo << ": ";
N->dump(&DAG); dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to expand this operator's operand!");
case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
case ISD::BR_CC: Res = ExpandFloatOp_BR_CC(N); break;
case ISD::FCOPYSIGN: Res = ExpandFloatOp_FCOPYSIGN(N); break;
case ISD::FP_ROUND: Res = ExpandFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT: Res = ExpandFloatOp_FP_TO_SINT(N); break;
case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_UINT(N); break;
case ISD::SELECT_CC: Res = ExpandFloatOp_SELECT_CC(N); break;
case ISD::SETCC: Res = ExpandFloatOp_SETCC(N); break;
case ISD::STORE: Res = ExpandFloatOp_STORE(cast<StoreSDNode>(N),
OpNo); break;
}
// If the result is null, the sub-method took care of registering results etc.
if (!Res.getNode()) return false;
// If the result is N, the sub-method updated N in place. Tell the legalizer
// core about this.
if (Res.getNode() == N)
return true;
assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
"Invalid operand expansion");
ReplaceValueWith(SDValue(N, 0), Res);
return false;
}
/// FloatExpandSetCCOperands - Expand the operands of a comparison. This code
/// is shared among BR_CC, SELECT_CC, and SETCC handlers.
void DAGTypeLegalizer::FloatExpandSetCCOperands(SDValue &NewLHS,
SDValue &NewRHS,
ISD::CondCode &CCCode,
SDLoc dl) {
SDValue LHSLo, LHSHi, RHSLo, RHSHi;
GetExpandedFloat(NewLHS, LHSLo, LHSHi);
GetExpandedFloat(NewRHS, RHSLo, RHSHi);
assert(NewLHS.getValueType() == MVT::ppcf128 && "Unsupported setcc type!");
// FIXME: This generated code sucks. We want to generate
// FCMPU crN, hi1, hi2
// BNE crN, L:
// FCMPU crN, lo1, lo2
// The following can be improved, but not that much.
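// The expansion computes:
//   (Hi1 == Hi2 && Lo1 CC Lo2) || (Hi1 != Hi2 && Hi1 CC Hi2)
// using an ordered equality for the first test and an unordered inequality
// for the second.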
SDValue Tmp1, Tmp2, Tmp3;
Tmp1 = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()),
LHSHi, RHSHi, ISD::SETOEQ);
Tmp2 = DAG.getSetCC(dl, getSetCCResultType(LHSLo.getValueType()),
LHSLo, RHSLo, CCCode);
Tmp3 = DAG.getNode(ISD::AND, dl, Tmp1.getValueType(), Tmp1, Tmp2);
Tmp1 = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()),
LHSHi, RHSHi, ISD::SETUNE);
Tmp2 = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()),
LHSHi, RHSHi, CCCode);
Tmp1 = DAG.getNode(ISD::AND, dl, Tmp1.getValueType(), Tmp1, Tmp2);
NewLHS = DAG.getNode(ISD::OR, dl, Tmp1.getValueType(), Tmp1, Tmp3);
NewRHS = SDValue(); // LHS is the result, not a compare.
}
SDValue DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(2), NewRHS = N->getOperand(3);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get();
FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N));
// If FloatExpandSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, SDLoc(N), NewLHS.getValueType());
CCCode = ISD::SETNE;
}
// Update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FCOPYSIGN(SDNode *N) {
assert(N->getOperand(1).getValueType() == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
SDValue Lo, Hi;
GetExpandedFloat(N->getOperand(1), Lo, Hi);
// The ppcf128 value is providing only the sign; take it from the
// higher-order double (which must have the larger magnitude).
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N),
N->getValueType(0), N->getOperand(0), Hi);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) {
assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
SDValue Lo, Hi;
GetExpandedFloat(N->getOperand(0), Lo, Hi);
// Round it the rest of the way (e.g. to f32) if needed.
return DAG.getNode(ISD::FP_ROUND, SDLoc(N),
N->getValueType(0), Hi, N->getOperand(1));
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_SINT(SDNode *N) {
EVT RVT = N->getValueType(0);
SDLoc dl(N);
// Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
// PPC (the libcall is not available). FIXME: Do this in a less hacky way.
if (RVT == MVT::i32) {
assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
SDValue Res = DAG.getNode(ISD::FP_ROUND_INREG, dl, MVT::ppcf128,
N->getOperand(0), DAG.getValueType(MVT::f64));
Res = DAG.getNode(ISD::FP_ROUND, dl, MVT::f64, Res,
DAG.getIntPtrConstant(1, dl));
return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
}
RTLIB::Libcall LC = RTLIB::getFPTOSINT(N->getOperand(0).getValueType(), RVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_SINT!");
return TLI.makeLibCall(DAG, LC, RVT, &N->getOperand(0), 1, false, dl).first;
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_UINT(SDNode *N) {
EVT RVT = N->getValueType(0);
SDLoc dl(N);
// Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
// PPC (the libcall is not available). FIXME: Do this in a less hacky way.
if (RVT == MVT::i32) {
assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
APFloat APF = APFloat(APFloat::PPCDoubleDouble, APInt(128, TwoE31));
SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
// X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
// FIXME: generated code sucks.
return DAG.getSelectCC(dl, N->getOperand(0), Tmp,
DAG.getNode(ISD::ADD, dl, MVT::i32,
DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
DAG.getNode(ISD::FSUB, dl,
MVT::ppcf128,
N->getOperand(0),
Tmp)),
DAG.getConstant(0x80000000, dl,
MVT::i32)),
DAG.getNode(ISD::FP_TO_SINT, dl,
MVT::i32, N->getOperand(0)),
ISD::SETGE);
}
RTLIB::Libcall LC = RTLIB::getFPTOUINT(N->getOperand(0).getValueType(), RVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_UINT!");
return TLI.makeLibCall(DAG, LC, N->getValueType(0), &N->getOperand(0), 1,
false, dl).first;
}
SDValue DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N));
// If FloatExpandSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, SDLoc(N), NewLHS.getValueType());
CCCode = ISD::SETNE;
}
// Update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N));
// If FloatExpandSetCCOperands returned a scalar, use it.
if (!NewRHS.getNode()) {
assert(NewLHS.getValueType() == N->getValueType(0) &&
"Unexpected setcc expansion!");
return NewLHS;
}
// Otherwise, update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
if (ISD::isNormalStore(N))
return ExpandOp_NormalStore(N, OpNo);
assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
assert(OpNo == 1 && "Can only expand the stored value so far");
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(),
ST->getValue().getValueType());
assert(NVT.isByteSized() && "Expanded type not byte sized!");
assert(ST->getMemoryVT().bitsLE(NVT) && "Float type not round?");
(void)NVT;
SDValue Lo, Hi;
GetExpandedOp(ST->getValue(), Lo, Hi);
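// Since the memory type is no wider than the expanded type (asserted above),
// the correctly rounded value lives entirely in the high double, so storing
// Hi alone is sufficient.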
return DAG.getTruncStore(Chain, SDLoc(N), Hi, Ptr,
ST->getMemoryVT(), ST->getMemOperand());
}
//===----------------------------------------------------------------------===//
// Float Operand Promotion
//===----------------------------------------------------------------------===//
//
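// Return the conversion opcode needed between the half-precision storage type
// and the promoted compute type: FP16_TO_FP when reading an f16 operand, and
// FP_TO_FP16 when producing an f16 result.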
static ISD::NodeType GetPromotionOpcode(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f16) {
return ISD::FP16_TO_FP;
} else if (RetVT == MVT::f16) {
return ISD::FP_TO_FP16;
}
report_fatal_error("Attempt at an invalid promotion-related conversion");
}
bool DAGTypeLegalizer::PromoteFloatOperand(SDNode *N, unsigned OpNo) {
SDValue R = SDValue();
// Nodes that use a promotion-requiring floating point operand, but don't
// produce a promotion-requiring floating point result, need to be legalized
// to use the promoted float operand. Nodes that produce at least one
// promotion-requiring floating point result have their operands legalized as
// a part of PromoteFloatResult.
switch (N->getOpcode()) {
default:
llvm_unreachable("Do not know how to promote this operator's operand!");
case ISD::BITCAST: R = PromoteFloatOp_BITCAST(N, OpNo); break;
case ISD::FCOPYSIGN: R = PromoteFloatOp_FCOPYSIGN(N, OpNo); break;
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: R = PromoteFloatOp_FP_TO_XINT(N, OpNo); break;
case ISD::FP_EXTEND: R = PromoteFloatOp_FP_EXTEND(N, OpNo); break;
case ISD::SELECT_CC: R = PromoteFloatOp_SELECT_CC(N, OpNo); break;
case ISD::SETCC: R = PromoteFloatOp_SETCC(N, OpNo); break;
case ISD::STORE: R = PromoteFloatOp_STORE(N, OpNo); break;
}
if (R.getNode())
ReplaceValueWith(SDValue(N, 0), R);
return false;
}
SDValue DAGTypeLegalizer::PromoteFloatOp_BITCAST(SDNode *N, unsigned OpNo) {
SDValue Op = N->getOperand(0);
EVT OpVT = Op->getValueType(0);
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
assert (IVT == N->getValueType(0) && "Bitcast to type of different size");
SDValue Promoted = GetPromotedFloat(N->getOperand(0));
EVT PromotedVT = Promoted->getValueType(0);
// Convert the promoted float value to the desired IVT.
return DAG.getNode(GetPromotionOpcode(PromotedVT, OpVT), SDLoc(N), IVT,
Promoted);
}
// Promote Operand 1 of FCOPYSIGN. Operand 0 ought to be handled by
// PromoteFloatRes_FCOPYSIGN.
SDValue DAGTypeLegalizer::PromoteFloatOp_FCOPYSIGN(SDNode *N, unsigned OpNo) {
assert(OpNo == 1 && "Only operand 1 should need promotion here");
SDValue Op1 = GetPromotedFloat(N->getOperand(1));
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
N->getOperand(0), Op1);
}
// Convert the promoted float value to the desired integer type
SDValue DAGTypeLegalizer::PromoteFloatOp_FP_TO_XINT(SDNode *N, unsigned OpNo) {
SDValue Op = GetPromotedFloat(N->getOperand(0));
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), Op);
}
SDValue DAGTypeLegalizer::PromoteFloatOp_FP_EXTEND(SDNode *N, unsigned OpNo) {
SDValue Op = GetPromotedFloat(N->getOperand(0));
EVT VT = N->getValueType(0);
// Desired VT is same as promoted type. Use promoted float directly.
if (VT == Op->getValueType(0))
return Op;
// Else, extend the promoted float value to the desired VT.
return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, Op);
}
// Promote the float operands used for comparison. The true- and false-
// operands have the same type as the result and are promoted, if needed, by
// PromoteFloatRes_SELECT_CC
SDValue DAGTypeLegalizer::PromoteFloatOp_SELECT_CC(SDNode *N, unsigned OpNo) {
SDValue LHS = GetPromotedFloat(N->getOperand(0));
SDValue RHS = GetPromotedFloat(N->getOperand(1));
return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
LHS, RHS, N->getOperand(2), N->getOperand(3),
N->getOperand(4));
}
// Construct a SETCC that compares the promoted values and sets the conditional
// code.
SDValue DAGTypeLegalizer::PromoteFloatOp_SETCC(SDNode *N, unsigned OpNo) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Op0 = GetPromotedFloat(N->getOperand(0));
SDValue Op1 = GetPromotedFloat(N->getOperand(1));
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
return DAG.getSetCC(SDLoc(N), NVT, Op0, Op1, CCCode);
}
// Lower the promoted float down to the integer value of the same size and
// a STORE of the integer value.
SDValue DAGTypeLegalizer::PromoteFloatOp_STORE(SDNode *N, unsigned OpNo) {
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Val = ST->getValue();
SDLoc DL(N);
SDValue Promoted = GetPromotedFloat(Val);
EVT VT = ST->getOperand(1)->getValueType(0);
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
SDValue NewVal;
NewVal = DAG.getNode(GetPromotionOpcode(Promoted.getValueType(), VT), DL,
IVT, Promoted);
return DAG.getStore(ST->getChain(), DL, NewVal, ST->getBasePtr(),
ST->getMemOperand());
}
//===----------------------------------------------------------------------===//
// Float Result Promotion
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) {
SDValue R = SDValue();
switch (N->getOpcode()) {
// These opcodes cannot appear if promotion of FP16 is done in the backend
// instead of Clang
case ISD::FP16_TO_FP:
case ISD::FP_TO_FP16:
default:
llvm_unreachable("Do not know how to promote this operator's result!");
case ISD::BITCAST: R = PromoteFloatRes_BITCAST(N); break;
case ISD::ConstantFP: R = PromoteFloatRes_ConstantFP(N); break;
case ISD::EXTRACT_VECTOR_ELT:
R = PromoteFloatRes_EXTRACT_VECTOR_ELT(N); break;
case ISD::FCOPYSIGN: R = PromoteFloatRes_FCOPYSIGN(N); break;
// Unary FP Operations
case ISD::FABS:
case ISD::FCEIL:
case ISD::FCOS:
case ISD::FEXP:
case ISD::FEXP2:
case ISD::FFLOOR:
case ISD::FLOG:
case ISD::FLOG2:
case ISD::FLOG10:
case ISD::FNEARBYINT:
case ISD::FNEG:
case ISD::FRINT:
case ISD::FROUND:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC: R = PromoteFloatRes_UnaryOp(N); break;
// Binary FP Operations
case ISD::FADD:
case ISD::FDIV:
case ISD::FMAXNUM:
case ISD::FMINNUM:
case ISD::FMUL:
case ISD::FPOW:
case ISD::FREM:
case ISD::FSUB: R = PromoteFloatRes_BinOp(N); break;
case ISD::FMA: // FMA is handled identically to FMAD here
case ISD::FMAD: R = PromoteFloatRes_FMAD(N); break;
case ISD::FPOWI: R = PromoteFloatRes_FPOWI(N); break;
case ISD::FP_ROUND: R = PromoteFloatRes_FP_ROUND(N); break;
case ISD::LOAD: R = PromoteFloatRes_LOAD(N); break;
case ISD::SELECT: R = PromoteFloatRes_SELECT(N); break;
case ISD::SELECT_CC: R = PromoteFloatRes_SELECT_CC(N); break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP: R = PromoteFloatRes_XINT_TO_FP(N); break;
case ISD::UNDEF: R = PromoteFloatRes_UNDEF(N); break;
}
if (R.getNode())
SetPromotedFloat(SDValue(N, ResNo), R);
}
// Bitcast from i16 to f16: convert the i16 to an f32 value instead.
// At this point, it is not possible to determine whether the bitcast value is
// eventually stored to memory or promoted to f32 or to a floating point type
// of even higher precision. Some of these cases are handled by the FP_EXTEND
// and STORE promotion handlers.
SDValue DAGTypeLegalizer::PromoteFloatRes_BITCAST(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT,
N->getOperand(0));
}
SDValue DAGTypeLegalizer::PromoteFloatRes_ConstantFP(SDNode *N) {
ConstantFPSDNode *CFPNode = cast<ConstantFPSDNode>(N);
EVT VT = N->getValueType(0);
SDLoc DL(N);
// Get the (bit-cast) APInt of the APFloat and build an integer constant
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
SDValue C = DAG.getConstant(CFPNode->getValueAPF().bitcastToAPInt(), DL,
IVT);
// Convert the Constant to the desired FP type
// FIXME We might be able to do the conversion during compilation and get rid
// of it from the object code
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
return DAG.getNode(GetPromotionOpcode(VT, NVT), DL, NVT, C);
}
// If the Index operand is a constant, try to redirect the extract operation to
// the correct legalized vector. If not, bit-convert the input vector to the
// equivalent integer vector, extract the element as a (bit-cast) integer
// value, and convert it to the promoted type.
SDValue DAGTypeLegalizer::PromoteFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
SDLoc DL(N);
// If the index is constant, try to extract the value from the legalized
// vector type.
if (isa<ConstantSDNode>(N->getOperand(1))) {
SDValue Vec = N->getOperand(0);
SDValue Idx = N->getOperand(1);
EVT VecVT = Vec->getValueType(0);
EVT EltVT = VecVT.getVectorElementType();
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
switch (getTypeAction(VecVT)) {
default: break;
case TargetLowering::TypeScalarizeVector: {
SDValue Res = GetScalarizedVector(N->getOperand(0));
ReplaceValueWith(SDValue(N, 0), Res);
return SDValue();
}
case TargetLowering::TypeWidenVector: {
Vec = GetWidenedVector(Vec);
SDValue Res = DAG.getNode(N->getOpcode(), DL, EltVT, Vec, Idx);
ReplaceValueWith(SDValue(N, 0), Res);
return SDValue();
}
case TargetLowering::TypeSplitVector: {
SDValue Lo, Hi;
GetSplitVector(Vec, Lo, Hi);
uint64_t LoElts = Lo.getValueType().getVectorNumElements();
SDValue Res;
if (IdxVal < LoElts)
Res = DAG.getNode(N->getOpcode(), DL, EltVT, Lo, Idx);
else
Res = DAG.getNode(N->getOpcode(), DL, EltVT, Hi,
DAG.getConstant(IdxVal - LoElts, DL,
Idx.getValueType()));
ReplaceValueWith(SDValue(N, 0), Res);
return SDValue();
}
}
}
// Bit-convert the input vector to the equivalent integer vector
SDValue NewOp = BitConvertVectorToIntegerVector(N->getOperand(0));
EVT IVT = NewOp.getValueType().getVectorElementType();
// Extract the element as a (bit-cast) integer value
SDValue NewVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IVT,
NewOp, N->getOperand(1));
// Convert the element to the desired FP type
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT, NewVal);
}
// FCOPYSIGN(X, Y) returns the value of X with the sign of Y. If the result
// needs promotion, so does the argument X. Note that Y, if needed, will be
// handled during operand promotion.
SDValue DAGTypeLegalizer::PromoteFloatRes_FCOPYSIGN(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Op0 = GetPromotedFloat(N->getOperand(0));
SDValue Op1 = N->getOperand(1);
return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op0, Op1);
}
// Unary operation where the result and the operand have PromoteFloat type
// action. Construct a new SDNode with the promoted float value of the old
// operand.
SDValue DAGTypeLegalizer::PromoteFloatRes_UnaryOp(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Op = GetPromotedFloat(N->getOperand(0));
return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op);
}
// Binary operations where the result and both operands have PromoteFloat type
// action. Construct a new SDNode with the promoted float values of the old
// operands.
SDValue DAGTypeLegalizer::PromoteFloatRes_BinOp(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Op0 = GetPromotedFloat(N->getOperand(0));
SDValue Op1 = GetPromotedFloat(N->getOperand(1));
return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op0, Op1);
}
SDValue DAGTypeLegalizer::PromoteFloatRes_FMAD(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Op0 = GetPromotedFloat(N->getOperand(0));
SDValue Op1 = GetPromotedFloat(N->getOperand(1));
SDValue Op2 = GetPromotedFloat(N->getOperand(2));
return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op0, Op1, Op2);
}
// Promote the Float (first) operand and retain the Integer (second) operand
SDValue DAGTypeLegalizer::PromoteFloatRes_FPOWI(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Op0 = GetPromotedFloat(N->getOperand(0));
SDValue Op1 = N->getOperand(1);
return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, Op0, Op1);
}
// Explicit operation to reduce precision. Reduce the value to half precision
// and promote it back to the legal type.
SDValue DAGTypeLegalizer::PromoteFloatRes_FP_ROUND(SDNode *N) {
SDLoc DL(N);
SDValue Op = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT OpVT = Op->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
// Round promoted float to desired precision
SDValue Round = DAG.getNode(GetPromotionOpcode(OpVT, VT), DL, IVT, Op);
// Promote it back to the legal output type
return DAG.getNode(GetPromotionOpcode(VT, NVT), DL, NVT, Round);
}
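// Load the value as an integer of the same width, then convert the loaded
// bits to the promoted FP type.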
SDValue DAGTypeLegalizer::PromoteFloatRes_LOAD(SDNode *N) {
LoadSDNode *L = cast<LoadSDNode>(N);
EVT VT = N->getValueType(0);
// Load the value as an integer value with the same number of bits
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
SDValue newL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
IVT, SDLoc(N), L->getChain(), L->getBasePtr(),
L->getOffset(), L->getPointerInfo(), IVT, L->isVolatile(),
L->isNonTemporal(), false, L->getAlignment(),
L->getAAInfo());
// Legalize the chain result by replacing uses of the old value chain with the
// new one
ReplaceValueWith(SDValue(N, 1), newL.getValue(1));
// Convert the integer value to the desired FP type
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT, newL);
}
// Construct a new SELECT node with the promoted true- and false- values.
SDValue DAGTypeLegalizer::PromoteFloatRes_SELECT(SDNode *N) {
SDValue TrueVal = GetPromotedFloat(N->getOperand(1));
SDValue FalseVal = GetPromotedFloat(N->getOperand(2));
return DAG.getNode(ISD::SELECT, SDLoc(N), TrueVal->getValueType(0),
N->getOperand(0), TrueVal, FalseVal);
}
// Construct a new SELECT_CC node with the promoted true- and false- values.
// The operands used for comparison are promoted by PromoteFloatOp_SELECT_CC.
SDValue DAGTypeLegalizer::PromoteFloatRes_SELECT_CC(SDNode *N) {
SDValue TrueVal = GetPromotedFloat(N->getOperand(2));
SDValue FalseVal = GetPromotedFloat(N->getOperand(3));
return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
N->getOperand(0), N->getOperand(1), TrueVal, FalseVal,
N->getOperand(4));
}
// Construct a SDNode that transforms the SINT or UINT operand to the promoted
// float type.
SDValue DAGTypeLegalizer::PromoteFloatRes_XINT_TO_FP(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
return DAG.getNode(N->getOpcode(), SDLoc(N), NVT, N->getOperand(0));
}
SDValue DAGTypeLegalizer::PromoteFloatRes_UNDEF(SDNode *N) {
return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(),
N->getValueType(0)));
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp | //===-- SelectionDAGPrinter.cpp - Implement SelectionDAG::viewGraph() -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG::viewGraph method.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
#define DEBUG_TYPE "dag-printer"
namespace llvm {
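// DOTGraphTraits specialization that teaches GraphWriter how to render a
// SelectionDAG: one Graphviz record per SDNode, with per-result edge labels
// and special styling for glue (red) and chain (blue) edges.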
template<>
struct DOTGraphTraits<SelectionDAG*> : public DefaultDOTGraphTraits {
explicit DOTGraphTraits(bool isSimple=false) :
DefaultDOTGraphTraits(isSimple) {}
static bool hasEdgeDestLabels() {
return true;
}
static unsigned numEdgeDestLabels(const void *Node) {
return ((const SDNode *) Node)->getNumValues();
}
static std::string getEdgeDestLabel(const void *Node, unsigned i) {
return ((const SDNode *) Node)->getValueType(i).getEVTString();
}
template<typename EdgeIter>
static std::string getEdgeSourceLabel(const void *Node, EdgeIter I) {
return itostr(I - SDNodeIterator::begin((const SDNode *) Node));
}
/// edgeTargetsEdgeSource - This method returns true if this outgoing edge
/// should actually target another edge source, not a node. If this method
/// is implemented, getEdgeTarget should be implemented.
template<typename EdgeIter>
static bool edgeTargetsEdgeSource(const void *Node, EdgeIter I) {
return true;
}
/// getEdgeTarget - If edgeTargetsEdgeSource returns true, this method is
/// called to determine which outgoing edge of Node is the target of this
/// edge.
template<typename EdgeIter>
static EdgeIter getEdgeTarget(const void *Node, EdgeIter I) {
SDNode *TargetNode = *I;
SDNodeIterator NI = SDNodeIterator::begin(TargetNode);
std::advance(NI, I.getNode()->getOperand(I.getOperand()).getResNo());
return NI;
}
static std::string getGraphName(const SelectionDAG *G) {
return G->getMachineFunction().getName();
}
static bool renderGraphFromBottomUp() {
return true;
}
static bool hasNodeAddressLabel(const SDNode *Node,
const SelectionDAG *Graph) {
return true;
}
/// If you want to override the dot attributes printed for a particular
/// edge, override this method.
template<typename EdgeIter>
static std::string getEdgeAttributes(const void *Node, EdgeIter EI,
const SelectionDAG *Graph) {
SDValue Op = EI.getNode()->getOperand(EI.getOperand());
EVT VT = Op.getValueType();
if (VT == MVT::Glue)
return "color=red,style=bold";
else if (VT == MVT::Other)
return "color=blue,style=dashed";
return "";
}
static std::string getSimpleNodeLabel(const SDNode *Node,
const SelectionDAG *G) {
std::string Result = Node->getOperationName(G);
{
raw_string_ostream OS(Result);
Node->print_details(OS, G);
}
return Result;
}
std::string getNodeLabel(const SDNode *Node, const SelectionDAG *Graph);
static std::string getNodeAttributes(const SDNode *N,
const SelectionDAG *Graph) {
#ifndef NDEBUG
const std::string &Attrs = Graph->getGraphAttrs(N);
if (!Attrs.empty()) {
if (Attrs.find("shape=") == std::string::npos)
return std::string("shape=Mrecord,") + Attrs;
else
return Attrs;
}
#endif
return "shape=Mrecord";
}
static void addCustomGraphFeatures(SelectionDAG *G,
GraphWriter<SelectionDAG*> &GW) {
GW.emitSimpleNode(nullptr, "plaintext=circle", "GraphRoot");
if (G->getRoot().getNode())
GW.emitEdge(nullptr, -1, G->getRoot().getNode(), G->getRoot().getResNo(),
"color=blue,style=dashed");
}
};
}
std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node,
const SelectionDAG *G) {
return DOTGraphTraits<SelectionDAG*>::getSimpleNodeLabel(Node, G);
}
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void SelectionDAG::viewGraph(const std::string &Title) {
// This code is only for debugging!
#ifndef NDEBUG
ViewGraph(this, "dag." + getMachineFunction().getName(),
false, Title);
#else
errs() << "SelectionDAG::viewGraph is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
// This overload is defined out-of-line here instead of just using a
// default parameter because this is easiest for gdb to call.
void SelectionDAG::viewGraph() {
viewGraph("");
}
/// clearGraphAttrs - Clear all previously defined node graph attributes.
/// Intended to be used from a debugging tool (eg. gdb).
void SelectionDAG::clearGraphAttrs() {
#ifndef NDEBUG
NodeGraphAttrs.clear();
#else
errs() << "SelectionDAG::clearGraphAttrs is only available in debug builds"
<< " on systems with Graphviz or gv!\n";
#endif
}
/// setGraphAttrs - Set graph attributes for a node. (eg. "color=red".)
///
void SelectionDAG::setGraphAttrs(const SDNode *N, const char *Attrs) {
#ifndef NDEBUG
NodeGraphAttrs[N] = Attrs;
#else
errs() << "SelectionDAG::setGraphAttrs is only available in debug builds"
<< " on systems with Graphviz or gv!\n";
#endif
}
/// getGraphAttrs - Get graph attributes for a node. (eg. "color=red".)
/// Used from getNodeAttributes.
const std::string SelectionDAG::getGraphAttrs(const SDNode *N) const {
#ifndef NDEBUG
std::map<const SDNode *, std::string>::const_iterator I =
NodeGraphAttrs.find(N);
if (I != NodeGraphAttrs.end())
return I->second;
else
return "";
#else
errs() << "SelectionDAG::getGraphAttrs is only available in debug builds"
<< " on systems with Graphviz or gv!\n";
return std::string();
#endif
}
/// setGraphColor - Convenience for setting node color attribute.
///
void SelectionDAG::setGraphColor(const SDNode *N, const char *Color) {
#ifndef NDEBUG
NodeGraphAttrs[N] = std::string("color=") + Color;
#else
errs() << "SelectionDAG::setGraphColor is only available in debug builds"
<< " on systems with Graphviz or gv!\n";
#endif
}
/// setSubgraphColorHelper - Implement setSubgraphColor. Return
/// whether we truncated the search.
///
bool SelectionDAG::setSubgraphColorHelper(SDNode *N, const char *Color, DenseSet<SDNode *> &visited,
int level, bool &printed) {
bool hit_limit = false;
#ifndef NDEBUG
if (level >= 20) {
if (!printed) {
printed = true;
DEBUG(dbgs() << "setSubgraphColor hit max level\n");
}
return true;
}
unsigned oldSize = visited.size();
visited.insert(N);
if (visited.size() != oldSize) {
setGraphColor(N, Color);
for(SDNodeIterator i = SDNodeIterator::begin(N), iend = SDNodeIterator::end(N);
i != iend;
++i) {
hit_limit = setSubgraphColorHelper(*i, Color, visited, level+1, printed) || hit_limit;
}
}
#else
errs() << "SelectionDAG::setSubgraphColor is only available in debug builds"
<< " on systems with Graphviz or gv!\n";
#endif
return hit_limit;
}
/// setSubgraphColor - Convenience for setting subgraph color attribute.
///
void SelectionDAG::setSubgraphColor(SDNode *N, const char *Color) {
#ifndef NDEBUG
DenseSet<SDNode *> visited;
bool printed = false;
if (setSubgraphColorHelper(N, Color, visited, 0, printed)) {
// Visually mark that we hit the limit
if (strcmp(Color, "red") == 0) {
setSubgraphColorHelper(N, "blue", visited, 0, printed);
} else if (strcmp(Color, "yellow") == 0) {
setSubgraphColorHelper(N, "green", visited, 0, printed);
}
}
#else
errs() << "SelectionDAG::setSubgraphColor is only available in debug builds"
<< " on systems with Graphviz or gv!\n";
#endif
}
std::string ScheduleDAGSDNodes::getGraphNodeLabel(const SUnit *SU) const {
std::string s;
raw_string_ostream O(s);
O << "SU(" << SU->NodeNum << "): ";
if (SU->getNode()) {
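// A scheduling unit may cover a chain of glued SDNodes; collect them all so
// the label shows every node the SUnit represents.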
SmallVector<SDNode *, 4> GluedNodes;
for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
GluedNodes.push_back(N);
while (!GluedNodes.empty()) {
O << DOTGraphTraits<SelectionDAG*>
::getSimpleNodeLabel(GluedNodes.back(), DAG);
GluedNodes.pop_back();
if (!GluedNodes.empty())
O << "\n ";
}
} else {
O << "CROSS RC COPY";
}
return O.str();
}
void ScheduleDAGSDNodes::getCustomGraphFeatures(GraphWriter<ScheduleDAG*> &GW) const {
if (DAG) {
// Draw a special "GraphRoot" node to indicate the root of the graph.
GW.emitSimpleNode(nullptr, "plaintext=circle", "GraphRoot");
const SDNode *N = DAG->getRoot().getNode();
if (N && N->getNodeId() != -1)
GW.emitEdge(nullptr, -1, &SUnits[N->getNodeId()], -1,
"color=blue,style=dashed");
}
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/StatepointLowering.cpp | //===-- StatepointLowering.cpp - SDAGBuilder's statepoint code -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file includes support code use by SelectionDAGBuilder when lowering a
// statepoint sequence in SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#include "StatepointLowering.h"
#include "SelectionDAGBuilder.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "statepoint-lowering"
STATISTIC(NumSlotsAllocatedForStatepoints,
"Number of stack slots allocated for statepoints");
STATISTIC(NumOfStatepoints, "Number of statepoint nodes encountered");
STATISTIC(StatepointMaxSlotsRequired,
"Maximum number of stack slots required for a singe statepoint");
static void pushStackMapConstant(SmallVectorImpl<SDValue>& Ops,
SelectionDAGBuilder &Builder, uint64_t Value) {
SDLoc L = Builder.getCurSDLoc();
Ops.push_back(Builder.DAG.getTargetConstant(StackMaps::ConstantOp, L,
MVT::i64));
Ops.push_back(Builder.DAG.getTargetConstant(Value, L, MVT::i64));
}
void StatepointLoweringState::startNewStatepoint(SelectionDAGBuilder &Builder) {
// Consistency check
assert(PendingGCRelocateCalls.empty() &&
"Trying to visit statepoint before finished processing previous one");
Locations.clear();
NextSlotToAllocate = 0;
// Need to resize this on each safepoint - we need the two to stay in
// sync and the clear patterns of a SelectionDAGBuilder have no relation
// to FunctionLoweringInfo.
AllocatedStackSlots.resize(Builder.FuncInfo.StatepointStackSlots.size());
for (size_t i = 0; i < AllocatedStackSlots.size(); i++) {
AllocatedStackSlots[i] = false;
}
}
void StatepointLoweringState::clear() {
Locations.clear();
AllocatedStackSlots.clear();
assert(PendingGCRelocateCalls.empty() &&
"cleared before statepoint sequence completed");
}
SDValue
StatepointLoweringState::allocateStackSlot(EVT ValueType,
SelectionDAGBuilder &Builder) {
NumSlotsAllocatedForStatepoints++;
// The basic scheme here is to first look for a previously created stack slot
// which is not in use (accounting for the fact that arbitrary slots may already
// be reserved), or to create a new stack slot and use it.
// If this doesn't succeed in 40000 iterations, something is seriously wrong
for (int i = 0; i < 40000; i++) {
assert(Builder.FuncInfo.StatepointStackSlots.size() ==
AllocatedStackSlots.size() &&
"broken invariant");
const size_t NumSlots = AllocatedStackSlots.size();
assert(NextSlotToAllocate <= NumSlots && "broken invariant");
if (NextSlotToAllocate >= NumSlots) {
assert(NextSlotToAllocate == NumSlots);
// record stats
if (NumSlots + 1 > StatepointMaxSlotsRequired) {
StatepointMaxSlotsRequired = NumSlots + 1;
}
SDValue SpillSlot = Builder.DAG.CreateStackTemporary(ValueType);
const unsigned FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
Builder.FuncInfo.StatepointStackSlots.push_back(FI);
AllocatedStackSlots.push_back(true);
return SpillSlot;
}
if (!AllocatedStackSlots[NextSlotToAllocate]) {
const int FI = Builder.FuncInfo.StatepointStackSlots[NextSlotToAllocate];
AllocatedStackSlots[NextSlotToAllocate] = true;
return Builder.DAG.getFrameIndex(FI, ValueType);
}
// Note: We deliberately choose to advance this only on the failing path.
// Doing so on the succeeding path involves a bit of complexity that caused a
// minor bug previously. Unless performance shows this matters, please
// keep this code as simple as possible.
NextSlotToAllocate++;
}
llvm_unreachable("infinite loop?");
}
/// Utility function for reservePreviousStackSlotForValue. Tries to find
/// the stack slot index to which we spilled the value for previous
/// statepoints. LookUpDepth specifies the maximum DFS depth this function is
/// allowed to look.
static Optional<int> findPreviousSpillSlot(const Value *Val,
SelectionDAGBuilder &Builder,
int LookUpDepth) {
// Cannot look any further - give up now
if (LookUpDepth <= 0)
return Optional<int>();
// Spill location is known for gc relocates
if (isGCRelocate(Val)) {
GCRelocateOperands RelocOps(cast<Instruction>(Val));
FunctionLoweringInfo::StatepointSpilledValueMapTy &SpillMap =
Builder.FuncInfo.StatepointRelocatedValues[RelocOps.getStatepoint()];
auto It = SpillMap.find(RelocOps.getDerivedPtr());
if (It == SpillMap.end())
return Optional<int>();
return It->second;
}
// Look through bitcast instructions.
if (const BitCastInst *Cast = dyn_cast<BitCastInst>(Val)) {
return findPreviousSpillSlot(Cast->getOperand(0), Builder, LookUpDepth - 1);
}
// Look through phi nodes
// All incoming values should have the same known stack slot; otherwise the
// result is unknown.
if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
Optional<int> MergedResult = None;
for (auto &IncomingValue : Phi->incoming_values()) {
Optional<int> SpillSlot =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
if (!SpillSlot.hasValue())
return Optional<int>();
if (MergedResult.hasValue() && *MergedResult != *SpillSlot)
return Optional<int>();
MergedResult = SpillSlot;
}
return MergedResult;
}
// TODO: We can do better for PHI nodes. In cases like this:
// ptr = phi(relocated_pointer, not_relocated_pointer)
// statepoint(ptr)
// We will return that the stack slot for ptr is unknown, and later we might
// assign different stack slots for ptr and relocated_pointer. This limits
// llvm's ability to remove redundant stores.
// Unfortunately this is hard to accomplish in the current infrastructure.
// We use this function to eliminate the spill store completely; in the
// example above we would still need to emit the store, but to a special
// "preferred" location instead of an arbitrary one.
// TODO: handle simple updates. If a value is modified and the original
// value is no longer live, it would be nice to put the modified value in the
// same slot. This allows folding of the memory accesses for some
// instructions types (like an increment).
// statepoint (i)
// i1 = i+1
// statepoint (i1)
// However we need to be careful for cases like this:
// statepoint(i)
// i1 = i+1
// statepoint(i, i1)
// Here we want to reserve spill slot for 'i', but not for 'i+1'. If we just
// put handling of simple modifications in this function like it's done
// for bitcasts we might end up reserving i's slot for 'i+1' because order in
// which we visit values is unspecified.
// Don't know any information about this instruction
return Optional<int>();
}
/// Try to find existing copies of the incoming values in stack slots used for
/// statepoint spilling. If we can find a spill slot for the incoming value,
/// mark that slot as allocated, and reuse the same slot for this safepoint.
/// This helps to avoid a series of loads and stores that only serve to
/// reshuffle values on the stack between calls.
static void reservePreviousStackSlotForValue(const Value *IncomingValue,
SelectionDAGBuilder &Builder) {
SDValue Incoming = Builder.getValue(IncomingValue);
if (isa<ConstantSDNode>(Incoming) || isa<FrameIndexSDNode>(Incoming)) {
// We won't need to spill this, so no need to check for previously
// allocated stack slots
return;
}
SDValue OldLocation = Builder.StatepointLowering.getLocation(Incoming);
if (OldLocation.getNode())
// duplicates in input
return;
const int LookUpDepth = 6;
Optional<int> Index =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth);
if (!Index.hasValue())
return;
auto Itr = std::find(Builder.FuncInfo.StatepointStackSlots.begin(),
Builder.FuncInfo.StatepointStackSlots.end(), *Index);
assert(Itr != Builder.FuncInfo.StatepointStackSlots.end() &&
"value spilled to the unknown stack slot");
// This is one of our dedicated lowering slots
const int Offset =
std::distance(Builder.FuncInfo.StatepointStackSlots.begin(), Itr);
if (Builder.StatepointLowering.isStackSlotAllocated(Offset)) {
// stack slot already assigned to someone else, can't use it!
// TODO: currently we reserve space for gc arguments after doing
// normal allocation for deopt arguments. We should reserve for
// _all_ deopt and gc arguments, then start allocating. This
// will prevent some moves being inserted when vm state changes,
// but gc state doesn't between two calls.
return;
}
// Reserve this stack slot
Builder.StatepointLowering.reserveStackSlot(Offset);
// Cache this slot so we find it when going through the normal
// assignment loop.
SDValue Loc = Builder.DAG.getTargetFrameIndex(*Index, Incoming.getValueType());
Builder.StatepointLowering.setLocation(Incoming, Loc);
}
/// Remove any duplicate (as SDValues) from the derived pointer pairs. This
/// is not required for correctness. Its purpose is to reduce the size of
/// StackMap section. It has no effect on the number of spill slots required
/// or the actual lowering.
static void removeDuplicatesGCPtrs(SmallVectorImpl<const Value *> &Bases,
SmallVectorImpl<const Value *> &Ptrs,
SmallVectorImpl<const Value *> &Relocs,
SelectionDAGBuilder &Builder) {
// This is horribly inefficient, but I don't care right now
SmallSet<SDValue, 64> Seen;
SmallVector<const Value *, 64> NewBases, NewPtrs, NewRelocs;
for (size_t i = 0; i < Ptrs.size(); i++) {
SDValue SD = Builder.getValue(Ptrs[i]);
// Only add non-duplicates
if (Seen.count(SD) == 0) {
NewBases.push_back(Bases[i]);
NewPtrs.push_back(Ptrs[i]);
NewRelocs.push_back(Relocs[i]);
}
Seen.insert(SD);
}
assert(Bases.size() >= NewBases.size());
assert(Ptrs.size() >= NewPtrs.size());
assert(Relocs.size() >= NewRelocs.size());
Bases = NewBases;
Ptrs = NewPtrs;
Relocs = NewRelocs;
assert(Ptrs.size() == Bases.size());
assert(Ptrs.size() == Relocs.size());
}
/// Extract the call from the statepoint, lower it, and return a pointer to
/// the call node. Also update NodeMap so that getValue(statepoint) will
/// reference the lowered call result.
static SDNode *
lowerCallFromStatepoint(ImmutableStatepoint ISP, MachineBasicBlock *LandingPad,
SelectionDAGBuilder &Builder,
SmallVectorImpl<SDValue> &PendingExports) {
ImmutableCallSite CS(ISP.getCallSite());
SDValue ActualCallee = Builder.getValue(ISP.getCalledValue());
assert(CS.getCallingConv() != CallingConv::AnyReg &&
"anyregcc is not supported on statepoints!");
Type *DefTy = ISP.getActualReturnType();
bool HasDef = !DefTy->isVoidTy();
SDValue ReturnValue, CallEndVal;
std::tie(ReturnValue, CallEndVal) = Builder.lowerCallOperands(
ISP.getCallSite(), ImmutableStatepoint::CallArgsBeginPos,
ISP.getNumCallArgs(), ActualCallee, DefTy, LandingPad,
false /* IsPatchPoint */);
SDNode *CallEnd = CallEndVal.getNode();
// Get a call instruction from the call sequence chain. Tail calls are not
// allowed. The following code is essentially reverse engineering X86's
// LowerCallTo.
//
// We are expecting DAG to have the following form:
//
// ch = eh_label (only in case of invoke statepoint)
// ch, glue = callseq_start ch
// ch, glue = X86::Call ch, glue
// ch, glue = callseq_end ch, glue
// get_return_value ch, glue
//
// get_return_value can either be a CopyFromReg to grab the return value from
// %RAX, or it can be a LOAD to load a value returned by reference via a stack
// slot.
if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg ||
CallEnd->getOpcode() == ISD::LOAD))
CallEnd = CallEnd->getOperand(0).getNode();
  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END && "expected a CALLSEQ_END");
if (HasDef) {
if (CS.isInvoke()) {
      // The result value will be used in a different basic block for invokes,
      // so we need to export it now. But the statepoint call has a different
      // type than the actual call, which means the standard exporting
      // mechanism would create a register of the wrong type. So instead we
      // need to create a register of the correct type and save the value
      // into it manually.
      // TODO: To eliminate this problem we could remove gc.result intrinsics
      // completely and make the statepoint call return a tuple.
unsigned Reg = Builder.FuncInfo.CreateRegs(ISP.getActualReturnType());
RegsForValue RFV(
*Builder.DAG.getContext(), Builder.DAG.getTargetLoweringInfo(),
Builder.DAG.getDataLayout(), Reg, ISP.getActualReturnType());
SDValue Chain = Builder.DAG.getEntryNode();
RFV.getCopyToRegs(ReturnValue, Builder.DAG, Builder.getCurSDLoc(), Chain,
nullptr);
PendingExports.push_back(Chain);
Builder.FuncInfo.ValueMap[CS.getInstruction()] = Reg;
} else {
      // The value of the statepoint itself will be the value of the call
      // itself. We'll replace the actual call node shortly. gc_result will
      // grab this value.
Builder.setValue(CS.getInstruction(), ReturnValue);
}
} else {
// The token value is never used from here on, just generate a poison value
Builder.setValue(CS.getInstruction(),
Builder.DAG.getIntPtrConstant(-1, Builder.getCurSDLoc()));
}
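  // CALLSEQ_END's operand 0 is the chain produced by the call node itself,
  // so this hands back the node the caller will replace with the statepoint.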
return CallEnd->getOperand(0).getNode();
}
/// Collect all gc pointers coming into the statepoint intrinsic, clean them
/// up, and return three arrays:
///   Bases - base pointers incoming to this statepoint
///   Ptrs - derived pointers incoming to this statepoint
///   Relocs - the gc_relocate corresponding to each base/ptr pair
/// Elements of these arrays should be in one-to-one correspondence with each
/// other, i.e. Bases[i] and Ptrs[i] come from the same gc.relocate call.
static void getIncomingStatepointGCValues(
SmallVectorImpl<const Value *> &Bases, SmallVectorImpl<const Value *> &Ptrs,
SmallVectorImpl<const Value *> &Relocs, ImmutableStatepoint StatepointSite,
SelectionDAGBuilder &Builder) {
for (GCRelocateOperands relocateOpers : StatepointSite.getRelocates()) {
Relocs.push_back(relocateOpers.getUnderlyingCallSite().getInstruction());
Bases.push_back(relocateOpers.getBasePtr());
Ptrs.push_back(relocateOpers.getDerivedPtr());
}
// Remove any redundant llvm::Values which map to the same SDValue as another
// input. Also has the effect of removing duplicates in the original
// llvm::Value input list as well. This is a useful optimization for
// reducing the size of the StackMap section. It has no other impact.
removeDuplicatesGCPtrs(Bases, Ptrs, Relocs, Builder);
assert(Bases.size() == Ptrs.size() && Ptrs.size() == Relocs.size());
}
/// Spill a value incoming to the statepoint. It might be either part of
/// vmstate or gcstate. In both cases unconditionally spill it on the stack
/// unless it is a null constant. Return a pair whose first element is the
/// frame index containing the saved value and whose second element is the
/// outgoing chain from the emitted store.
static std::pair<SDValue, SDValue>
spillIncomingStatepointValue(SDValue Incoming, SDValue Chain,
SelectionDAGBuilder &Builder) {
SDValue Loc = Builder.StatepointLowering.getLocation(Incoming);
// Emit new store if we didn't do it for this ptr before
if (!Loc.getNode()) {
Loc = Builder.StatepointLowering.allocateStackSlot(Incoming.getValueType(),
Builder);
assert(isa<FrameIndexSDNode>(Loc));
int Index = cast<FrameIndexSDNode>(Loc)->getIndex();
// We use TargetFrameIndex so that isel will not select it into LEA
Loc = Builder.DAG.getTargetFrameIndex(Index, Incoming.getValueType());
// TODO: We can create TokenFactor node instead of
// chaining stores one after another, this may allow
// a bit more optimal scheduling for them
Chain = Builder.DAG.getStore(Chain, Builder.getCurSDLoc(), Incoming, Loc,
MachinePointerInfo::getFixedStack(Index),
false, false, 0);
Builder.StatepointLowering.setLocation(Incoming, Loc);
}
assert(Loc.getNode());
return std::make_pair(Loc, Chain);
}
/// Lower a single value incoming to a statepoint node. This value can be
/// either a deopt value or a gc value, the handling is the same. We special
/// case constants and allocas, then fall back to spilling if required.
static void lowerIncomingStatepointValue(SDValue Incoming,
SmallVectorImpl<SDValue> &Ops,
SelectionDAGBuilder &Builder) {
SDValue Chain = Builder.getRoot();
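  // Any spill stores emitted below are chained off the current root; the
  // updated chain is installed back as the root at the end of this function.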
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Incoming)) {
// If the original value was a constant, make sure it gets recorded as
// such in the stackmap. This is required so that the consumer can
// parse any internal format to the deopt state. It also handles null
// pointers and other constant pointers in GC states
pushStackMapConstant(Ops, Builder, C->getSExtValue());
} else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
// This handles allocas as arguments to the statepoint (this is only
// really meaningful for a deopt value. For GC, we'd be trying to
// relocate the address of the alloca itself?)
Ops.push_back(Builder.DAG.getTargetFrameIndex(FI->getIndex(),
Incoming.getValueType()));
} else {
// Otherwise, locate a spill slot and explicitly spill it so it
// can be found by the runtime later. We currently do not support
// tracking values through callee saved registers to their eventual
// spill location. This would be a useful optimization, but would
// need to be optional since it requires a lot of complexity on the
// runtime side which not all would support.
std::pair<SDValue, SDValue> Res =
spillIncomingStatepointValue(Incoming, Chain, Builder);
Ops.push_back(Res.first);
Chain = Res.second;
}
Builder.DAG.setRoot(Chain);
}
/// Lower deopt state and gc pointer arguments of the statepoint. The actual
/// lowering is described in lowerIncomingStatepointValue. This function is
/// responsible for lowering everything in the right position and playing some
/// tricks to avoid redundant stack manipulation where possible. On
/// completion, 'Ops' will contain ready-to-use operands for the machine code
/// statepoint. The chain nodes will have already been created and the DAG root
/// will be set to the last value spilled (if any were).
static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
ImmutableStatepoint StatepointSite,
SelectionDAGBuilder &Builder) {
// Lower the deopt and gc arguments for this statepoint. Layout will
  // be: deopt argument length, deopt arguments..., gc arguments...
SmallVector<const Value *, 64> Bases, Ptrs, Relocations;
getIncomingStatepointGCValues(Bases, Ptrs, Relocations, StatepointSite,
Builder);
#ifndef NDEBUG
// Check that each of the gc pointer and bases we've gotten out of the
// safepoint is something the strategy thinks might be a pointer into the GC
// heap. This is basically just here to help catch errors during statepoint
// insertion. TODO: This should actually be in the Verifier, but we can't get
// to the GCStrategy from there (yet).
GCStrategy &S = Builder.GFI->getStrategy();
for (const Value *V : Bases) {
auto Opt = S.isGCManagedPointer(V);
if (Opt.hasValue()) {
assert(Opt.getValue() &&
"non gc managed base pointer found in statepoint");
}
}
for (const Value *V : Ptrs) {
auto Opt = S.isGCManagedPointer(V);
if (Opt.hasValue()) {
assert(Opt.getValue() &&
"non gc managed derived pointer found in statepoint");
}
}
for (const Value *V : Relocations) {
auto Opt = S.isGCManagedPointer(V);
if (Opt.hasValue()) {
assert(Opt.getValue() && "non gc managed pointer relocated");
}
}
#endif
// Before we actually start lowering (and allocating spill slots for values),
// reserve any stack slots which we judge to be profitable to reuse for a
// particular value. This is purely an optimization over the code below and
// doesn't change semantics at all. It is important for performance that we
// reserve slots for both deopt and gc values before lowering either.
for (const Value *V : StatepointSite.vm_state_args()) {
reservePreviousStackSlotForValue(V, Builder);
}
for (unsigned i = 0; i < Bases.size(); ++i) {
reservePreviousStackSlotForValue(Bases[i], Builder);
reservePreviousStackSlotForValue(Ptrs[i], Builder);
}
// First, prefix the list with the number of unique values to be
// lowered. Note that this is the number of *Values* not the
// number of SDValues required to lower them.
const int NumVMSArgs = StatepointSite.getNumTotalVMSArgs();
pushStackMapConstant(Ops, Builder, NumVMSArgs);
assert(NumVMSArgs == std::distance(StatepointSite.vm_state_begin(),
StatepointSite.vm_state_end()));
// The vm state arguments are lowered in an opaque manner. We do
// not know what type of values are contained within. We skip the
// first one since that happens to be the total number we lowered
// explicitly just above. We could have left it in the loop and
// not done it explicitly, but it's far easier to understand this
// way.
for (const Value *V : StatepointSite.vm_state_args()) {
SDValue Incoming = Builder.getValue(V);
lowerIncomingStatepointValue(Incoming, Ops, Builder);
}
// Finally, go ahead and lower all the gc arguments. There's no prefixed
// length for this one. After lowering, we'll have the base and pointer
  // arrays interwoven, with each (lowered) base pointer immediately followed
  // by its (lowered) derived pointer, i.e.
// (base[0], ptr[0], base[1], ptr[1], ...)
for (unsigned i = 0; i < Bases.size(); ++i) {
const Value *Base = Bases[i];
lowerIncomingStatepointValue(Builder.getValue(Base), Ops, Builder);
const Value *Ptr = Ptrs[i];
lowerIncomingStatepointValue(Builder.getValue(Ptr), Ops, Builder);
}
// If there are any explicit spill slots passed to the statepoint, record
// them, but otherwise do not do anything special. These are user provided
// allocas and give control over placement to the consumer. In this case,
// it is the contents of the slot which may get updated, not the pointer to
// the alloca
for (Value *V : StatepointSite.gc_args()) {
SDValue Incoming = Builder.getValue(V);
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
// This handles allocas as arguments to the statepoint
Ops.push_back(Builder.DAG.getTargetFrameIndex(FI->getIndex(),
Incoming.getValueType()));
}
}
// Record computed locations for all lowered values.
  // This cannot be embedded in the lowering loops as we need to record *all*
  // values, while the previous loops account only for values with unique
  // SDValues.
const Instruction *StatepointInstr =
StatepointSite.getCallSite().getInstruction();
FunctionLoweringInfo::StatepointSpilledValueMapTy &SpillMap =
Builder.FuncInfo.StatepointRelocatedValues[StatepointInstr];
for (GCRelocateOperands RelocateOpers : StatepointSite.getRelocates()) {
const Value *V = RelocateOpers.getDerivedPtr();
SDValue SDV = Builder.getValue(V);
SDValue Loc = Builder.StatepointLowering.getLocation(SDV);
if (Loc.getNode()) {
SpillMap[V] = cast<FrameIndexSDNode>(Loc)->getIndex();
} else {
      // Record the value as visited, but not spilled. This is the case for
      // allocas and constants. For these values we can avoid emitting a
      // spill load when visiting the corresponding gc_relocate.
      // Actually we do not need to record them in this map at all; we do
      // this only to check that we are not relocating any unvisited value.
SpillMap[V] = None;
      // The default llvm mechanism for exporting values which are used in
      // different basic blocks does not work for gc relocates.
      // Note that it would be incorrect to teach llvm that all relocates are
      // uses of the corresponding values so that it would automatically
      // export them. Relocates of spilled values do not use the original
      // value.
if (StatepointSite.getCallSite().isInvoke())
Builder.ExportFromCurrentBlock(V);
}
}
}
void SelectionDAGBuilder::visitStatepoint(const CallInst &CI) {
// Check some preconditions for sanity
assert(isStatepoint(&CI) &&
"function called must be the statepoint function");
LowerStatepoint(ImmutableStatepoint(&CI));
}
void SelectionDAGBuilder::LowerStatepoint(
ImmutableStatepoint ISP, MachineBasicBlock *LandingPad /*=nullptr*/) {
// The basic scheme here is that information about both the original call and
// the safepoint is encoded in the CallInst. We create a temporary call and
// lower it, then reverse engineer the calling sequence.
NumOfStatepoints++;
// Clear state
StatepointLowering.startNewStatepoint(*this);
ImmutableCallSite CS(ISP.getCallSite());
#ifndef NDEBUG
// Consistency check. Don't do this for invokes. It would be too
// expensive to preserve this information across different basic blocks
if (!CS.isInvoke()) {
for (const User *U : CS->users()) {
const CallInst *Call = cast<CallInst>(U);
if (isGCRelocate(Call))
StatepointLowering.scheduleRelocCall(*Call);
}
}
#endif
#ifndef NDEBUG
// If this is a malformed statepoint, report it early to simplify debugging.
// This should catch any IR level mistake that's made when constructing or
// transforming statepoints.
ISP.verify();
// Check that the associated GCStrategy expects to encounter statepoints.
assert(GFI->getStrategy().useStatepoints() &&
"GCStrategy does not expect to encounter statepoints");
#endif
// Lower statepoint vmstate and gcstate arguments
SmallVector<SDValue, 10> LoweredMetaArgs;
lowerStatepointMetaArgs(LoweredMetaArgs, ISP, *this);
// Get call node, we will replace it later with statepoint
SDNode *CallNode =
lowerCallFromStatepoint(ISP, LandingPad, *this, PendingExports);
// Construct the actual GC_TRANSITION_START, STATEPOINT, and GC_TRANSITION_END
// nodes with all the appropriate arguments and return values.
// Call Node: Chain, Target, {Args}, RegMask, [Glue]
SDValue Chain = CallNode->getOperand(0);
SDValue Glue;
bool CallHasIncomingGlue = CallNode->getGluedNode();
if (CallHasIncomingGlue) {
    // Glue is always the last operand
Glue = CallNode->getOperand(CallNode->getNumOperands() - 1);
}
// Build the GC_TRANSITION_START node if necessary.
//
// The operands to the GC_TRANSITION_{START,END} nodes are laid out in the
// order in which they appear in the call to the statepoint intrinsic. If
  // any of the operands is pointer-typed, that operand is immediately
// followed by a SRCVALUE for the pointer that may be used during lowering
// (e.g. to form MachinePointerInfo values for loads/stores).
const bool IsGCTransition =
(ISP.getFlags() & (uint64_t)StatepointFlags::GCTransition) ==
(uint64_t)StatepointFlags::GCTransition;
if (IsGCTransition) {
SmallVector<SDValue, 8> TSOps;
// Add chain
TSOps.push_back(Chain);
// Add GC transition arguments
for (const Value *V : ISP.gc_transition_args()) {
TSOps.push_back(getValue(V));
if (V->getType()->isPointerTy())
TSOps.push_back(DAG.getSrcValue(V));
}
// Add glue if necessary
if (CallHasIncomingGlue)
TSOps.push_back(Glue);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue GCTransitionStart =
DAG.getNode(ISD::GC_TRANSITION_START, getCurSDLoc(), NodeTys, TSOps);
Chain = GCTransitionStart.getValue(0);
Glue = GCTransitionStart.getValue(1);
}
// TODO: Currently, all of these operands are being marked as read/write in
  // PrologEpilogInserter.cpp, we should special case the VMState arguments
// and flags to be read-only.
SmallVector<SDValue, 40> Ops;
// Add the <id> and <numBytes> constants.
Ops.push_back(DAG.getTargetConstant(ISP.getID(), getCurSDLoc(), MVT::i64));
Ops.push_back(
DAG.getTargetConstant(ISP.getNumPatchBytes(), getCurSDLoc(), MVT::i32));
// Calculate and push starting position of vmstate arguments
// Get number of arguments incoming directly into call node
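  // (everything except the chain, the call target, the register mask, and
  // the glue when present - hence the 3 or 4 below)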
unsigned NumCallRegArgs =
CallNode->getNumOperands() - (CallHasIncomingGlue ? 4 : 3);
Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, getCurSDLoc(), MVT::i32));
// Add call target
SDValue CallTarget = SDValue(CallNode->getOperand(1).getNode(), 0);
Ops.push_back(CallTarget);
// Add call arguments
// Get position of register mask in the call
SDNode::op_iterator RegMaskIt;
if (CallHasIncomingGlue)
RegMaskIt = CallNode->op_end() - 2;
else
RegMaskIt = CallNode->op_end() - 1;
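  // Copy the register arguments: skip the chain (operand 0) and the call
  // target (operand 1), and stop just before the register mask.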
Ops.insert(Ops.end(), CallNode->op_begin() + 2, RegMaskIt);
// Add a constant argument for the calling convention
pushStackMapConstant(Ops, *this, CS.getCallingConv());
// Add a constant argument for the flags
uint64_t Flags = ISP.getFlags();
assert(
((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0)
&& "unknown flag used");
pushStackMapConstant(Ops, *this, Flags);
// Insert all vmstate and gcstate arguments
Ops.insert(Ops.end(), LoweredMetaArgs.begin(), LoweredMetaArgs.end());
// Add register mask from call node
Ops.push_back(*RegMaskIt);
// Add chain
Ops.push_back(Chain);
// Same for the glue, but we add it only if original call had it
if (Glue.getNode())
Ops.push_back(Glue);
// Compute return values. Provide a glue output since we consume one as
// input. This allows someone else to chain off us as needed.
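  // Note: emitting the STATEPOINT via getMachineNode (as a MachineSDNode)
  // keeps instruction selection from pattern-matching the node itself.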
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDNode *StatepointMCNode =
DAG.getMachineNode(TargetOpcode::STATEPOINT, getCurSDLoc(), NodeTys, Ops);
SDNode *SinkNode = StatepointMCNode;
// Build the GC_TRANSITION_END node if necessary.
//
// See the comment above regarding GC_TRANSITION_START for the layout of
// the operands to the GC_TRANSITION_END node.
if (IsGCTransition) {
SmallVector<SDValue, 8> TEOps;
// Add chain
TEOps.push_back(SDValue(StatepointMCNode, 0));
// Add GC transition arguments
for (const Value *V : ISP.gc_transition_args()) {
TEOps.push_back(getValue(V));
if (V->getType()->isPointerTy())
TEOps.push_back(DAG.getSrcValue(V));
}
// Add glue
TEOps.push_back(SDValue(StatepointMCNode, 1));
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue GCTransitionEnd =
        DAG.getNode(ISD::GC_TRANSITION_END, getCurSDLoc(), NodeTys, TEOps);
    SinkNode = GCTransitionEnd.getNode();
}
// Replace original call
DAG.ReplaceAllUsesWith(CallNode, SinkNode); // This may update Root
  // Remove the original call node
DAG.DeleteNode(CallNode);
// DON'T set the root - under the assumption that it's already set past the
// inserted node we created.
// TODO: A better future implementation would be to emit a single variable
// argument, variable return value STATEPOINT node here and then hookup the
// return value of each gc.relocate to the respective output of the
// previously emitted STATEPOINT value. Unfortunately, this doesn't appear
// to actually be possible today.
}
void SelectionDAGBuilder::visitGCResult(const CallInst &CI) {
// The result value of the gc_result is simply the result of the actual
// call. We've already emitted this, so just grab the value.
Instruction *I = cast<Instruction>(CI.getArgOperand(0));
assert(isStatepoint(I) && "first argument must be a statepoint token");
if (isa<InvokeInst>(I)) {
    // For invokes we should have stored the call result in a virtual
    // register.
    // We cannot use the default getValue() functionality to copy the value
    // from this register because the statepoint and the actual call can have
    // different return types, and getValue() would use a CopyFromReg of the
    // wrong type, which is always i32 in our case.
PointerType *CalleeType = cast<PointerType>(
ImmutableStatepoint(I).getCalledValue()->getType());
Type *RetTy =
cast<FunctionType>(CalleeType->getElementType())->getReturnType();
SDValue CopyFromReg = getCopyFromRegs(I, RetTy);
assert(CopyFromReg.getNode());
setValue(&CI, CopyFromReg);
} else {
setValue(&CI, getValue(I));
}
}
void SelectionDAGBuilder::visitGCRelocate(const CallInst &CI) {
GCRelocateOperands RelocateOpers(&CI);
#ifndef NDEBUG
// Consistency check
// We skip this check for invoke statepoints. It would be too expensive to
// preserve validation info through different basic blocks.
if (!RelocateOpers.isTiedToInvoke()) {
StatepointLowering.relocCallVisited(CI);
}
#endif
const Value *DerivedPtr = RelocateOpers.getDerivedPtr();
SDValue SD = getValue(DerivedPtr);
FunctionLoweringInfo::StatepointSpilledValueMapTy &SpillMap =
FuncInfo.StatepointRelocatedValues[RelocateOpers.getStatepoint()];
// We should have recorded location for this pointer
  assert(SpillMap.count(DerivedPtr) &&
         "Relocating a gc value that was not lowered");
Optional<int> DerivedPtrLocation = SpillMap[DerivedPtr];
// We didn't need to spill these special cases (constants and allocas).
  // See the handling in spillIncomingStatepointValue for details.
if (!DerivedPtrLocation) {
setValue(&CI, SD);
return;
}
SDValue SpillSlot = DAG.getTargetFrameIndex(*DerivedPtrLocation,
SD.getValueType());
// Be conservative: flush all pending loads
// TODO: Probably we can be less restrictive on this,
  // it may allow more scheduling opportunities.
SDValue Chain = getRoot();
SDValue SpillLoad =
DAG.getLoad(SpillSlot.getValueType(), getCurSDLoc(), Chain, SpillSlot,
MachinePointerInfo::getFixedStack(*DerivedPtrLocation),
false, false, false, 0);
// Again, be conservative, don't emit pending loads
DAG.setRoot(SpillLoad.getValue(1));
assert(SpillLoad.getNode());
setValue(&CI, SpillLoad);
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | //===----- LegalizeIntegerTypes.cpp - Legalization of integer types -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements integer type expansion and promotion for LegalizeTypes.
// Promotion is the act of changing a computation in an illegal type into a
// computation in a larger type. For example, implementing i8 arithmetic in an
// i32 register (often needed on PowerPC).
// Expansion is the act of changing a computation in an illegal type into a
// computation in two identical registers of a smaller type. For example,
// implementing i64 arithmetic in two i32 registers (often needed on 32-bit
// targets).
//
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "legalize-types"
//===----------------------------------------------------------------------===//
// Integer Result Promotion
//===----------------------------------------------------------------------===//
/// PromoteIntegerResult - This method is called when a result of a node is
/// found to be in need of promotion to a larger type. At this point, the node
/// may also have invalid operands or may have other results that need
/// expansion; we just know that (at least) one result needs promotion.
void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
DEBUG(dbgs() << "Promote integer result: "; N->dump(&DAG); dbgs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getValueType(ResNo), true))
return;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "PromoteIntegerResult #" << ResNo << ": ";
N->dump(&DAG); dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to promote this operator!");
  case ISD::MERGE_VALUES: Res = PromoteIntRes_MERGE_VALUES(N, ResNo); break;
case ISD::AssertSext: Res = PromoteIntRes_AssertSext(N); break;
case ISD::AssertZext: Res = PromoteIntRes_AssertZext(N); break;
case ISD::BITCAST: Res = PromoteIntRes_BITCAST(N); break;
case ISD::BSWAP: Res = PromoteIntRes_BSWAP(N); break;
case ISD::BUILD_PAIR: Res = PromoteIntRes_BUILD_PAIR(N); break;
case ISD::Constant: Res = PromoteIntRes_Constant(N); break;
case ISD::CONVERT_RNDSAT:
Res = PromoteIntRes_CONVERT_RNDSAT(N); break;
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTLZ: Res = PromoteIntRes_CTLZ(N); break;
case ISD::CTPOP: Res = PromoteIntRes_CTPOP(N); break;
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTTZ: Res = PromoteIntRes_CTTZ(N); break;
case ISD::EXTRACT_VECTOR_ELT:
Res = PromoteIntRes_EXTRACT_VECTOR_ELT(N); break;
  case ISD::LOAD: Res = PromoteIntRes_LOAD(cast<LoadSDNode>(N)); break;
  case ISD::MLOAD: Res = PromoteIntRes_MLOAD(cast<MaskedLoadSDNode>(N)); break;
case ISD::SELECT: Res = PromoteIntRes_SELECT(N); break;
case ISD::VSELECT: Res = PromoteIntRes_VSELECT(N); break;
case ISD::SELECT_CC: Res = PromoteIntRes_SELECT_CC(N); break;
case ISD::SETCC: Res = PromoteIntRes_SETCC(N); break;
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX: Res = PromoteIntRes_SimpleIntBinOp(N); break;
case ISD::SHL: Res = PromoteIntRes_SHL(N); break;
case ISD::SIGN_EXTEND_INREG:
Res = PromoteIntRes_SIGN_EXTEND_INREG(N); break;
case ISD::SRA: Res = PromoteIntRes_SRA(N); break;
case ISD::SRL: Res = PromoteIntRes_SRL(N); break;
case ISD::TRUNCATE: Res = PromoteIntRes_TRUNCATE(N); break;
case ISD::UNDEF: Res = PromoteIntRes_UNDEF(N); break;
case ISD::VAARG: Res = PromoteIntRes_VAARG(N); break;
case ISD::EXTRACT_SUBVECTOR:
Res = PromoteIntRes_EXTRACT_SUBVECTOR(N); break;
case ISD::VECTOR_SHUFFLE:
Res = PromoteIntRes_VECTOR_SHUFFLE(N); break;
case ISD::INSERT_VECTOR_ELT:
Res = PromoteIntRes_INSERT_VECTOR_ELT(N); break;
case ISD::BUILD_VECTOR:
Res = PromoteIntRes_BUILD_VECTOR(N); break;
case ISD::SCALAR_TO_VECTOR:
Res = PromoteIntRes_SCALAR_TO_VECTOR(N); break;
case ISD::CONCAT_VECTORS:
Res = PromoteIntRes_CONCAT_VECTORS(N); break;
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND: Res = PromoteIntRes_INT_EXTEND(N); break;
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = PromoteIntRes_FP_TO_XINT(N); break;
case ISD::FP_TO_FP16: Res = PromoteIntRes_FP_TO_FP16(N); break;
case ISD::AND:
case ISD::OR:
case ISD::XOR:
case ISD::ADD:
case ISD::SUB:
case ISD::MUL: Res = PromoteIntRes_SimpleIntBinOp(N); break;
case ISD::SDIV:
case ISD::SREM: Res = PromoteIntRes_SDIV(N); break;
case ISD::UDIV:
case ISD::UREM: Res = PromoteIntRes_UDIV(N); break;
case ISD::SADDO:
case ISD::SSUBO: Res = PromoteIntRes_SADDSUBO(N, ResNo); break;
case ISD::UADDO:
case ISD::USUBO: Res = PromoteIntRes_UADDSUBO(N, ResNo); break;
case ISD::SMULO:
case ISD::UMULO: Res = PromoteIntRes_XMULO(N, ResNo); break;
case ISD::ATOMIC_LOAD:
Res = PromoteIntRes_Atomic0(cast<AtomicSDNode>(N)); break;
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_SWAP:
Res = PromoteIntRes_Atomic1(cast<AtomicSDNode>(N)); break;
case ISD::ATOMIC_CMP_SWAP:
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
Res = PromoteIntRes_AtomicCmpSwap(cast<AtomicSDNode>(N), ResNo);
break;
}
// If the result is null then the sub-method took care of registering it.
if (Res.getNode())
SetPromotedInteger(SDValue(N, ResNo), Res);
}
SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
unsigned ResNo) {
SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
return GetPromotedInteger(Op);
}
SDValue DAGTypeLegalizer::PromoteIntRes_AssertSext(SDNode *N) {
// Sign-extend the new bits, and continue the assertion.
SDValue Op = SExtPromotedInteger(N->getOperand(0));
return DAG.getNode(ISD::AssertSext, SDLoc(N),
Op.getValueType(), Op, N->getOperand(1));
}
SDValue DAGTypeLegalizer::PromoteIntRes_AssertZext(SDNode *N) {
// Zero the new bits, and continue the assertion.
SDValue Op = ZExtPromotedInteger(N->getOperand(0));
return DAG.getNode(ISD::AssertZext, SDLoc(N),
Op.getValueType(), Op, N->getOperand(1));
}
SDValue DAGTypeLegalizer::PromoteIntRes_Atomic0(AtomicSDNode *N) {
EVT ResVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Res = DAG.getAtomic(N->getOpcode(), SDLoc(N),
N->getMemoryVT(), ResVT,
N->getChain(), N->getBasePtr(),
N->getMemOperand(), N->getOrdering(),
N->getSynchScope());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
return Res;
}
SDValue DAGTypeLegalizer::PromoteIntRes_Atomic1(AtomicSDNode *N) {
SDValue Op2 = GetPromotedInteger(N->getOperand(2));
SDValue Res = DAG.getAtomic(N->getOpcode(), SDLoc(N),
N->getMemoryVT(),
N->getChain(), N->getBasePtr(),
Op2, N->getMemOperand(), N->getOrdering(),
N->getSynchScope());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
return Res;
}
SDValue DAGTypeLegalizer::PromoteIntRes_AtomicCmpSwap(AtomicSDNode *N,
unsigned ResNo) {
if (ResNo == 1) {
assert(N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
EVT SVT = getSetCCResultType(N->getOperand(2).getValueType());
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(1));
// Only use the result of getSetCCResultType if it is legal,
// otherwise just use the promoted result type (NVT).
if (!TLI.isTypeLegal(SVT))
SVT = NVT;
SDVTList VTs = DAG.getVTList(N->getValueType(0), SVT, MVT::Other);
SDValue Res = DAG.getAtomicCmpSwap(
ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, SDLoc(N), N->getMemoryVT(), VTs,
N->getChain(), N->getBasePtr(), N->getOperand(2), N->getOperand(3),
N->getMemOperand(), N->getSuccessOrdering(), N->getFailureOrdering(),
N->getSynchScope());
ReplaceValueWith(SDValue(N, 0), Res.getValue(0));
ReplaceValueWith(SDValue(N, 2), Res.getValue(2));
return Res.getValue(1);
}
SDValue Op2 = GetPromotedInteger(N->getOperand(2));
SDValue Op3 = GetPromotedInteger(N->getOperand(3));
SDVTList VTs =
DAG.getVTList(Op2.getValueType(), N->getValueType(1), MVT::Other);
SDValue Res = DAG.getAtomicCmpSwap(
N->getOpcode(), SDLoc(N), N->getMemoryVT(), VTs, N->getChain(),
N->getBasePtr(), Op2, Op3, N->getMemOperand(), N->getSuccessOrdering(),
N->getFailureOrdering(), N->getSynchScope());
// Update the use to N with the newly created Res.
for (unsigned i = 1, NumResults = N->getNumValues(); i < NumResults; ++i)
ReplaceValueWith(SDValue(N, i), Res.getValue(i));
return Res;
}
SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
SDValue InOp = N->getOperand(0);
EVT InVT = InOp.getValueType();
EVT NInVT = TLI.getTypeToTransformTo(*DAG.getContext(), InVT);
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
SDLoc dl(N);
switch (getTypeAction(InVT)) {
case TargetLowering::TypeLegal:
break;
case TargetLowering::TypePromoteInteger:
if (NOutVT.bitsEq(NInVT) && !NOutVT.isVector() && !NInVT.isVector())
// The input promotes to the same size. Convert the promoted value.
return DAG.getNode(ISD::BITCAST, dl, NOutVT, GetPromotedInteger(InOp));
break;
case TargetLowering::TypeSoftenFloat:
// Promote the integer operand by hand.
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT, GetSoftenedFloat(InOp));
case TargetLowering::TypePromoteFloat: {
// Convert the promoted float by hand.
if (NOutVT.bitsEq(NInVT)) {
SDValue PromotedOp = GetPromotedFloat(InOp);
SDValue Trunc = DAG.getNode(ISD::FP_TO_FP16, dl, NOutVT, PromotedOp);
return DAG.getNode(ISD::AssertZext, dl, NOutVT, Trunc,
DAG.getValueType(OutVT));
}
break;
}
case TargetLowering::TypeExpandInteger:
case TargetLowering::TypeExpandFloat:
break;
case TargetLowering::TypeScalarizeVector:
// Convert the element to an integer and promote it by hand.
if (!NOutVT.isVector())
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
BitConvertToInteger(GetScalarizedVector(InOp)));
break;
case TargetLowering::TypeSplitVector: {
// For example, i32 = BITCAST v2i16 on alpha. Convert the split
// pieces of the input into integers and reassemble in the final type.
SDValue Lo, Hi;
GetSplitVector(N->getOperand(0), Lo, Hi);
Lo = BitConvertToInteger(Lo);
Hi = BitConvertToInteger(Hi);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
InOp = DAG.getNode(ISD::ANY_EXTEND, dl,
EVT::getIntegerVT(*DAG.getContext(),
NOutVT.getSizeInBits()),
JoinIntegers(Lo, Hi));
return DAG.getNode(ISD::BITCAST, dl, NOutVT, InOp);
}
case TargetLowering::TypeWidenVector:
// The input is widened to the same size. Convert to the widened value.
// Make sure that the outgoing value is not a vector, because this would
// make us bitcast between two vectors which are legalized in different ways.
if (NOutVT.bitsEq(NInVT) && !NOutVT.isVector())
return DAG.getNode(ISD::BITCAST, dl, NOutVT, GetWidenedVector(InOp));
}
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
CreateStackStoreLoad(InOp, OutVT));
}
SDValue DAGTypeLegalizer::PromoteIntRes_BSWAP(SDNode *N) {
SDValue Op = GetPromotedInteger(N->getOperand(0));
EVT OVT = N->getValueType(0);
EVT NVT = Op.getValueType();
SDLoc dl(N);
unsigned DiffBits = NVT.getScalarSizeInBits() - OVT.getScalarSizeInBits();
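  // The byte swap in the wider type leaves the interesting bytes at the high
  // end of the register; shift them back down into the low bits.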
return DAG.getNode(
ISD::SRL, dl, NVT, DAG.getNode(ISD::BSWAP, dl, NVT, Op),
DAG.getConstant(DiffBits, dl,
TLI.getShiftAmountTy(NVT, DAG.getDataLayout())));
}
SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) {
// The pair element type may be legal, or may not promote to the same type as
// the result, for example i14 = BUILD_PAIR (i7, i7). Handle all cases.
return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N),
TLI.getTypeToTransformTo(*DAG.getContext(),
N->getValueType(0)), JoinIntegers(N->getOperand(0),
N->getOperand(1)));
}
SDValue DAGTypeLegalizer::PromoteIntRes_Constant(SDNode *N) {
EVT VT = N->getValueType(0);
// FIXME there is no actual debug info here
SDLoc dl(N);
// Zero extend things like i1, sign extend everything else. It shouldn't
// matter in theory which one we pick, but this tends to give better code?
unsigned Opc = VT.isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
SDValue Result = DAG.getNode(Opc, dl,
TLI.getTypeToTransformTo(*DAG.getContext(), VT),
SDValue(N, 0));
assert(isa<ConstantSDNode>(Result) && "Didn't constant fold ext?");
return Result;
}
SDValue DAGTypeLegalizer::PromoteIntRes_CONVERT_RNDSAT(SDNode *N) {
ISD::CvtCode CvtCode = cast<CvtRndSatSDNode>(N)->getCvtCode();
assert ((CvtCode == ISD::CVT_SS || CvtCode == ISD::CVT_SU ||
CvtCode == ISD::CVT_US || CvtCode == ISD::CVT_UU ||
CvtCode == ISD::CVT_SF || CvtCode == ISD::CVT_UF) &&
"can only promote integers");
EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getConvertRndSat(OutVT, SDLoc(N), N->getOperand(0),
N->getOperand(1), N->getOperand(2),
N->getOperand(3), N->getOperand(4), CvtCode);
}
SDValue DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) {
// Zero extend to the promoted type and do the count there.
SDValue Op = ZExtPromotedInteger(N->getOperand(0));
SDLoc dl(N);
EVT OVT = N->getValueType(0);
EVT NVT = Op.getValueType();
Op = DAG.getNode(N->getOpcode(), dl, NVT, Op);
// Subtract off the extra leading bits in the bigger type.
return DAG.getNode(
ISD::SUB, dl, NVT, Op,
DAG.getConstant(NVT.getScalarSizeInBits() - OVT.getScalarSizeInBits(), dl,
NVT));
}
SDValue DAGTypeLegalizer::PromoteIntRes_CTPOP(SDNode *N) {
// Zero extend to the promoted type and do the count there.
SDValue Op = ZExtPromotedInteger(N->getOperand(0));
return DAG.getNode(ISD::CTPOP, SDLoc(N), Op.getValueType(), Op);
}
SDValue DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) {
SDValue Op = GetPromotedInteger(N->getOperand(0));
EVT OVT = N->getValueType(0);
EVT NVT = Op.getValueType();
SDLoc dl(N);
if (N->getOpcode() == ISD::CTTZ) {
// The count is the same in the promoted type except if the original
// value was zero. This can be handled by setting the bit just off
// the top of the original type.
auto TopBit = APInt::getOneBitSet(NVT.getScalarSizeInBits(),
OVT.getScalarSizeInBits());
Op = DAG.getNode(ISD::OR, dl, NVT, Op, DAG.getConstant(TopBit, dl, NVT));
}
return DAG.getNode(N->getOpcode(), dl, NVT, Op);
}
SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N) {
SDLoc dl(N);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NVT, N->getOperand(0),
N->getOperand(1));
}
SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned NewOpc = N->getOpcode();
SDLoc dl(N);
// If we're promoting a UINT to a larger size and the larger FP_TO_UINT is
// not Legal, check to see if we can use FP_TO_SINT instead. (If both UINT
// and SINT conversions are Custom, there is no way to tell which is
// preferable. We choose SINT because that's the right thing on PPC.)
if (N->getOpcode() == ISD::FP_TO_UINT &&
!TLI.isOperationLegal(ISD::FP_TO_UINT, NVT) &&
TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NVT))
NewOpc = ISD::FP_TO_SINT;
SDValue Res = DAG.getNode(NewOpc, dl, NVT, N->getOperand(0));
// Assert that the converted value fits in the original type. If it doesn't
// (eg: because the value being converted is too big), then the result of the
// original operation was undefined anyway, so the assert is still correct.
return DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ?
ISD::AssertZext : ISD::AssertSext, dl, NVT, Res,
DAG.getValueType(N->getValueType(0).getScalarType()));
}
SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_FP16(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
SDValue Res = DAG.getNode(N->getOpcode(), dl, NVT, N->getOperand(0));
return DAG.getNode(ISD::AssertZext, dl,
NVT, Res, DAG.getValueType(N->getValueType(0)));
}
SDValue DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
if (getTypeAction(N->getOperand(0).getValueType())
== TargetLowering::TypePromoteInteger) {
SDValue Res = GetPromotedInteger(N->getOperand(0));
assert(Res.getValueType().bitsLE(NVT) && "Extension doesn't make sense!");
// If the result and operand types are the same after promotion, simplify
// to an in-register extension.
if (NVT == Res.getValueType()) {
// The high bits are not guaranteed to be anything. Insert an extend.
if (N->getOpcode() == ISD::SIGN_EXTEND)
return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NVT, Res,
DAG.getValueType(N->getOperand(0).getValueType()));
if (N->getOpcode() == ISD::ZERO_EXTEND)
return DAG.getZeroExtendInReg(Res, dl,
N->getOperand(0).getValueType().getScalarType());
assert(N->getOpcode() == ISD::ANY_EXTEND && "Unknown integer extension!");
return Res;
}
}
// Otherwise, just extend the original operand all the way to the larger type.
return DAG.getNode(N->getOpcode(), dl, NVT, N->getOperand(0));
}
SDValue DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) {
assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
ISD::LoadExtType ExtType =
ISD::isNON_EXTLoad(N) ? ISD::EXTLOAD : N->getExtensionType();
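  // A non-extending load of the old type becomes an any-extending load of
  // the same memory VT into the promoted type; extending loads keep their
  // original extension kind.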
SDLoc dl(N);
SDValue Res = DAG.getExtLoad(ExtType, dl, NVT, N->getChain(), N->getBasePtr(),
N->getMemoryVT(), N->getMemOperand());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
return Res;
}
SDValue DAGTypeLegalizer::PromoteIntRes_MLOAD(MaskedLoadSDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue ExtSrc0 = GetPromotedInteger(N->getSrc0());
SDValue Mask = N->getMask();
EVT NewMaskVT = getSetCCResultType(NVT);
if (NewMaskVT != N->getMask().getValueType())
Mask = PromoteTargetBoolean(Mask, NewMaskVT);
SDLoc dl(N);
SDValue Res = DAG.getMaskedLoad(NVT, dl, N->getChain(), N->getBasePtr(),
Mask, ExtSrc0, N->getMemoryVT(),
N->getMemOperand(), ISD::SEXTLOAD);
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
return Res;
}
/// Promote the overflow flag of an overflowing arithmetic node.
SDValue DAGTypeLegalizer::PromoteIntRes_Overflow(SDNode *N) {
// Simply change the return type of the boolean result.
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(1));
EVT ValueVTs[] = { N->getValueType(0), NVT };
SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
SDValue Res = DAG.getNode(N->getOpcode(), SDLoc(N),
DAG.getVTList(ValueVTs), Ops);
// Modified the sum result - switch anything that used the old sum to use
// the new one.
ReplaceValueWith(SDValue(N, 0), Res);
return SDValue(Res.getNode(), 1);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SADDSUBO(SDNode *N, unsigned ResNo) {
if (ResNo == 1)
return PromoteIntRes_Overflow(N);
// The operation overflowed iff the result in the larger type is not the
// sign extension of its truncation to the original type.
SDValue LHS = SExtPromotedInteger(N->getOperand(0));
SDValue RHS = SExtPromotedInteger(N->getOperand(1));
EVT OVT = N->getOperand(0).getValueType();
EVT NVT = LHS.getValueType();
SDLoc dl(N);
// Do the arithmetic in the larger type.
unsigned Opcode = N->getOpcode() == ISD::SADDO ? ISD::ADD : ISD::SUB;
SDValue Res = DAG.getNode(Opcode, dl, NVT, LHS, RHS);
// Calculate the overflow flag: sign extend the arithmetic result from
// the original type.
SDValue Ofl = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NVT, Res,
DAG.getValueType(OVT));
// Overflowed if and only if this is not equal to Res.
Ofl = DAG.getSetCC(dl, N->getValueType(1), Ofl, Res, ISD::SETNE);
// Use the calculated overflow everywhere.
ReplaceValueWith(SDValue(N, 1), Ofl);
return Res;
}
SDValue DAGTypeLegalizer::PromoteIntRes_SDIV(SDNode *N) {
// Sign extend the input.
SDValue LHS = SExtPromotedInteger(N->getOperand(0));
SDValue RHS = SExtPromotedInteger(N->getOperand(1));
return DAG.getNode(N->getOpcode(), SDLoc(N),
LHS.getValueType(), LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SELECT(SDNode *N) {
SDValue LHS = GetPromotedInteger(N->getOperand(1));
SDValue RHS = GetPromotedInteger(N->getOperand(2));
return DAG.getSelect(SDLoc(N),
LHS.getValueType(), N->getOperand(0), LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_VSELECT(SDNode *N) {
SDValue Mask = N->getOperand(0);
EVT OpTy = N->getOperand(1).getValueType();
// Promote all the way up to the canonical SetCC type.
Mask = PromoteTargetBoolean(Mask, OpTy);
SDValue LHS = GetPromotedInteger(N->getOperand(1));
SDValue RHS = GetPromotedInteger(N->getOperand(2));
return DAG.getNode(ISD::VSELECT, SDLoc(N),
LHS.getValueType(), Mask, LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SELECT_CC(SDNode *N) {
SDValue LHS = GetPromotedInteger(N->getOperand(2));
SDValue RHS = GetPromotedInteger(N->getOperand(3));
return DAG.getNode(ISD::SELECT_CC, SDLoc(N),
LHS.getValueType(), N->getOperand(0),
N->getOperand(1), LHS, RHS, N->getOperand(4));
}
SDValue DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) {
EVT SVT = getSetCCResultType(N->getOperand(0).getValueType());
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
// Only use the result of getSetCCResultType if it is legal,
// otherwise just use the promoted result type (NVT).
if (!TLI.isTypeLegal(SVT))
SVT = NVT;
SDLoc dl(N);
assert(SVT.isVector() == N->getOperand(0).getValueType().isVector() &&
"Vector compare must return a vector result!");
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (LHS.getValueType() != RHS.getValueType()) {
if (getTypeAction(LHS.getValueType()) == TargetLowering::TypePromoteInteger &&
!LHS.getValueType().isVector())
LHS = GetPromotedInteger(LHS);
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger &&
!RHS.getValueType().isVector())
RHS = GetPromotedInteger(RHS);
}
// Get the SETCC result using the canonical SETCC type.
SDValue SetCC = DAG.getNode(N->getOpcode(), dl, SVT, LHS, RHS,
N->getOperand(2));
assert(NVT.bitsLE(SVT) && "Integer type overpromoted?");
// Convert to the expected type.
return DAG.getNode(ISD::TRUNCATE, dl, NVT, SetCC);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (getTypeAction(LHS.getValueType()) == TargetLowering::TypePromoteInteger)
LHS = GetPromotedInteger(LHS);
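  // The shift amount must be zero extended: garbage in its promoted high
  // bits would change the effective shift count.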
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
RHS = ZExtPromotedInteger(RHS);
return DAG.getNode(ISD::SHL, SDLoc(N), LHS.getValueType(), LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N) {
SDValue Op = GetPromotedInteger(N->getOperand(0));
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N),
Op.getValueType(), Op, N->getOperand(1));
}
SDValue DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N) {
// The input may have strange things in the top bits of the registers, but
// these operations don't care. They may have weird bits going out, but
// that too is okay if they are integer operations.
SDValue LHS = GetPromotedInteger(N->getOperand(0));
SDValue RHS = GetPromotedInteger(N->getOperand(1));
return DAG.getNode(N->getOpcode(), SDLoc(N),
LHS.getValueType(), LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
// The input value must be properly sign extended.
if (getTypeAction(LHS.getValueType()) == TargetLowering::TypePromoteInteger)
LHS = SExtPromotedInteger(LHS);
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
RHS = ZExtPromotedInteger(RHS);
return DAG.getNode(ISD::SRA, SDLoc(N), LHS.getValueType(), LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
// The input value must be properly zero extended.
if (getTypeAction(LHS.getValueType()) == TargetLowering::TypePromoteInteger)
LHS = ZExtPromotedInteger(LHS);
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
RHS = ZExtPromotedInteger(RHS);
return DAG.getNode(ISD::SRL, SDLoc(N), LHS.getValueType(), LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Res;
SDValue InOp = N->getOperand(0);
SDLoc dl(N);
switch (getTypeAction(InOp.getValueType())) {
default: llvm_unreachable("Unknown type action!");
case TargetLowering::TypeLegal:
case TargetLowering::TypeExpandInteger:
Res = InOp;
break;
case TargetLowering::TypePromoteInteger:
Res = GetPromotedInteger(InOp);
break;
case TargetLowering::TypeSplitVector:
EVT InVT = InOp.getValueType();
assert(InVT.isVector() && "Cannot split scalar types");
unsigned NumElts = InVT.getVectorNumElements();
assert(NumElts == NVT.getVectorNumElements() &&
"Dst and Src must have the same number of elements");
assert(isPowerOf2_32(NumElts) &&
"Promoted vector type must be a power of two");
SDValue EOp1, EOp2;
GetSplitVector(InOp, EOp1, EOp2);
EVT HalfNVT = EVT::getVectorVT(*DAG.getContext(), NVT.getScalarType(),
NumElts/2);
EOp1 = DAG.getNode(ISD::TRUNCATE, dl, HalfNVT, EOp1);
EOp2 = DAG.getNode(ISD::TRUNCATE, dl, HalfNVT, EOp2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, NVT, EOp1, EOp2);
}
// Truncate to NVT instead of VT
return DAG.getNode(ISD::TRUNCATE, dl, NVT, Res);
}
SDValue DAGTypeLegalizer::PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo) {
if (ResNo == 1)
return PromoteIntRes_Overflow(N);
// The operation overflowed iff the result in the larger type is not the
// zero extension of its truncation to the original type.
SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
SDValue RHS = ZExtPromotedInteger(N->getOperand(1));
EVT OVT = N->getOperand(0).getValueType();
EVT NVT = LHS.getValueType();
SDLoc dl(N);
// Do the arithmetic in the larger type.
unsigned Opcode = N->getOpcode() == ISD::UADDO ? ISD::ADD : ISD::SUB;
SDValue Res = DAG.getNode(Opcode, dl, NVT, LHS, RHS);
// Calculate the overflow flag: zero extend the arithmetic result from
// the original type.
SDValue Ofl = DAG.getZeroExtendInReg(Res, dl, OVT);
// Overflowed if and only if this is not equal to Res.
Ofl = DAG.getSetCC(dl, N->getValueType(1), Ofl, Res, ISD::SETNE);
// Use the calculated overflow everywhere.
ReplaceValueWith(SDValue(N, 1), Ofl);
return Res;
}
SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) {
// Promote the overflow bit trivially.
if (ResNo == 1)
return PromoteIntRes_Overflow(N);
SDValue LHS = N->getOperand(0), RHS = N->getOperand(1);
SDLoc DL(N);
EVT SmallVT = LHS.getValueType();
// To determine if the result overflowed in a larger type, we extend the
// input to the larger type, do the multiply (checking if it overflows),
// then also check the high bits of the result to see if overflow happened
// there.
if (N->getOpcode() == ISD::SMULO) {
LHS = SExtPromotedInteger(LHS);
RHS = SExtPromotedInteger(RHS);
} else {
LHS = ZExtPromotedInteger(LHS);
RHS = ZExtPromotedInteger(RHS);
}
SDVTList VTs = DAG.getVTList(LHS.getValueType(), N->getValueType(1));
SDValue Mul = DAG.getNode(N->getOpcode(), DL, VTs, LHS, RHS);
// Overflow occurred if it occurred in the larger type, or if the high part
// of the result does not zero/sign-extend the low part. Check this second
// possibility first.
SDValue Overflow;
if (N->getOpcode() == ISD::UMULO) {
// Unsigned overflow occurred if the high part is non-zero.
SDValue Hi = DAG.getNode(ISD::SRL, DL, Mul.getValueType(), Mul,
DAG.getIntPtrConstant(SmallVT.getSizeInBits(),
DL));
Overflow = DAG.getSetCC(DL, N->getValueType(1), Hi,
DAG.getConstant(0, DL, Hi.getValueType()),
ISD::SETNE);
} else {
// Signed overflow occurred if the high part does not sign extend the low.
SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Mul.getValueType(),
Mul, DAG.getValueType(SmallVT));
Overflow = DAG.getSetCC(DL, N->getValueType(1), SExt, Mul, ISD::SETNE);
}
// The only other way for overflow to occur is if the multiplication in the
// larger type itself overflowed.
Overflow = DAG.getNode(ISD::OR, DL, N->getValueType(1), Overflow,
SDValue(Mul.getNode(), 1));
// Use the calculated overflow everywhere.
ReplaceValueWith(SDValue(N, 1), Overflow);
return Mul;
}
SDValue DAGTypeLegalizer::PromoteIntRes_UDIV(SDNode *N) {
// Zero extend the input.
SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
SDValue RHS = ZExtPromotedInteger(N->getOperand(1));
return DAG.getNode(N->getOpcode(), SDLoc(N),
LHS.getValueType(), LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_UNDEF(SDNode *N) {
return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(),
N->getValueType(0)));
}
SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
SDValue Chain = N->getOperand(0); // Get the chain.
SDValue Ptr = N->getOperand(1); // Get the pointer.
EVT VT = N->getValueType(0);
SDLoc dl(N);
MVT RegVT = TLI.getRegisterType(*DAG.getContext(), VT);
unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), VT);
// The argument is passed as NumRegs registers of type RegVT.
SmallVector<SDValue, 8> Parts(NumRegs);
for (unsigned i = 0; i < NumRegs; ++i) {
Parts[i] = DAG.getVAArg(RegVT, dl, Chain, Ptr, N->getOperand(2),
N->getConstantOperandVal(3));
Chain = Parts[i].getValue(1);
}
// Handle endianness of the load.
if (DAG.getDataLayout().isBigEndian())
std::reverse(Parts.begin(), Parts.end());
// Assemble the parts in the promoted type.
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Res = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Parts[0]);
for (unsigned i = 1; i < NumRegs; ++i) {
SDValue Part = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Parts[i]);
// Shift it to the right position and "or" it in.
Part = DAG.getNode(ISD::SHL, dl, NVT, Part,
DAG.getConstant(i * RegVT.getSizeInBits(), dl,
TLI.getPointerTy(DAG.getDataLayout())));
Res = DAG.getNode(ISD::OR, dl, NVT, Res, Part);
}
// Modified the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Chain);
return Res;
}
//===----------------------------------------------------------------------===//
// Integer Operand Promotion
//===----------------------------------------------------------------------===//
/// PromoteIntegerOperand - This method is called when the specified operand of
/// the specified node is found to need promotion. At this point, all of the
/// result types of the node are known to be legal, but other operands of the
/// node may need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
DEBUG(dbgs() << "Promote integer operand: "; N->dump(&DAG); dbgs() << "\n");
SDValue Res = SDValue();
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
return false;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "PromoteIntegerOperand Op #" << OpNo << ": ";
N->dump(&DAG); dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to promote this operator's operand!");
case ISD::ANY_EXTEND: Res = PromoteIntOp_ANY_EXTEND(N); break;
case ISD::ATOMIC_STORE:
Res = PromoteIntOp_ATOMIC_STORE(cast<AtomicSDNode>(N));
break;
case ISD::BITCAST: Res = PromoteIntOp_BITCAST(N); break;
case ISD::BR_CC: Res = PromoteIntOp_BR_CC(N, OpNo); break;
case ISD::BRCOND: Res = PromoteIntOp_BRCOND(N, OpNo); break;
case ISD::BUILD_PAIR: Res = PromoteIntOp_BUILD_PAIR(N); break;
case ISD::BUILD_VECTOR: Res = PromoteIntOp_BUILD_VECTOR(N); break;
case ISD::CONCAT_VECTORS: Res = PromoteIntOp_CONCAT_VECTORS(N); break;
case ISD::EXTRACT_VECTOR_ELT: Res = PromoteIntOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::CONVERT_RNDSAT:
Res = PromoteIntOp_CONVERT_RNDSAT(N); break;
case ISD::INSERT_VECTOR_ELT:
Res = PromoteIntOp_INSERT_VECTOR_ELT(N, OpNo);break;
case ISD::SCALAR_TO_VECTOR:
Res = PromoteIntOp_SCALAR_TO_VECTOR(N); break;
case ISD::VSELECT:
case ISD::SELECT: Res = PromoteIntOp_SELECT(N, OpNo); break;
case ISD::SELECT_CC: Res = PromoteIntOp_SELECT_CC(N, OpNo); break;
case ISD::SETCC: Res = PromoteIntOp_SETCC(N, OpNo); break;
case ISD::SIGN_EXTEND: Res = PromoteIntOp_SIGN_EXTEND(N); break;
case ISD::SINT_TO_FP: Res = PromoteIntOp_SINT_TO_FP(N); break;
case ISD::STORE: Res = PromoteIntOp_STORE(cast<StoreSDNode>(N),
OpNo); break;
case ISD::MSTORE: Res = PromoteIntOp_MSTORE(cast<MaskedStoreSDNode>(N),
OpNo); break;
case ISD::MLOAD: Res = PromoteIntOp_MLOAD(cast<MaskedLoadSDNode>(N),
OpNo); break;
case ISD::TRUNCATE: Res = PromoteIntOp_TRUNCATE(N); break;
case ISD::FP16_TO_FP:
case ISD::UINT_TO_FP: Res = PromoteIntOp_UINT_TO_FP(N); break;
case ISD::ZERO_EXTEND: Res = PromoteIntOp_ZERO_EXTEND(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = PromoteIntOp_EXTRACT_SUBVECTOR(N); break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR: Res = PromoteIntOp_Shift(N); break;
}
// If the result is null, the sub-method took care of registering results etc.
if (!Res.getNode()) return false;
// If the result is N, the sub-method updated N in place. Tell the legalizer
// core about this.
if (Res.getNode() == N)
return true;
  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
         "Invalid operand promotion");
ReplaceValueWith(SDValue(N, 0), Res);
return false;
}
/// PromoteSetCCOperands - Promote the operands of a comparison. This code is
/// shared among BR_CC, SELECT_CC, and SETCC handlers.
void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &NewLHS,SDValue &NewRHS,
ISD::CondCode CCCode) {
// We have to insert explicit sign or zero extends. Note that we could
// insert sign extends for ALL conditions, but zero extend is cheaper on
// many machines (an AND instead of two shifts), so prefer it.
switch (CCCode) {
default: llvm_unreachable("Unknown integer comparison!");
case ISD::SETEQ:
case ISD::SETNE: {
SDValue OpL = GetPromotedInteger(NewLHS);
SDValue OpR = GetPromotedInteger(NewRHS);
    // We would prefer to promote the comparison operand with sign extension
    // if we find that the operand is actually the truncation of an
    // AssertSext. With this optimization, we can avoid inserting a real
    // truncate instruction, which is ultimately redundant.
if (OpL->getOpcode() == ISD::AssertSext &&
cast<VTSDNode>(OpL->getOperand(1))->getVT() == NewLHS.getValueType() &&
OpR->getOpcode() == ISD::AssertSext &&
cast<VTSDNode>(OpR->getOperand(1))->getVT() == NewRHS.getValueType()) {
NewLHS = OpL;
NewRHS = OpR;
} else {
NewLHS = ZExtPromotedInteger(NewLHS);
NewRHS = ZExtPromotedInteger(NewRHS);
}
break;
}
case ISD::SETUGE:
case ISD::SETUGT:
case ISD::SETULE:
case ISD::SETULT:
// ALL of these operations will work if we either sign or zero extend
// the operands (including the unsigned comparisons!). Zero extend is
// usually a simpler/cheaper operation, so prefer it.
NewLHS = ZExtPromotedInteger(NewLHS);
NewRHS = ZExtPromotedInteger(NewRHS);
break;
case ISD::SETGE:
case ISD::SETGT:
case ISD::SETLT:
case ISD::SETLE:
NewLHS = SExtPromotedInteger(NewLHS);
NewRHS = SExtPromotedInteger(NewRHS);
break;
}
}
SDValue DAGTypeLegalizer::PromoteIntOp_ANY_EXTEND(SDNode *N) {
SDValue Op = GetPromotedInteger(N->getOperand(0));
return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), N->getValueType(0), Op);
}
SDValue DAGTypeLegalizer::PromoteIntOp_ATOMIC_STORE(AtomicSDNode *N) {
SDValue Op2 = GetPromotedInteger(N->getOperand(2));
return DAG.getAtomic(N->getOpcode(), SDLoc(N), N->getMemoryVT(),
N->getChain(), N->getBasePtr(), Op2, N->getMemOperand(),
N->getOrdering(), N->getSynchScope());
}
SDValue DAGTypeLegalizer::PromoteIntOp_BITCAST(SDNode *N) {
// This should only occur in unusual situations like bitcasting to an
// x86_fp80, so just turn it into a store+load
return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0));
}
SDValue DAGTypeLegalizer::PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo) {
assert(OpNo == 2 && "Don't know how to promote this operand!");
SDValue LHS = N->getOperand(2);
SDValue RHS = N->getOperand(3);
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(1))->get());
// The chain (Op#0), CC (#1) and basic block destination (Op#4) are always
// legal types.
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
N->getOperand(1), LHS, RHS, N->getOperand(4)),
0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
assert(OpNo == 1 && "only know how to promote condition");
// Promote all the way up to the canonical SetCC type.
SDValue Cond = PromoteTargetBoolean(N->getOperand(1), MVT::Other);
// The chain (Op#0) and basic block destination (Op#2) are always legal types.
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Cond,
N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) {
// Since the result type is legal, the operands must promote to it.
EVT OVT = N->getOperand(0).getValueType();
SDValue Lo = ZExtPromotedInteger(N->getOperand(0));
SDValue Hi = GetPromotedInteger(N->getOperand(1));
assert(Lo.getValueType() == N->getValueType(0) && "Operand over promoted?");
SDLoc dl(N);
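// E.g. for an i32 result built from two i16 halves that were promoted to
// i32, this computes zext(Lo) | (Hi << 16).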
Hi = DAG.getNode(ISD::SHL, dl, N->getValueType(0), Hi,
DAG.getConstant(OVT.getSizeInBits(), dl,
TLI.getPointerTy(DAG.getDataLayout())));
return DAG.getNode(ISD::OR, dl, N->getValueType(0), Lo, Hi);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
// The vector type is legal but the element type is not. This implies
// that the vector is a power-of-two in length and that the element
// type does not have a strange size (eg: it is not i1).
EVT VecVT = N->getValueType(0);
unsigned NumElts = VecVT.getVectorNumElements();
assert(!((NumElts & 1) && (!TLI.isTypeLegal(VecVT))) &&
"Legal vector of one illegal element?");
// Promote the operands. Their types do not need to match the vector
// element type. Check that any extra bits introduced will be truncated
// away.
assert(N->getOperand(0).getValueType().getSizeInBits() >=
N->getValueType(0).getVectorElementType().getSizeInBits() &&
"Type of inserted value narrower than vector element type!");
SmallVector<SDValue, 16> NewOps;
for (unsigned i = 0; i < NumElts; ++i)
NewOps.push_back(GetPromotedInteger(N->getOperand(i)));
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_CONVERT_RNDSAT(SDNode *N) {
ISD::CvtCode CvtCode = cast<CvtRndSatSDNode>(N)->getCvtCode();
assert ((CvtCode == ISD::CVT_SS || CvtCode == ISD::CVT_SU ||
CvtCode == ISD::CVT_US || CvtCode == ISD::CVT_UU ||
CvtCode == ISD::CVT_FS || CvtCode == ISD::CVT_FU) &&
"can only promote integer arguments");
SDValue InOp = GetPromotedInteger(N->getOperand(0));
return DAG.getConvertRndSat(N->getValueType(0), SDLoc(N), InOp,
N->getOperand(1), N->getOperand(2),
N->getOperand(3), N->getOperand(4), CvtCode);
}
SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
unsigned OpNo) {
if (OpNo == 1) {
// Promote the inserted value. This is valid because the type does not
// have to match the vector element type.
// Check that any extra bits introduced will be truncated away.
assert(N->getOperand(1).getValueType().getSizeInBits() >=
N->getValueType(0).getVectorElementType().getSizeInBits() &&
"Type of inserted value narrower than vector element type!");
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
GetPromotedInteger(N->getOperand(1)),
N->getOperand(2)),
0);
}
assert(OpNo == 2 && "Different operand and result vector types?");
// Promote the index.
SDValue Idx = DAG.getZExtOrTrunc(N->getOperand(2), SDLoc(N),
TLI.getVectorIdxTy(DAG.getDataLayout()));
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
N->getOperand(1), Idx), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N) {
// Integer SCALAR_TO_VECTOR operands are implicitly truncated, so just promote
// the operand in place.
return SDValue(DAG.UpdateNodeOperands(N,
GetPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
assert(OpNo == 0 && "Only know how to promote the condition!");
SDValue Cond = N->getOperand(0);
EVT OpTy = N->getOperand(1).getValueType();
// Promote all the way up to the canonical SetCC type.
EVT OpVT = N->getOpcode() == ISD::SELECT ? OpTy.getScalarType() : OpTy;
Cond = PromoteTargetBoolean(Cond, OpVT);
return SDValue(DAG.UpdateNodeOperands(N, Cond, N->getOperand(1),
N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) {
assert(OpNo == 0 && "Don't know how to promote this operand!");
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(4))->get());
// The CC (#4) and the possible return values (#2 and #3) have legal types.
return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS, N->getOperand(2),
N->getOperand(3), N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) {
assert(OpNo == 0 && "Don't know how to promote this operand!");
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(2))->get());
// The CC (#2) is always legal.
return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS, N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_Shift(SDNode *N) {
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
ZExtPromotedInteger(N->getOperand(1))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) {
SDValue Op = GetPromotedInteger(N->getOperand(0));
SDLoc dl(N);
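// E.g. for sext i8 -> i64 where the i8 operand was promoted to i32:
// any-extend the promoted value to i64, then sign-extend in-register from
// the original i8 width.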
Op = DAG.getNode(ISD::ANY_EXTEND, dl, N->getValueType(0), Op);
return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Op.getValueType(),
Op, DAG.getValueType(N->getOperand(0).getValueType()));
}
SDValue DAGTypeLegalizer::PromoteIntOp_SINT_TO_FP(SDNode *N) {
return SDValue(DAG.UpdateNodeOperands(N,
SExtPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){
assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
SDValue Ch = N->getChain(), Ptr = N->getBasePtr();
SDLoc dl(N);
SDValue Val = GetPromotedInteger(N->getValue()); // Get promoted value.
// Truncate the value and store the result.
return DAG.getTruncStore(Ch, dl, Val, Ptr,
N->getMemoryVT(), N->getMemOperand());
}
SDValue DAGTypeLegalizer::PromoteIntOp_MSTORE(MaskedStoreSDNode *N, unsigned OpNo){
SDValue DataOp = N->getValue();
EVT DataVT = DataOp.getValueType();
SDValue Mask = N->getMask();
EVT MaskVT = Mask.getValueType();
SDLoc dl(N);
bool TruncateStore = false;
if (!TLI.isTypeLegal(DataVT)) {
if (getTypeAction(DataVT) == TargetLowering::TypePromoteInteger) {
DataOp = GetPromotedInteger(DataOp);
if (!TLI.isTypeLegal(MaskVT))
Mask = PromoteTargetBoolean(Mask, DataOp.getValueType());
TruncateStore = true;
}
else {
assert(getTypeAction(DataVT) == TargetLowering::TypeWidenVector &&
"Unexpected data legalization in MSTORE");
DataOp = GetWidenedVector(DataOp);
if (getTypeAction(MaskVT) == TargetLowering::TypeWidenVector)
Mask = GetWidenedVector(Mask);
else {
EVT BoolVT = getSetCCResultType(DataOp.getValueType());
// We can't use ModifyToType() here because we need to fill the mask with
// zeroes.
unsigned WidenNumElts = BoolVT.getVectorNumElements();
unsigned MaskNumElts = MaskVT.getVectorNumElements();
unsigned NumConcat = WidenNumElts / MaskNumElts;
SmallVector<SDValue, 16> Ops(NumConcat);
SDValue ZeroVal = DAG.getConstant(0, dl, MaskVT);
Ops[0] = Mask;
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = ZeroVal;
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, BoolVT, Ops);
}
}
}
else
Mask = PromoteTargetBoolean(N->getMask(), DataOp.getValueType());
return DAG.getMaskedStore(N->getChain(), dl, DataOp, N->getBasePtr(), Mask,
N->getMemoryVT(), N->getMemOperand(),
TruncateStore);
}
SDValue DAGTypeLegalizer::PromoteIntOp_MLOAD(MaskedLoadSDNode *N, unsigned OpNo){
assert(OpNo == 2 && "Only know how to promote the mask!");
EVT DataVT = N->getValueType(0);
SDValue Mask = PromoteTargetBoolean(N->getOperand(OpNo), DataVT);
SmallVector<SDValue, 4> NewOps(N->op_begin(), N->op_end());
NewOps[OpNo] = Mask;
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_TRUNCATE(SDNode *N) {
SDValue Op = GetPromotedInteger(N->getOperand(0));
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), N->getValueType(0), Op);
}
SDValue DAGTypeLegalizer::PromoteIntOp_UINT_TO_FP(SDNode *N) {
return SDValue(DAG.UpdateNodeOperands(N,
ZExtPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) {
SDLoc dl(N);
SDValue Op = GetPromotedInteger(N->getOperand(0));
Op = DAG.getNode(ISD::ANY_EXTEND, dl, N->getValueType(0), Op);
return DAG.getZeroExtendInReg(Op, dl,
N->getOperand(0).getValueType().getScalarType());
}
//===----------------------------------------------------------------------===//
// Integer Result Expansion
//===----------------------------------------------------------------------===//
/// ExpandIntegerResult - This method is called when the specified result of the
/// specified node is found to need expansion. At this point, the node may also
/// have invalid operands or may have other results that need promotion; we just
/// know that (at least) one result needs expansion.
void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
DEBUG(dbgs() << "Expand integer result: "; N->dump(&DAG); dbgs() << "\n");
SDValue Lo, Hi;
Lo = Hi = SDValue();
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getValueType(ResNo), true))
return;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "ExpandIntegerResult #" << ResNo << ": ";
N->dump(&DAG); dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to expand the result of this operator!");
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::BITCAST: ExpandRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
case ISD::VAARG: ExpandRes_VAARG(N, Lo, Hi); break;
case ISD::ANY_EXTEND: ExpandIntRes_ANY_EXTEND(N, Lo, Hi); break;
case ISD::AssertSext: ExpandIntRes_AssertSext(N, Lo, Hi); break;
case ISD::AssertZext: ExpandIntRes_AssertZext(N, Lo, Hi); break;
case ISD::BSWAP: ExpandIntRes_BSWAP(N, Lo, Hi); break;
case ISD::Constant: ExpandIntRes_Constant(N, Lo, Hi); break;
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTLZ: ExpandIntRes_CTLZ(N, Lo, Hi); break;
case ISD::CTPOP: ExpandIntRes_CTPOP(N, Lo, Hi); break;
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTTZ: ExpandIntRes_CTTZ(N, Lo, Hi); break;
case ISD::FP_TO_SINT: ExpandIntRes_FP_TO_SINT(N, Lo, Hi); break;
case ISD::FP_TO_UINT: ExpandIntRes_FP_TO_UINT(N, Lo, Hi); break;
case ISD::LOAD: ExpandIntRes_LOAD(cast<LoadSDNode>(N), Lo, Hi); break;
case ISD::MUL: ExpandIntRes_MUL(N, Lo, Hi); break;
case ISD::SDIV: ExpandIntRes_SDIV(N, Lo, Hi); break;
case ISD::SIGN_EXTEND: ExpandIntRes_SIGN_EXTEND(N, Lo, Hi); break;
case ISD::SIGN_EXTEND_INREG: ExpandIntRes_SIGN_EXTEND_INREG(N, Lo, Hi); break;
case ISD::SREM: ExpandIntRes_SREM(N, Lo, Hi); break;
case ISD::TRUNCATE: ExpandIntRes_TRUNCATE(N, Lo, Hi); break;
case ISD::UDIV: ExpandIntRes_UDIV(N, Lo, Hi); break;
case ISD::UREM: ExpandIntRes_UREM(N, Lo, Hi); break;
case ISD::ZERO_EXTEND: ExpandIntRes_ZERO_EXTEND(N, Lo, Hi); break;
case ISD::ATOMIC_LOAD: ExpandIntRes_ATOMIC_LOAD(N, Lo, Hi); break;
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_CMP_SWAP: {
std::pair<SDValue, SDValue> Tmp = ExpandAtomic(N);
SplitInteger(Tmp.first, Lo, Hi);
ReplaceValueWith(SDValue(N, 1), Tmp.second);
break;
}
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
AtomicSDNode *AN = cast<AtomicSDNode>(N);
SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::Other);
SDValue Tmp = DAG.getAtomicCmpSwap(
ISD::ATOMIC_CMP_SWAP, SDLoc(N), AN->getMemoryVT(), VTs,
N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3),
AN->getMemOperand(), AN->getSuccessOrdering(), AN->getFailureOrdering(),
AN->getSynchScope());
// Expanding to the strong ATOMIC_CMP_SWAP node means we can determine
// success simply by comparing the loaded value against the ingoing
// comparison.
SDValue Success = DAG.getSetCC(SDLoc(N), N->getValueType(1), Tmp,
N->getOperand(2), ISD::SETEQ);
SplitInteger(Tmp, Lo, Hi);
ReplaceValueWith(SDValue(N, 1), Success);
ReplaceValueWith(SDValue(N, 2), Tmp.getValue(1));
break;
}
case ISD::AND:
case ISD::OR:
case ISD::XOR: ExpandIntRes_Logical(N, Lo, Hi); break;
case ISD::ADD:
case ISD::SUB: ExpandIntRes_ADDSUB(N, Lo, Hi); break;
case ISD::ADDC:
case ISD::SUBC: ExpandIntRes_ADDSUBC(N, Lo, Hi); break;
case ISD::ADDE:
case ISD::SUBE: ExpandIntRes_ADDSUBE(N, Lo, Hi); break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: ExpandIntRes_Shift(N, Lo, Hi); break;
case ISD::SADDO:
case ISD::SSUBO: ExpandIntRes_SADDSUBO(N, Lo, Hi); break;
case ISD::UADDO:
case ISD::USUBO: ExpandIntRes_UADDSUBO(N, Lo, Hi); break;
case ISD::UMULO:
case ISD::SMULO: ExpandIntRes_XMULO(N, Lo, Hi); break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
if (Lo.getNode())
SetExpandedInteger(SDValue(N, ResNo), Lo, Hi);
}
/// Lower an atomic node to the appropriate builtin call.
std::pair <SDValue, SDValue> DAGTypeLegalizer::ExpandAtomic(SDNode *Node) {
unsigned Opc = Node->getOpcode();
MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
RTLIB::Libcall LC = RTLIB::getATOMIC(Opc, VT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected atomic op or value type!");
return ExpandChainLibCall(LC, Node, false);
}
/// N is a shift by a value that needs to be expanded,
/// and the shift amount is a constant 'Amt'. Expand the operation.
void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, const APInt &Amt,
SDValue &Lo, SDValue &Hi) {
SDLoc DL(N);
// Expand the incoming operand to be shifted, so that we have its parts
SDValue InL, InH;
GetExpandedInteger(N->getOperand(0), InL, InH);
// Though Amt shouldn't usually be 0, it's possible, e.g. when legalization
// split a vector shift such as <op1, op2> SHL <0, 2>.
if (!Amt) {
Lo = InL;
Hi = InH;
return;
}
EVT NVT = InL.getValueType();
unsigned VTBits = N->getValueType(0).getSizeInBits();
unsigned NVTBits = NVT.getSizeInBits();
EVT ShTy = N->getOperand(1).getValueType();
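// General decomposition for 0 < Amt < NVTBits (shown for 32-bit halves):
//   SHL: Lo = InL << Amt;   Hi = (InH << Amt) | (InL >> (32 - Amt))
//   SRL: Hi = InH >> Amt;   Lo = (InL >> Amt) | (InH << (32 - Amt))
//   SRA: Hi = InH >>s Amt;  Lo = (InL >> Amt) | (InH << (32 - Amt))
// The branches below also special-case amounts of NVTBits or more.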
if (N->getOpcode() == ISD::SHL) {
if (Amt.ugt(VTBits)) {
Lo = Hi = DAG.getConstant(0, DL, NVT);
} else if (Amt.ugt(NVTBits)) {
Lo = DAG.getConstant(0, DL, NVT);
Hi = DAG.getNode(ISD::SHL, DL,
NVT, InL, DAG.getConstant(Amt - NVTBits, DL, ShTy));
} else if (Amt == NVTBits) {
Lo = DAG.getConstant(0, DL, NVT);
Hi = InL;
} else if (Amt == 1 &&
TLI.isOperationLegalOrCustom(ISD::ADDC,
TLI.getTypeToExpandTo(*DAG.getContext(), NVT))) {
// Emit this X << 1 as X+X.
SDVTList VTList = DAG.getVTList(NVT, MVT::Glue);
SDValue LoOps[2] = { InL, InL };
Lo = DAG.getNode(ISD::ADDC, DL, VTList, LoOps);
SDValue HiOps[3] = { InH, InH, Lo.getValue(1) };
Hi = DAG.getNode(ISD::ADDE, DL, VTList, HiOps);
} else {
Lo = DAG.getNode(ISD::SHL, DL, NVT, InL, DAG.getConstant(Amt, DL, ShTy));
Hi = DAG.getNode(ISD::OR, DL, NVT,
DAG.getNode(ISD::SHL, DL, NVT, InH,
DAG.getConstant(Amt, DL, ShTy)),
DAG.getNode(ISD::SRL, DL, NVT, InL,
DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
}
return;
}
if (N->getOpcode() == ISD::SRL) {
if (Amt.ugt(VTBits)) {
Lo = Hi = DAG.getConstant(0, DL, NVT);
} else if (Amt.ugt(NVTBits)) {
Lo = DAG.getNode(ISD::SRL, DL,
NVT, InH, DAG.getConstant(Amt - NVTBits, DL, ShTy));
Hi = DAG.getConstant(0, DL, NVT);
} else if (Amt == NVTBits) {
Lo = InH;
Hi = DAG.getConstant(0, DL, NVT);
} else {
Lo = DAG.getNode(ISD::OR, DL, NVT,
DAG.getNode(ISD::SRL, DL, NVT, InL,
DAG.getConstant(Amt, DL, ShTy)),
DAG.getNode(ISD::SHL, DL, NVT, InH,
DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
Hi = DAG.getNode(ISD::SRL, DL, NVT, InH, DAG.getConstant(Amt, DL, ShTy));
}
return;
}
assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
if (Amt.ugt(VTBits)) {
Hi = Lo = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(NVTBits - 1, DL, ShTy));
} else if (Amt.ugt(NVTBits)) {
Lo = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(Amt - NVTBits, DL, ShTy));
Hi = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(NVTBits - 1, DL, ShTy));
} else if (Amt == NVTBits) {
Lo = InH;
Hi = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(NVTBits - 1, DL, ShTy));
} else {
Lo = DAG.getNode(ISD::OR, DL, NVT,
DAG.getNode(ISD::SRL, DL, NVT, InL,
DAG.getConstant(Amt, DL, ShTy)),
DAG.getNode(ISD::SHL, DL, NVT, InH,
DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
Hi = DAG.getNode(ISD::SRA, DL, NVT, InH, DAG.getConstant(Amt, DL, ShTy));
}
}
/// ExpandShiftWithKnownAmountBit - Try to determine whether we can simplify
/// this shift based on knowledge of the high bits of the shift amount. If we
/// can tell this, we know that the amount is >= NVTBits or < NVTBits (e.g.
/// >= 32 or < 32 when expanding to 32-bit halves), without knowing the actual
/// shift amount.
bool DAGTypeLegalizer::
ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue Amt = N->getOperand(1);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
EVT ShTy = Amt.getValueType();
unsigned ShBits = ShTy.getScalarType().getSizeInBits();
unsigned NVTBits = NVT.getScalarType().getSizeInBits();
assert(isPowerOf2_32(NVTBits) &&
"Expanded integer type size not a power of two!");
SDLoc dl(N);
APInt HighBitMask = APInt::getHighBitsSet(ShBits, ShBits - Log2_32(NVTBits));
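// E.g. with NVTBits == 32 and an i8 shift amount, HighBitMask is 0xE0: if
// any of bits 5-7 is known to be one the amount is >= 32, and if all of
// them are known to be zero it is < 32.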
APInt KnownZero, KnownOne;
DAG.computeKnownBits(N->getOperand(1), KnownZero, KnownOne);
// If we don't know anything about the high bits, exit.
if (((KnownZero|KnownOne) & HighBitMask) == 0)
return false;
// Get the incoming operand to be shifted.
SDValue InL, InH;
GetExpandedInteger(N->getOperand(0), InL, InH);
// If we know that any of the high bits of the shift amount are one, then we
// can do this as a couple of simple shifts.
if (KnownOne.intersects(HighBitMask)) {
// Mask out the high bit, which we know is set.
Amt = DAG.getNode(ISD::AND, dl, ShTy, Amt,
DAG.getConstant(~HighBitMask, dl, ShTy));
switch (N->getOpcode()) {
default: llvm_unreachable("Unknown shift");
case ISD::SHL:
Lo = DAG.getConstant(0, dl, NVT); // Low part is zero.
Hi = DAG.getNode(ISD::SHL, dl, NVT, InL, Amt); // High part from Lo part.
return true;
case ISD::SRL:
Hi = DAG.getConstant(0, dl, NVT); // Hi part is zero.
Lo = DAG.getNode(ISD::SRL, dl, NVT, InH, Amt); // Lo part from Hi part.
return true;
case ISD::SRA:
Hi = DAG.getNode(ISD::SRA, dl, NVT, InH, // Sign extend high part.
DAG.getConstant(NVTBits - 1, dl, ShTy));
Lo = DAG.getNode(ISD::SRA, dl, NVT, InH, Amt); // Lo part from Hi part.
return true;
}
}
// If we know that all of the high bits of the shift amount are zero, then we
// can do this as a couple of simple shifts.
if ((KnownZero & HighBitMask) == HighBitMask) {
// Calculate (NVTBits-1)-x. NVTBits-1 is used instead of NVTBits to avoid
// creating an undefined shift if x is zero. We can use XOR here because x
// is known to be smaller than NVTBits.
SDValue Amt2 = DAG.getNode(ISD::XOR, dl, ShTy, Amt,
DAG.getConstant(NVTBits - 1, dl, ShTy));
unsigned Op1, Op2;
switch (N->getOpcode()) {
default: llvm_unreachable("Unknown shift");
case ISD::SHL: Op1 = ISD::SHL; Op2 = ISD::SRL; break;
case ISD::SRL:
case ISD::SRA: Op1 = ISD::SRL; Op2 = ISD::SHL; break;
}
// When shifting right the arithmetic for Lo and Hi is swapped.
if (N->getOpcode() != ISD::SHL)
std::swap(InL, InH);
// Use a little trick to get the bits that move from Lo to Hi. First
// shift by one bit.
SDValue Sh1 = DAG.getNode(Op2, dl, NVT, InL, DAG.getConstant(1, dl, ShTy));
// Then compute the remaining shift with amount-1.
SDValue Sh2 = DAG.getNode(Op2, dl, NVT, Sh1, Amt2);
Lo = DAG.getNode(N->getOpcode(), dl, NVT, InL, Amt);
Hi = DAG.getNode(ISD::OR, dl, NVT, DAG.getNode(Op1, dl, NVT, InH, Amt), Sh2);
if (N->getOpcode() != ISD::SHL)
std::swap(Hi, Lo);
return true;
}
return false;
}
/// ExpandShiftWithUnknownAmountBit - Fully general expansion of integer shift
/// of any size.
bool DAGTypeLegalizer::
ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue Amt = N->getOperand(1);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
EVT ShTy = Amt.getValueType();
unsigned NVTBits = NVT.getSizeInBits();
assert(isPowerOf2_32(NVTBits) &&
"Expanded integer type size not a power of two!");
SDLoc dl(N);
// Get the incoming operand to be shifted.
SDValue InL, InH;
GetExpandedInteger(N->getOperand(0), InL, InH);
SDValue NVBitsNode = DAG.getConstant(NVTBits, dl, ShTy);
SDValue AmtExcess = DAG.getNode(ISD::SUB, dl, ShTy, Amt, NVBitsNode);
SDValue AmtLack = DAG.getNode(ISD::SUB, dl, ShTy, NVBitsNode, Amt);
SDValue isShort = DAG.getSetCC(dl, getSetCCResultType(ShTy),
Amt, NVBitsNode, ISD::SETULT);
SDValue isZero = DAG.getSetCC(dl, getSetCCResultType(ShTy),
Amt, DAG.getConstant(0, dl, ShTy),
ISD::SETEQ);
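// Compute both a "short" (Amt < NVTBits) and a "long" (Amt >= NVTBits)
// result and select between them. The extra isZero select guards the
// Amt == 0 case, where the cross-half shift by AmtLack == NVTBits would
// itself be out of range (see also the FIXME below).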
SDValue LoS, HiS, LoL, HiL;
switch (N->getOpcode()) {
default: llvm_unreachable("Unknown shift");
case ISD::SHL:
// Short: ShAmt < NVTBits
LoS = DAG.getNode(ISD::SHL, dl, NVT, InL, Amt);
HiS = DAG.getNode(ISD::OR, dl, NVT,
DAG.getNode(ISD::SHL, dl, NVT, InH, Amt),
DAG.getNode(ISD::SRL, dl, NVT, InL, AmtLack));
// Long: ShAmt >= NVTBits
LoL = DAG.getConstant(0, dl, NVT); // Lo part is zero.
HiL = DAG.getNode(ISD::SHL, dl, NVT, InL, AmtExcess); // Hi from Lo part.
Lo = DAG.getSelect(dl, NVT, isShort, LoS, LoL);
Hi = DAG.getSelect(dl, NVT, isZero, InH,
DAG.getSelect(dl, NVT, isShort, HiS, HiL));
return true;
case ISD::SRL:
// Short: ShAmt < NVTBits
HiS = DAG.getNode(ISD::SRL, dl, NVT, InH, Amt);
LoS = DAG.getNode(ISD::OR, dl, NVT,
DAG.getNode(ISD::SRL, dl, NVT, InL, Amt),
// FIXME: If Amt is zero, the following shift generates an undefined result
// on some architectures.
DAG.getNode(ISD::SHL, dl, NVT, InH, AmtLack));
// Long: ShAmt >= NVTBits
HiL = DAG.getConstant(0, dl, NVT); // Hi part is zero.
LoL = DAG.getNode(ISD::SRL, dl, NVT, InH, AmtExcess); // Lo from Hi part.
Lo = DAG.getSelect(dl, NVT, isZero, InL,
DAG.getSelect(dl, NVT, isShort, LoS, LoL));
Hi = DAG.getSelect(dl, NVT, isShort, HiS, HiL);
return true;
case ISD::SRA:
// Short: ShAmt < NVTBits
HiS = DAG.getNode(ISD::SRA, dl, NVT, InH, Amt);
LoS = DAG.getNode(ISD::OR, dl, NVT,
DAG.getNode(ISD::SRL, dl, NVT, InL, Amt),
DAG.getNode(ISD::SHL, dl, NVT, InH, AmtLack));
// Long: ShAmt >= NVTBits
HiL = DAG.getNode(ISD::SRA, dl, NVT, InH, // Sign of Hi part.
DAG.getConstant(NVTBits - 1, dl, ShTy));
LoL = DAG.getNode(ISD::SRA, dl, NVT, InH, AmtExcess); // Lo from Hi part.
Lo = DAG.getSelect(dl, NVT, isZero, InL,
DAG.getSelect(dl, NVT, isShort, LoS, LoL));
Hi = DAG.getSelect(dl, NVT, isShort, HiS, HiL);
return true;
}
}
void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
// Expand the subcomponents.
SDValue LHSL, LHSH, RHSL, RHSH;
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
EVT NVT = LHSL.getValueType();
SDValue LoOps[2] = { LHSL, RHSL };
SDValue HiOps[3] = { LHSH, RHSH };
// Do not generate ADDC/ADDE or SUBC/SUBE if the target does not support
// them. TODO: Teach operation legalization how to expand unsupported
// ADDC/ADDE/SUBC/SUBE. The problem is that these operations generate
// a carry of type MVT::Glue, but there doesn't seem to be any way to
// generate a value of this type in the expanded code sequence.
bool hasCarry =
TLI.isOperationLegalOrCustom(N->getOpcode() == ISD::ADD ?
ISD::ADDC : ISD::SUBC,
TLI.getTypeToExpandTo(*DAG.getContext(), NVT));
if (hasCarry) {
SDVTList VTList = DAG.getVTList(NVT, MVT::Glue);
if (N->getOpcode() == ISD::ADD) {
Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
Hi = DAG.getNode(ISD::ADDE, dl, VTList, HiOps);
} else {
Lo = DAG.getNode(ISD::SUBC, dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps);
}
return;
}
bool hasOVF =
TLI.isOperationLegalOrCustom(N->getOpcode() == ISD::ADD ?
ISD::UADDO : ISD::USUBO,
TLI.getTypeToExpandTo(*DAG.getContext(), NVT));
if (hasOVF) {
SDVTList VTList = DAG.getVTList(NVT, NVT);
TargetLoweringBase::BooleanContent BoolType = TLI.getBooleanContents(NVT);
int RevOpc;
if (N->getOpcode() == ISD::ADD) {
RevOpc = ISD::SUB;
Lo = DAG.getNode(ISD::UADDO, dl, VTList, LoOps);
Hi = DAG.getNode(ISD::ADD, dl, NVT, makeArrayRef(HiOps, 2));
} else {
RevOpc = ISD::ADD;
Lo = DAG.getNode(ISD::USUBO, dl, VTList, LoOps);
Hi = DAG.getNode(ISD::SUB, dl, NVT, makeArrayRef(HiOps, 2));
}
SDValue OVF = Lo.getValue(1);
switch (BoolType) {
case TargetLoweringBase::UndefinedBooleanContent:
OVF = DAG.getNode(ISD::AND, dl, NVT, DAG.getConstant(1, dl, NVT), OVF);
// Fallthrough
case TargetLoweringBase::ZeroOrOneBooleanContent:
Hi = DAG.getNode(N->getOpcode(), dl, NVT, Hi, OVF);
break;
case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
Hi = DAG.getNode(RevOpc, dl, NVT, Hi, OVF);
}
return;
}
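// Neither carry-using nor overflow-reporting operations are available, so
// compute the carry/borrow explicitly: an unsigned addition wraps iff the
// truncated sum is less than an addend, and a subtraction borrows iff
// LHSLo < RHSLo (all comparisons unsigned).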
if (N->getOpcode() == ISD::ADD) {
Lo = DAG.getNode(ISD::ADD, dl, NVT, LoOps);
Hi = DAG.getNode(ISD::ADD, dl, NVT, makeArrayRef(HiOps, 2));
SDValue Cmp1 = DAG.getSetCC(dl, getSetCCResultType(NVT), Lo, LoOps[0],
ISD::SETULT);
SDValue Carry1 = DAG.getSelect(dl, NVT, Cmp1,
DAG.getConstant(1, dl, NVT),
DAG.getConstant(0, dl, NVT));
SDValue Cmp2 = DAG.getSetCC(dl, getSetCCResultType(NVT), Lo, LoOps[1],
ISD::SETULT);
SDValue Carry2 = DAG.getSelect(dl, NVT, Cmp2,
DAG.getConstant(1, dl, NVT), Carry1);
Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, Carry2);
} else {
Lo = DAG.getNode(ISD::SUB, dl, NVT, LoOps);
Hi = DAG.getNode(ISD::SUB, dl, NVT, makeArrayRef(HiOps, 2));
SDValue Cmp =
DAG.getSetCC(dl, getSetCCResultType(LoOps[0].getValueType()),
LoOps[0], LoOps[1], ISD::SETULT);
SDValue Borrow = DAG.getSelect(dl, NVT, Cmp,
DAG.getConstant(1, dl, NVT),
DAG.getConstant(0, dl, NVT));
Hi = DAG.getNode(ISD::SUB, dl, NVT, Hi, Borrow);
}
}
void DAGTypeLegalizer::ExpandIntRes_ADDSUBC(SDNode *N,
SDValue &Lo, SDValue &Hi) {
// Expand the subcomponents.
SDValue LHSL, LHSH, RHSL, RHSH;
SDLoc dl(N);
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Glue);
SDValue LoOps[2] = { LHSL, RHSL };
SDValue HiOps[3] = { LHSH, RHSH };
if (N->getOpcode() == ISD::ADDC) {
Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
Hi = DAG.getNode(ISD::ADDE, dl, VTList, HiOps);
} else {
Lo = DAG.getNode(ISD::SUBC, dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps);
}
// Legalized the flag result - switch anything that used the old flag to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Hi.getValue(1));
}
void DAGTypeLegalizer::ExpandIntRes_ADDSUBE(SDNode *N,
SDValue &Lo, SDValue &Hi) {
// Expand the subcomponents.
SDValue LHSL, LHSH, RHSL, RHSH;
SDLoc dl(N);
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Glue);
SDValue LoOps[3] = { LHSL, RHSL, N->getOperand(2) };
SDValue HiOps[3] = { LHSH, RHSH };
Lo = DAG.getNode(N->getOpcode(), dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
Hi = DAG.getNode(N->getOpcode(), dl, VTList, HiOps);
// Legalized the flag result - switch anything that used the old flag to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Hi.getValue(1));
}
void DAGTypeLegalizer::ExpandIntRes_MERGE_VALUES(SDNode *N, unsigned ResNo,
SDValue &Lo, SDValue &Hi) {
SDValue Res = DisintegrateMERGE_VALUES(N, ResNo);
SplitInteger(Res, Lo, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_ANY_EXTEND(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
SDValue Op = N->getOperand(0);
if (Op.getValueType().bitsLE(NVT)) {
// The low part is any extension of the input (which degenerates to a copy).
Lo = DAG.getNode(ISD::ANY_EXTEND, dl, NVT, Op);
Hi = DAG.getUNDEF(NVT); // The high part is undefined.
} else {
// For example, extension of an i48 to an i64. The operand type necessarily
// promotes to the result type, so will end up being expanded too.
assert(getTypeAction(Op.getValueType()) ==
TargetLowering::TypePromoteInteger &&
"Only know how to promote this result!");
SDValue Res = GetPromotedInteger(Op);
assert(Res.getValueType() == N->getValueType(0) &&
"Operand over promoted?");
// Split the promoted operand. This will simplify when it is expanded.
SplitInteger(Res, Lo, Hi);
}
}
void DAGTypeLegalizer::ExpandIntRes_AssertSext(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
GetExpandedInteger(N->getOperand(0), Lo, Hi);
EVT NVT = Lo.getValueType();
EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
unsigned NVTBits = NVT.getSizeInBits();
unsigned EVTBits = EVT.getSizeInBits();
if (NVTBits < EVTBits) {
Hi = DAG.getNode(ISD::AssertSext, dl, NVT, Hi,
DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
EVTBits - NVTBits)));
} else {
Lo = DAG.getNode(ISD::AssertSext, dl, NVT, Lo, DAG.getValueType(EVT));
// The high part replicates the sign bit of Lo, make it explicit.
Hi = DAG.getNode(ISD::SRA, dl, NVT, Lo,
DAG.getConstant(NVTBits - 1, dl,
TLI.getPointerTy(DAG.getDataLayout())));
}
}
void DAGTypeLegalizer::ExpandIntRes_AssertZext(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
GetExpandedInteger(N->getOperand(0), Lo, Hi);
EVT NVT = Lo.getValueType();
EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
unsigned NVTBits = NVT.getSizeInBits();
unsigned EVTBits = EVT.getSizeInBits();
if (NVTBits < EVTBits) {
Hi = DAG.getNode(ISD::AssertZext, dl, NVT, Hi,
DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
EVTBits - NVTBits)));
} else {
Lo = DAG.getNode(ISD::AssertZext, dl, NVT, Lo, DAG.getValueType(EVT));
// The high part must be zero, make it explicit.
Hi = DAG.getConstant(0, dl, NVT);
}
}
void DAGTypeLegalizer::ExpandIntRes_BSWAP(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
GetExpandedInteger(N->getOperand(0), Hi, Lo); // Note swapped operands.
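// E.g. bswap(i64 0x0011223344556677) == 0x7766554433221100: the new low
// half is bswap(0x00112233) == 0x33221100 and the new high half is
// bswap(0x44556677) == 0x77665544.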
Lo = DAG.getNode(ISD::BSWAP, dl, Lo.getValueType(), Lo);
Hi = DAG.getNode(ISD::BSWAP, dl, Hi.getValueType(), Hi);
}
void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned NBitWidth = NVT.getSizeInBits();
auto Constant = cast<ConstantSDNode>(N);
const APInt &Cst = Constant->getAPIntValue();
bool IsTarget = Constant->isTargetOpcode();
bool IsOpaque = Constant->isOpaque();
SDLoc dl(N);
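// E.g. expanding the i64 constant 0x1122334455667788 into i32 halves
// yields Lo = 0x55667788 and Hi = 0x11223344.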
Lo = DAG.getConstant(Cst.trunc(NBitWidth), dl, NVT, IsTarget, IsOpaque);
Hi = DAG.getConstant(Cst.lshr(NBitWidth).trunc(NBitWidth), dl, NVT, IsTarget,
IsOpaque);
}
void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
// ctlz(HiLo) -> Hi != 0 ? ctlz(Hi) : (ctlz(Lo) + NVTBits)
GetExpandedInteger(N->getOperand(0), Lo, Hi);
EVT NVT = Lo.getValueType();
SDValue HiNotZero = DAG.getSetCC(dl, getSetCCResultType(NVT), Hi,
DAG.getConstant(0, dl, NVT), ISD::SETNE);
SDValue LoLZ = DAG.getNode(N->getOpcode(), dl, NVT, Lo);
SDValue HiLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, NVT, Hi);
Lo = DAG.getSelect(dl, NVT, HiNotZero, HiLZ,
DAG.getNode(ISD::ADD, dl, NVT, LoLZ,
DAG.getConstant(NVT.getSizeInBits(), dl,
NVT)));
Hi = DAG.getConstant(0, dl, NVT);
}
void DAGTypeLegalizer::ExpandIntRes_CTPOP(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
// ctpop(HiLo) -> ctpop(Hi)+ctpop(Lo)
GetExpandedInteger(N->getOperand(0), Lo, Hi);
EVT NVT = Lo.getValueType();
Lo = DAG.getNode(ISD::ADD, dl, NVT, DAG.getNode(ISD::CTPOP, dl, NVT, Lo),
DAG.getNode(ISD::CTPOP, dl, NVT, Hi));
Hi = DAG.getConstant(0, dl, NVT);
}
void DAGTypeLegalizer::ExpandIntRes_CTTZ(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
// cttz(HiLo) -> Lo != 0 ? cttz(Lo) : (cttz(Hi) + NVTBits)
GetExpandedInteger(N->getOperand(0), Lo, Hi);
EVT NVT = Lo.getValueType();
SDValue LoNotZero = DAG.getSetCC(dl, getSetCCResultType(NVT), Lo,
DAG.getConstant(0, dl, NVT), ISD::SETNE);
SDValue LoLZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, NVT, Lo);
SDValue HiLZ = DAG.getNode(N->getOpcode(), dl, NVT, Hi);
Lo = DAG.getSelect(dl, NVT, LoNotZero, LoLZ,
DAG.getNode(ISD::ADD, dl, NVT, HiLZ,
DAG.getConstant(NVT.getSizeInBits(), dl,
NVT)));
Hi = DAG.getConstant(0, dl, NVT);
}
void DAGTypeLegalizer::ExpandIntRes_FP_TO_SINT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
SDValue Op = N->getOperand(0);
if (getTypeAction(Op.getValueType()) == TargetLowering::TypePromoteFloat)
Op = GetPromotedFloat(Op);
RTLIB::Libcall LC = RTLIB::getFPTOSINT(Op.getValueType(), VT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-sint conversion!");
SplitInteger(TLI.makeLibCall(DAG, LC, VT, &Op, 1, true/*irrelevant*/,
dl).first,
Lo, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_FP_TO_UINT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
SDValue Op = N->getOperand(0);
if (getTypeAction(Op.getValueType()) == TargetLowering::TypePromoteFloat)
Op = GetPromotedFloat(Op);
RTLIB::Libcall LC = RTLIB::getFPTOUINT(Op.getValueType(), VT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-uint conversion!");
SplitInteger(TLI.makeLibCall(DAG, LC, VT, &Op, 1, false/*irrelevant*/,
dl).first,
Lo, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
SDValue &Lo, SDValue &Hi) {
if (ISD::isNormalLoad(N)) {
ExpandRes_NormalLoad(N, Lo, Hi);
return;
}
assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
ISD::LoadExtType ExtType = N->getExtensionType();
unsigned Alignment = N->getAlignment();
bool isVolatile = N->isVolatile();
bool isNonTemporal = N->isNonTemporal();
bool isInvariant = N->isInvariant();
AAMDNodes AAInfo = N->getAAInfo();
SDLoc dl(N);
assert(NVT.isByteSized() && "Expanded type not byte sized!");
if (N->getMemoryVT().bitsLE(NVT)) {
EVT MemVT = N->getMemoryVT();
Lo = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getPointerInfo(),
MemVT, isVolatile, isNonTemporal, isInvariant,
Alignment, AAInfo);
// Remember the chain.
Ch = Lo.getValue(1);
if (ExtType == ISD::SEXTLOAD) {
// The high part is obtained by SRA'ing all but one of the bits of the
// lo part.
unsigned LoSize = Lo.getValueType().getSizeInBits();
Hi = DAG.getNode(ISD::SRA, dl, NVT, Lo,
DAG.getConstant(LoSize - 1, dl,
TLI.getPointerTy(DAG.getDataLayout())));
} else if (ExtType == ISD::ZEXTLOAD) {
// The high part is just a zero.
Hi = DAG.getConstant(0, dl, NVT);
} else {
assert(ExtType == ISD::EXTLOAD && "Unknown extload!");
// The high part is undefined.
Hi = DAG.getUNDEF(NVT);
}
} else if (DAG.getDataLayout().isLittleEndian()) {
// Little-endian - low bits are at low addresses.
Lo = DAG.getLoad(NVT, dl, Ch, Ptr, N->getPointerInfo(),
isVolatile, isNonTemporal, isInvariant, Alignment,
AAInfo);
unsigned ExcessBits =
N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits();
EVT NEVT = EVT::getIntegerVT(*DAG.getContext(), ExcessBits);
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize), NEVT,
isVolatile, isNonTemporal, isInvariant,
MinAlign(Alignment, IncrementSize), AAInfo);
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
} else {
// Big-endian - high bits are at low addresses. Favor aligned loads at
// the cost of some bit-fiddling.
EVT MemVT = N->getMemoryVT();
unsigned EBytes = MemVT.getStoreSize();
unsigned IncrementSize = NVT.getSizeInBits()/8;
unsigned ExcessBits = (EBytes - IncrementSize)*8;
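// E.g. a big-endian i48 load expanded to i32 halves: EBytes == 6,
// IncrementSize == 4 and ExcessBits == 16. The first load reads the top
// 32 bits, the second zero-extends the remaining 16, and the bit-fiddling
// afterwards moves those 16 bits into the top of Lo and shifts Hi into
// place.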
// Load both the high bits and maybe some of the low bits.
Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getPointerInfo(),
EVT::getIntegerVT(*DAG.getContext(),
MemVT.getSizeInBits() - ExcessBits),
isVolatile, isNonTemporal, isInvariant, Alignment,
AAInfo);
// Increment the pointer to the other half.
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
// Load the rest of the low bits.
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, NVT, Ch, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
isVolatile, isNonTemporal, isInvariant,
MinAlign(Alignment, IncrementSize), AAInfo);
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
if (ExcessBits < NVT.getSizeInBits()) {
// Transfer low bits from the bottom of Hi to the top of Lo.
Lo = DAG.getNode(
ISD::OR, dl, NVT, Lo,
DAG.getNode(ISD::SHL, dl, NVT, Hi,
DAG.getConstant(ExcessBits, dl,
TLI.getPointerTy(DAG.getDataLayout()))));
// Move high bits to the right position in Hi.
Hi = DAG.getNode(ExtType == ISD::SEXTLOAD ? ISD::SRA : ISD::SRL, dl, NVT,
Hi,
DAG.getConstant(NVT.getSizeInBits() - ExcessBits, dl,
TLI.getPointerTy(DAG.getDataLayout())));
}
}
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Ch);
}
void DAGTypeLegalizer::ExpandIntRes_Logical(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
SDValue LL, LH, RL, RH;
GetExpandedInteger(N->getOperand(0), LL, LH);
GetExpandedInteger(N->getOperand(1), RL, RH);
Lo = DAG.getNode(N->getOpcode(), dl, LL.getValueType(), LL, RL);
Hi = DAG.getNode(N->getOpcode(), dl, LL.getValueType(), LH, RH);
}
void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDLoc dl(N);
SDValue LL, LH, RL, RH;
GetExpandedInteger(N->getOperand(0), LL, LH);
GetExpandedInteger(N->getOperand(1), RL, RH);
if (TLI.expandMUL(N, Lo, Hi, NVT, DAG, LL, LH, RL, RH))
return;
// If nothing else, we can make a libcall.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i16)
LC = RTLIB::MUL_I16;
else if (VT == MVT::i32)
LC = RTLIB::MUL_I32;
else if (VT == MVT::i64)
LC = RTLIB::MUL_I64;
else if (VT == MVT::i128)
LC = RTLIB::MUL_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported MUL!");
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
SplitInteger(TLI.makeLibCall(DAG, LC, VT, Ops, 2, true/*irrelevant*/,
dl).first,
Lo, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_SADDSUBO(SDNode *Node,
SDValue &Lo, SDValue &Hi) {
SDValue LHS = Node->getOperand(0);
SDValue RHS = Node->getOperand(1);
SDLoc dl(Node);
// Expand the result by simply replacing it with the equivalent
// non-overflow-checking operation.
SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
LHS, RHS);
SplitInteger(Sum, Lo, Hi);
// Compute the overflow.
//
// LHSSign -> LHS >= 0
// RHSSign -> RHS >= 0
// SumSign -> Sum >= 0
//
// Add:
// Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
// Sub:
// Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
//
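// Worked example for the add case (using i8 arithmetic): 100 + 100 wraps
// to -56, so the operand signs agree (both non-negative) while the sum's
// sign differs, and overflow is reported.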
EVT OType = Node->getValueType(1);
SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());
SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
Node->getOpcode() == ISD::SADDO ?
ISD::SETEQ : ISD::SETNE);
SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
// Use the calculated overflow everywhere.
ReplaceValueWith(SDValue(Node, 1), Cmp);
}
void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
SDLoc dl(N);
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
if (TLI.getOperationAction(ISD::SDIVREM, VT) == TargetLowering::Custom) {
SDValue Res = DAG.getNode(ISD::SDIVREM, dl, DAG.getVTList(VT, VT), Ops);
SplitInteger(Res.getValue(0), Lo, Hi);
return;
}
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i16)
LC = RTLIB::SDIV_I16;
else if (VT == MVT::i32)
LC = RTLIB::SDIV_I32;
else if (VT == MVT::i64)
LC = RTLIB::SDIV_I64;
else if (VT == MVT::i128)
LC = RTLIB::SDIV_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
SplitInteger(TLI.makeLibCall(DAG, LC, VT, Ops, 2, true, dl).first, Lo, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
SDLoc dl(N);
// If we can emit an efficient shift operation, do so now. Check to see if
// the RHS is a constant.
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
return ExpandShiftByConstant(N, CN->getAPIntValue(), Lo, Hi);
// If we can determine that the high bit of the shift is zero or one, even if
// the low bits are variable, emit this shift in an optimized form.
if (ExpandShiftWithKnownAmountBit(N, Lo, Hi))
return;
// If this target supports shift_PARTS, use it. First, map to the _PARTS opc.
unsigned PartsOpc;
if (N->getOpcode() == ISD::SHL) {
PartsOpc = ISD::SHL_PARTS;
} else if (N->getOpcode() == ISD::SRL) {
PartsOpc = ISD::SRL_PARTS;
} else {
assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
PartsOpc = ISD::SRA_PARTS;
}
// Next check to see if the target supports the chosen _PARTS operation or
// if it will custom expand it.
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
TargetLowering::LegalizeAction Action = TLI.getOperationAction(PartsOpc, NVT);
if ((Action == TargetLowering::Legal && TLI.isTypeLegal(NVT)) ||
Action == TargetLowering::Custom) {
// Expand the subcomponents.
SDValue LHSL, LHSH;
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
EVT VT = LHSL.getValueType();
// If the shift amount operand is coming from a vector legalization it may
// have an illegal type. Fix that first by casting the operand, otherwise
// the new SHL_PARTS operation would need further legalization.
SDValue ShiftOp = N->getOperand(1);
EVT ShiftTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
assert(ShiftTy.getScalarType().getSizeInBits() >=
Log2_32_Ceil(VT.getScalarType().getSizeInBits()) &&
"ShiftAmountTy is too small to cover the range of this type!");
if (ShiftOp.getValueType() != ShiftTy)
ShiftOp = DAG.getZExtOrTrunc(ShiftOp, dl, ShiftTy);
SDValue Ops[] = { LHSL, LHSH, ShiftOp };
Lo = DAG.getNode(PartsOpc, dl, DAG.getVTList(VT, VT), Ops);
Hi = Lo.getValue(1);
return;
}
// Otherwise, emit a libcall.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
bool isSigned;
if (N->getOpcode() == ISD::SHL) {
isSigned = false; /*sign irrelevant*/
if (VT == MVT::i16)
LC = RTLIB::SHL_I16;
else if (VT == MVT::i32)
LC = RTLIB::SHL_I32;
else if (VT == MVT::i64)
LC = RTLIB::SHL_I64;
else if (VT == MVT::i128)
LC = RTLIB::SHL_I128;
} else if (N->getOpcode() == ISD::SRL) {
isSigned = false;
if (VT == MVT::i16)
LC = RTLIB::SRL_I16;
else if (VT == MVT::i32)
LC = RTLIB::SRL_I32;
else if (VT == MVT::i64)
LC = RTLIB::SRL_I64;
else if (VT == MVT::i128)
LC = RTLIB::SRL_I128;
} else {
assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
isSigned = true;
if (VT == MVT::i16)
LC = RTLIB::SRA_I16;
else if (VT == MVT::i32)
LC = RTLIB::SRA_I32;
else if (VT == MVT::i64)
LC = RTLIB::SRA_I64;
else if (VT == MVT::i128)
LC = RTLIB::SRA_I128;
}
if (LC != RTLIB::UNKNOWN_LIBCALL && TLI.getLibcallName(LC)) {
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
SplitInteger(TLI.makeLibCall(DAG, LC, VT, Ops, 2, isSigned, dl).first, Lo,
Hi);
return;
}
if (!ExpandShiftWithUnknownAmountBit(N, Lo, Hi))
llvm_unreachable("Unsupported shift!");
}
void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
SDValue Op = N->getOperand(0);
if (Op.getValueType().bitsLE(NVT)) {
// The low part is sign extension of the input (degenerates to a copy).
Lo = DAG.getNode(ISD::SIGN_EXTEND, dl, NVT, N->getOperand(0));
// The high part is obtained by SRA'ing all but one of the bits of low part.
unsigned LoSize = NVT.getSizeInBits();
Hi = DAG.getNode(
ISD::SRA, dl, NVT, Lo,
DAG.getConstant(LoSize - 1, dl, TLI.getPointerTy(DAG.getDataLayout())));
} else {
// For example, extension of an i48 to an i64. The operand type necessarily
// promotes to the result type, so will end up being expanded too.
assert(getTypeAction(Op.getValueType()) ==
TargetLowering::TypePromoteInteger &&
"Only know how to promote this result!");
SDValue Res = GetPromotedInteger(Op);
assert(Res.getValueType() == N->getValueType(0) &&
"Operand over promoted?");
// Split the promoted operand. This will simplify when it is expanded.
SplitInteger(Res, Lo, Hi);
unsigned ExcessBits =
Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,
DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
ExcessBits)));
}
}
void DAGTypeLegalizer::
ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
GetExpandedInteger(N->getOperand(0), Lo, Hi);
EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
if (EVT.bitsLE(Lo.getValueType())) {
// sext_inreg the low part if needed.
Lo = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Lo.getValueType(), Lo,
N->getOperand(1));
// The high part gets the sign extension from the lo-part. This handles
// things like sextinreg V:i64 from i8.
Hi = DAG.getNode(ISD::SRA, dl, Hi.getValueType(), Lo,
DAG.getConstant(Hi.getValueType().getSizeInBits() - 1, dl,
TLI.getPointerTy(DAG.getDataLayout())));
} else {
// For example, extension of an i48 to an i64. Leave the low part alone,
// sext_inreg the high part.
unsigned ExcessBits =
EVT.getSizeInBits() - Lo.getValueType().getSizeInBits();
Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,
DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
ExcessBits)));
}
}
void DAGTypeLegalizer::ExpandIntRes_SREM(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
SDLoc dl(N);
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
if (TLI.getOperationAction(ISD::SDIVREM, VT) == TargetLowering::Custom) {
SDValue Res = DAG.getNode(ISD::SDIVREM, dl, DAG.getVTList(VT, VT), Ops);
SplitInteger(Res.getValue(1), Lo, Hi);
return;
}
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i16)
LC = RTLIB::SREM_I16;
else if (VT == MVT::i32)
LC = RTLIB::SREM_I32;
else if (VT == MVT::i64)
LC = RTLIB::SREM_I64;
else if (VT == MVT::i128)
LC = RTLIB::SREM_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
SplitInteger(TLI.makeLibCall(DAG, LC, VT, Ops, 2, true, dl).first, Lo, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
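// E.g. truncating to an i64 that is itself expanded into i32 halves:
// Lo = trunc(Op) and Hi = trunc(Op >> 32), both truncated to i32.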
Lo = DAG.getNode(ISD::TRUNCATE, dl, NVT, N->getOperand(0));
Hi = DAG.getNode(ISD::SRL, dl, N->getOperand(0).getValueType(),
N->getOperand(0),
DAG.getConstant(NVT.getSizeInBits(), dl,
TLI.getPointerTy(DAG.getDataLayout())));
Hi = DAG.getNode(ISD::TRUNCATE, dl, NVT, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDLoc dl(N);
// Expand the result by simply replacing it with the equivalent
// non-overflow-checking operation.
SDValue Sum = DAG.getNode(N->getOpcode() == ISD::UADDO ?
ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
LHS, RHS);
SplitInteger(Sum, Lo, Hi);
// Calculate the overflow: addition overflows iff a + b < a, and subtraction
// overflows iff a - b > a.
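// E.g. in i8 arithmetic, 200 + 100 wraps to 44 and 44 < 200, while
// 50 - 100 wraps to 206 and 206 > 50.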
SDValue Ofl = DAG.getSetCC(dl, N->getValueType(1), Sum, LHS,
N->getOpcode() == ISD::UADDO ?
ISD::SETULT : ISD::SETUGT);
// Use the calculated overflow everywhere.
ReplaceValueWith(SDValue(N, 1), Ofl);
}
void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
SDLoc dl(N);
// A divide for UMULO should be faster than a function call.
if (N->getOpcode() == ISD::UMULO) {
SDValue LHS = N->getOperand(0), RHS = N->getOperand(1);
SDValue MUL = DAG.getNode(ISD::MUL, dl, LHS.getValueType(), LHS, RHS);
SplitInteger(MUL, Lo, Hi);
// Check the overflow with a divide rather than a function call. Select to
// make sure we aren't dividing by 0.
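// If RHS is nonzero, the product overflowed iff MUL / RHS != LHS; when RHS
// is zero no overflow is possible, and the second select below forces
// Overflow to false in that case.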
SDValue isZero = DAG.getSetCC(dl, getSetCCResultType(VT),
RHS, DAG.getConstant(0, dl, VT), ISD::SETEQ);
SDValue NotZero = DAG.getSelect(dl, VT, isZero,
DAG.getConstant(1, dl, VT), RHS);
SDValue DIV = DAG.getNode(ISD::UDIV, dl, VT, MUL, NotZero);
SDValue Overflow = DAG.getSetCC(dl, N->getValueType(1), DIV, LHS,
ISD::SETNE);
Overflow = DAG.getSelect(dl, N->getValueType(1), isZero,
DAG.getConstant(0, dl, N->getValueType(1)),
Overflow);
ReplaceValueWith(SDValue(N, 1), Overflow);
return;
}
Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
// Replace this with a libcall that will check overflow.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i32)
LC = RTLIB::MULO_I32;
else if (VT == MVT::i64)
LC = RTLIB::MULO_I64;
else if (VT == MVT::i128)
LC = RTLIB::MULO_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported XMULO!");
SDValue Temp = DAG.CreateStackTemporary(PtrVT);
// Temporary for the overflow value, default it to zero.
SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl,
DAG.getConstant(0, dl, PtrVT), Temp,
MachinePointerInfo(), false, false, 0);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (const SDValue &Op : N->op_values()) {
EVT ArgVT = Op.getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Op;
Entry.Ty = ArgTy;
Entry.isSExt = true;
Entry.isZExt = false;
Args.push_back(Entry);
}
// Also pass the address of the overflow check.
Entry.Node = Temp;
Entry.Ty = PtrTy->getPointerTo();
Entry.isSExt = true;
Entry.isZExt = false;
Args.push_back(Entry);
SDValue Func = DAG.getExternalSymbol(TLI.getLibcallName(LC), PtrVT);
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(Chain)
.setCallee(TLI.getLibcallCallingConv(LC), RetTy, Func, std::move(Args), 0)
.setSExtResult();
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
SplitInteger(CallInfo.first, Lo, Hi);
SDValue Temp2 = DAG.getLoad(PtrVT, dl, CallInfo.second, Temp,
MachinePointerInfo(), false, false, false, 0);
SDValue Ofl = DAG.getSetCC(dl, N->getValueType(1), Temp2,
DAG.getConstant(0, dl, PtrVT),
ISD::SETNE);
// Use the overflow from the libcall everywhere.
ReplaceValueWith(SDValue(N, 1), Ofl);
}
void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
SDLoc dl(N);
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
if (TLI.getOperationAction(ISD::UDIVREM, VT) == TargetLowering::Custom) {
SDValue Res = DAG.getNode(ISD::UDIVREM, dl, DAG.getVTList(VT, VT), Ops);
SplitInteger(Res.getValue(0), Lo, Hi);
return;
}
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i16)
LC = RTLIB::UDIV_I16;
else if (VT == MVT::i32)
LC = RTLIB::UDIV_I32;
else if (VT == MVT::i64)
LC = RTLIB::UDIV_I64;
else if (VT == MVT::i128)
LC = RTLIB::UDIV_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported UDIV!");
SplitInteger(TLI.makeLibCall(DAG, LC, VT, Ops, 2, false, dl).first, Lo, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_UREM(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
SDLoc dl(N);
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
if (TLI.getOperationAction(ISD::UDIVREM, VT) == TargetLowering::Custom) {
SDValue Res = DAG.getNode(ISD::UDIVREM, dl, DAG.getVTList(VT, VT), Ops);
SplitInteger(Res.getValue(1), Lo, Hi);
return;
}
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i16)
LC = RTLIB::UREM_I16;
else if (VT == MVT::i32)
LC = RTLIB::UREM_I32;
else if (VT == MVT::i64)
LC = RTLIB::UREM_I64;
else if (VT == MVT::i128)
LC = RTLIB::UREM_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported UREM!");
SplitInteger(TLI.makeLibCall(DAG, LC, VT, Ops, 2, false, dl).first, Lo, Hi);
}
void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
SDValue Op = N->getOperand(0);
if (Op.getValueType().bitsLE(NVT)) {
// The low part is zero extension of the input (degenerates to a copy).
Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N->getOperand(0));
Hi = DAG.getConstant(0, dl, NVT); // The high part is just a zero.
} else {
// For example, extension of an i48 to an i64. The operand type necessarily
// promotes to the result type, so will end up being expanded too.
assert(getTypeAction(Op.getValueType()) ==
TargetLowering::TypePromoteInteger &&
"Only know how to promote this result!");
SDValue Res = GetPromotedInteger(Op);
assert(Res.getValueType() == N->getValueType(0) &&
"Operand over promoted?");
// Split the promoted operand. This will simplify when it is expanded.
SplitInteger(Res, Lo, Hi);
unsigned ExcessBits =
Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
Hi = DAG.getZeroExtendInReg(Hi, dl,
EVT::getIntegerVT(*DAG.getContext(),
ExcessBits));
}
}
void DAGTypeLegalizer::ExpandIntRes_ATOMIC_LOAD(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
EVT VT = cast<AtomicSDNode>(N)->getMemoryVT();
SDVTList VTs = DAG.getVTList(VT, MVT::i1, MVT::Other);
SDValue Zero = DAG.getConstant(0, dl, VT);
SDValue Swap = DAG.getAtomicCmpSwap(
ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl,
cast<AtomicSDNode>(N)->getMemoryVT(), VTs, N->getOperand(0),
N->getOperand(1), Zero, Zero, cast<AtomicSDNode>(N)->getMemOperand(),
cast<AtomicSDNode>(N)->getOrdering(),
cast<AtomicSDNode>(N)->getOrdering(),
cast<AtomicSDNode>(N)->getSynchScope());
ReplaceValueWith(SDValue(N, 0), Swap.getValue(0));
ReplaceValueWith(SDValue(N, 1), Swap.getValue(2));
}
//===----------------------------------------------------------------------===//
// Integer Operand Expansion
//===----------------------------------------------------------------------===//
/// ExpandIntegerOperand - This method is called when the specified operand of
/// the specified node is found to need expansion. At this point, all of the
/// result types of the node are known to be legal, but other operands of the
/// node may need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
DEBUG(dbgs() << "Expand integer operand: "; N->dump(&DAG); dbgs() << "\n");
SDValue Res = SDValue();
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
return false;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
dbgs() << "ExpandIntegerOperand Op #" << OpNo << ": ";
N->dump(&DAG); dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to expand this operator's operand!");
case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
case ISD::BR_CC: Res = ExpandIntOp_BR_CC(N); break;
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
case ISD::INSERT_VECTOR_ELT: Res = ExpandOp_INSERT_VECTOR_ELT(N); break;
case ISD::SCALAR_TO_VECTOR: Res = ExpandOp_SCALAR_TO_VECTOR(N); break;
case ISD::SELECT_CC: Res = ExpandIntOp_SELECT_CC(N); break;
case ISD::SETCC: Res = ExpandIntOp_SETCC(N); break;
case ISD::SINT_TO_FP: Res = ExpandIntOp_SINT_TO_FP(N); break;
case ISD::STORE: Res = ExpandIntOp_STORE(cast<StoreSDNode>(N), OpNo); break;
case ISD::TRUNCATE: Res = ExpandIntOp_TRUNCATE(N); break;
case ISD::UINT_TO_FP: Res = ExpandIntOp_UINT_TO_FP(N); break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR: Res = ExpandIntOp_Shift(N); break;
case ISD::RETURNADDR:
case ISD::FRAMEADDR: Res = ExpandIntOp_RETURNADDR(N); break;
case ISD::ATOMIC_STORE: Res = ExpandIntOp_ATOMIC_STORE(N); break;
}
// If the result is null, the sub-method took care of registering results etc.
if (!Res.getNode()) return false;
// If the result is N, the sub-method updated N in place. Tell the legalizer
// core about this.
if (Res.getNode() == N)
return true;
assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
"Invalid operand expansion");
ReplaceValueWith(SDValue(N, 0), Res);
return false;
}
/// IntegerExpandSetCCOperands - Expand the operands of a comparison. This code
/// is shared among BR_CC, SELECT_CC, and SETCC handlers.
void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS,
SDValue &NewRHS,
ISD::CondCode &CCCode,
SDLoc dl) {
SDValue LHSLo, LHSHi, RHSLo, RHSHi;
GetExpandedInteger(NewLHS, LHSLo, LHSHi);
GetExpandedInteger(NewRHS, RHSLo, RHSHi);
if (CCCode == ISD::SETEQ || CCCode == ISD::SETNE) {
if (RHSLo == RHSHi) {
if (ConstantSDNode *RHSCST = dyn_cast<ConstantSDNode>(RHSLo)) {
if (RHSCST->isAllOnesValue()) {
// Equality comparison to -1.
NewLHS = DAG.getNode(ISD::AND, dl,
LHSLo.getValueType(), LHSLo, LHSHi);
NewRHS = RHSLo;
return;
}
}
}
NewLHS = DAG.getNode(ISD::XOR, dl, LHSLo.getValueType(), LHSLo, RHSLo);
NewRHS = DAG.getNode(ISD::XOR, dl, LHSLo.getValueType(), LHSHi, RHSHi);
NewLHS = DAG.getNode(ISD::OR, dl, NewLHS.getValueType(), NewLHS, NewRHS);
NewRHS = DAG.getConstant(0, dl, NewLHS.getValueType());
return;
}
// If this is a comparison of the sign bit, just look at the top part.
  // X > -1, X < 0
if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(NewRHS))
if ((CCCode == ISD::SETLT && CST->isNullValue()) || // X < 0
(CCCode == ISD::SETGT && CST->isAllOnesValue())) { // X > -1
NewLHS = LHSHi;
NewRHS = RHSHi;
return;
}
// FIXME: This generated code sucks.
ISD::CondCode LowCC;
switch (CCCode) {
default: llvm_unreachable("Unknown integer setcc!");
case ISD::SETLT:
case ISD::SETULT: LowCC = ISD::SETULT; break;
case ISD::SETGT:
case ISD::SETUGT: LowCC = ISD::SETUGT; break;
case ISD::SETLE:
case ISD::SETULE: LowCC = ISD::SETULE; break;
case ISD::SETGE:
case ISD::SETUGE: LowCC = ISD::SETUGE; break;
}
// Tmp1 = lo(op1) < lo(op2) // Always unsigned comparison
// Tmp2 = hi(op1) < hi(op2) // Signedness depends on operands
// dest = hi(op1) == hi(op2) ? Tmp1 : Tmp2;
// NOTE: on targets without efficient SELECT of bools, we can always use
// this identity: (B1 ? B2 : B3) --> (B1 & B2)|(!B1&B3)
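  // Applying that identity here with EQ = (hi(op1) == hi(op2)) gives:
  //   dest = (EQ & Tmp1) | (!EQ & Tmp2)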
TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, AfterLegalizeTypes, true,
nullptr);
SDValue Tmp1, Tmp2;
if (TLI.isTypeLegal(LHSLo.getValueType()) &&
TLI.isTypeLegal(RHSLo.getValueType()))
Tmp1 = TLI.SimplifySetCC(getSetCCResultType(LHSLo.getValueType()),
LHSLo, RHSLo, LowCC, false, DagCombineInfo, dl);
if (!Tmp1.getNode())
Tmp1 = DAG.getSetCC(dl, getSetCCResultType(LHSLo.getValueType()),
LHSLo, RHSLo, LowCC);
if (TLI.isTypeLegal(LHSHi.getValueType()) &&
TLI.isTypeLegal(RHSHi.getValueType()))
Tmp2 = TLI.SimplifySetCC(getSetCCResultType(LHSHi.getValueType()),
LHSHi, RHSHi, CCCode, false, DagCombineInfo, dl);
if (!Tmp2.getNode())
Tmp2 = DAG.getNode(ISD::SETCC, dl,
getSetCCResultType(LHSHi.getValueType()),
LHSHi, RHSHi, DAG.getCondCode(CCCode));
ConstantSDNode *Tmp1C = dyn_cast<ConstantSDNode>(Tmp1.getNode());
ConstantSDNode *Tmp2C = dyn_cast<ConstantSDNode>(Tmp2.getNode());
if ((Tmp1C && Tmp1C->isNullValue()) ||
(Tmp2C && Tmp2C->isNullValue() &&
(CCCode == ISD::SETLE || CCCode == ISD::SETGE ||
CCCode == ISD::SETUGE || CCCode == ISD::SETULE)) ||
(Tmp2C && Tmp2C->getAPIntValue() == 1 &&
(CCCode == ISD::SETLT || CCCode == ISD::SETGT ||
CCCode == ISD::SETUGT || CCCode == ISD::SETULT))) {
    // If the low-part compare is known false, the result is just the
    // high-part compare. For LE / GE, if the high-part compare is known
    // false, ignore the low part; for LT / GT, if it is known true, ignore
    // the low part.
NewLHS = Tmp2;
NewRHS = SDValue();
return;
}
NewLHS = TLI.SimplifySetCC(getSetCCResultType(LHSHi.getValueType()),
LHSHi, RHSHi, ISD::SETEQ, false,
DagCombineInfo, dl);
if (!NewLHS.getNode())
NewLHS = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()),
LHSHi, RHSHi, ISD::SETEQ);
NewLHS = DAG.getSelect(dl, Tmp1.getValueType(),
NewLHS, Tmp1, Tmp2);
NewRHS = SDValue();
}
SDValue DAGTypeLegalizer::ExpandIntOp_BR_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(2), NewRHS = N->getOperand(3);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get();
IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N));
  // If IntegerExpandSetCCOperands returned a scalar, we need to compare the
  // result against zero to select between true and false values.
if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, SDLoc(N), NewLHS.getValueType());
CCCode = ISD::SETNE;
}
// Update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N));
  // If IntegerExpandSetCCOperands returned a scalar, we need to compare the
  // result against zero to select between true and false values.
if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, SDLoc(N), NewLHS.getValueType());
CCCode = ISD::SETNE;
}
// Update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N));
  // If IntegerExpandSetCCOperands returned a scalar, use it.
if (!NewRHS.getNode()) {
assert(NewLHS.getValueType() == N->getValueType(0) &&
"Unexpected setcc expansion!");
return NewLHS;
}
// Otherwise, update N to have the operands specified.
return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_Shift(SDNode *N) {
// The value being shifted is legal, but the shift amount is too big.
// It follows that either the result of the shift is undefined, or the
// upper half of the shift amount is zero. Just use the lower half.
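  // For example, an i32 shifted by an i64 amount: any amount of 32 or more
  // already gives an undefined result, so every meaningful amount fits in the
  // low half.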
SDValue Lo, Hi;
GetExpandedInteger(N->getOperand(1), Lo, Hi);
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Lo), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_RETURNADDR(SDNode *N) {
  // The argument of the RETURNADDR / FRAMEADDR builtin is a 32-bit constant,
  // which causes trouble on 8/16-bit targets where i32 is not a legal type.
  // Just truncate the constant to a valid type.
SDValue Lo, Hi;
GetExpandedInteger(N->getOperand(0), Lo, Hi);
return SDValue(DAG.UpdateNodeOperands(N, Lo), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) {
SDValue Op = N->getOperand(0);
EVT DstVT = N->getValueType(0);
RTLIB::Libcall LC = RTLIB::getSINTTOFP(Op.getValueType(), DstVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Don't know how to expand this SINT_TO_FP!");
return TLI.makeLibCall(DAG, LC, DstVT, &Op, 1, true, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
if (ISD::isNormalStore(N))
return ExpandOp_NormalStore(N, OpNo);
assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
assert(OpNo == 1 && "Can only expand the stored value so far");
EVT VT = N->getOperand(1).getValueType();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
unsigned Alignment = N->getAlignment();
bool isVolatile = N->isVolatile();
bool isNonTemporal = N->isNonTemporal();
AAMDNodes AAInfo = N->getAAInfo();
SDLoc dl(N);
SDValue Lo, Hi;
assert(NVT.isByteSized() && "Expanded type not byte sized!");
if (N->getMemoryVT().bitsLE(NVT)) {
GetExpandedInteger(N->getValue(), Lo, Hi);
return DAG.getTruncStore(Ch, dl, Lo, Ptr, N->getPointerInfo(),
N->getMemoryVT(), isVolatile, isNonTemporal,
Alignment, AAInfo);
}
if (DAG.getDataLayout().isLittleEndian()) {
// Little-endian - low bits are at low addresses.
GetExpandedInteger(N->getValue(), Lo, Hi);
Lo = DAG.getStore(Ch, dl, Lo, Ptr, N->getPointerInfo(),
isVolatile, isNonTemporal, Alignment, AAInfo);
unsigned ExcessBits =
N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits();
EVT NEVT = EVT::getIntegerVT(*DAG.getContext(), ExcessBits);
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
NEVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize), AAInfo);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
// Big-endian - high bits are at low addresses. Favor aligned stores at
// the cost of some bit-fiddling.
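  // For example, a big-endian i48 store expanded via i32: EBytes = 6,
  // IncrementSize = 4 and ExcessBits = 16, so HiVT = i32. The top 16 bits of
  // Lo are shifted into Hi, Hi is stored at the base address, and the low 16
  // bits of Lo are stored at offset 4.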
GetExpandedInteger(N->getValue(), Lo, Hi);
EVT ExtVT = N->getMemoryVT();
unsigned EBytes = ExtVT.getStoreSize();
unsigned IncrementSize = NVT.getSizeInBits()/8;
unsigned ExcessBits = (EBytes - IncrementSize)*8;
EVT HiVT = EVT::getIntegerVT(*DAG.getContext(),
ExtVT.getSizeInBits() - ExcessBits);
if (ExcessBits < NVT.getSizeInBits()) {
// Transfer high bits from the top of Lo to the bottom of Hi.
Hi = DAG.getNode(ISD::SHL, dl, NVT, Hi,
DAG.getConstant(NVT.getSizeInBits() - ExcessBits, dl,
TLI.getPointerTy(DAG.getDataLayout())));
Hi = DAG.getNode(
ISD::OR, dl, NVT, Hi,
DAG.getNode(ISD::SRL, dl, NVT, Lo,
DAG.getConstant(ExcessBits, dl,
TLI.getPointerTy(DAG.getDataLayout()))));
}
// Store both the high bits and maybe some of the low bits.
Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr, N->getPointerInfo(),
HiVT, isVolatile, isNonTemporal, Alignment, AAInfo);
// Increment the pointer to the other half.
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
// Store the lowest ExcessBits bits in the second half.
Lo = DAG.getTruncStore(Ch, dl, Lo, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize), AAInfo);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
SDValue DAGTypeLegalizer::ExpandIntOp_TRUNCATE(SDNode *N) {
SDValue InL, InH;
GetExpandedInteger(N->getOperand(0), InL, InH);
// Just truncate the low part of the source.
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), N->getValueType(0), InL);
}
SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
SDValue Op = N->getOperand(0);
EVT SrcVT = Op.getValueType();
EVT DstVT = N->getValueType(0);
SDLoc dl(N);
  // The following optimization is valid only if every value in SrcVT (when
  // treated as signed) is representable in DstVT. Check that the mantissa
  // size of DstVT is at least the number of bits in SrcVT minus 1.
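  // For example, i64 -> f128 qualifies (113-bit mantissa >= 63 bits), while
  // i64 -> f32 (24-bit mantissa) does not and falls through to the libcall,
  // provided SINT_TO_FP for the source type is marked Custom.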
const fltSemantics &sem = DAG.EVTToAPFloatSemantics(DstVT);
if (APFloat::semanticsPrecision(sem) >= SrcVT.getSizeInBits()-1 &&
TLI.getOperationAction(ISD::SINT_TO_FP, SrcVT) == TargetLowering::Custom){
// Do a signed conversion then adjust the result.
SDValue SignedConv = DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Op);
SignedConv = TLI.LowerOperation(SignedConv, DAG);
// The result of the signed conversion needs adjusting if the 'sign bit' of
// the incoming integer was set. To handle this, we dynamically test to see
// if it is set, and, if so, add a fudge factor.
const uint64_t F32TwoE32 = 0x4F800000ULL;
const uint64_t F32TwoE64 = 0x5F800000ULL;
const uint64_t F32TwoE128 = 0x7F800000ULL;
APInt FF(32, 0);
if (SrcVT == MVT::i32)
FF = APInt(32, F32TwoE32);
else if (SrcVT == MVT::i64)
FF = APInt(32, F32TwoE64);
else if (SrcVT == MVT::i128)
FF = APInt(32, F32TwoE128);
else
llvm_unreachable("Unsupported UINT_TO_FP!");
// Check whether the sign bit is set.
SDValue Lo, Hi;
GetExpandedInteger(Op, Lo, Hi);
SDValue SignSet = DAG.getSetCC(dl,
getSetCCResultType(Hi.getValueType()),
Hi,
DAG.getConstant(0, dl, Hi.getValueType()),
ISD::SETLT);
// Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
SDValue FudgePtr =
DAG.getConstantPool(ConstantInt::get(*DAG.getContext(), FF.zext(64)),
TLI.getPointerTy(DAG.getDataLayout()));
// Get a pointer to FF if the sign bit was set, or to 0 otherwise.
SDValue Zero = DAG.getIntPtrConstant(0, dl);
SDValue Four = DAG.getIntPtrConstant(4, dl);
if (DAG.getDataLayout().isBigEndian())
std::swap(Zero, Four);
SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet,
Zero, Four);
unsigned Alignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlignment();
FudgePtr = DAG.getNode(ISD::ADD, dl, FudgePtr.getValueType(),
FudgePtr, Offset);
Alignment = std::min(Alignment, 4u);
// Load the value out, extending it from f32 to the destination float type.
// FIXME: Avoid the extend by constructing the right constant pool?
SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, DstVT, DAG.getEntryNode(),
FudgePtr,
MachinePointerInfo::getConstantPool(),
MVT::f32,
false, false, false, Alignment);
return DAG.getNode(ISD::FADD, dl, DstVT, SignedConv, Fudge);
}
// Otherwise, use a libcall.
RTLIB::Libcall LC = RTLIB::getUINTTOFP(SrcVT, DstVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Don't know how to expand this UINT_TO_FP!");
return TLI.makeLibCall(DAG, LC, DstVT, &Op, 1, true, dl).first;
}
SDValue DAGTypeLegalizer::ExpandIntOp_ATOMIC_STORE(SDNode *N) {
SDLoc dl(N);
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(N)->getMemoryVT(),
N->getOperand(0),
N->getOperand(1), N->getOperand(2),
cast<AtomicSDNode>(N)->getMemOperand(),
cast<AtomicSDNode>(N)->getOrdering(),
cast<AtomicSDNode>(N)->getSynchScope());
return Swap.getValue(1);
}
SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
SDValue InOp0 = N->getOperand(0);
EVT InVT = InOp0.getValueType();
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
assert(NOutVT.isVector() && "This type must be promoted to a vector type");
unsigned OutNumElems = OutVT.getVectorNumElements();
EVT NOutVTElem = NOutVT.getVectorElementType();
SDLoc dl(N);
SDValue BaseIdx = N->getOperand(1);
SmallVector<SDValue, 8> Ops;
Ops.reserve(OutNumElems);
for (unsigned i = 0; i != OutNumElems; ++i) {
// Extract the element from the original vector.
SDValue Index = DAG.getNode(ISD::ADD, dl, BaseIdx.getValueType(),
BaseIdx, DAG.getConstant(i, dl, BaseIdx.getValueType()));
SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
InVT.getVectorElementType(), N->getOperand(0), Index);
SDValue Op = DAG.getNode(ISD::ANY_EXTEND, dl, NOutVTElem, Ext);
// Insert the converted element to the new vector.
Ops.push_back(Op);
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, Ops);
}
SDValue DAGTypeLegalizer::PromoteIntRes_VECTOR_SHUFFLE(SDNode *N) {
ShuffleVectorSDNode *SV = cast<ShuffleVectorSDNode>(N);
EVT VT = N->getValueType(0);
SDLoc dl(N);
ArrayRef<int> NewMask = SV->getMask().slice(0, VT.getVectorNumElements());
SDValue V0 = GetPromotedInteger(N->getOperand(0));
SDValue V1 = GetPromotedInteger(N->getOperand(1));
EVT OutVT = V0.getValueType();
return DAG.getVectorShuffle(OutVT, dl, V0, V1, NewMask);
}
SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_VECTOR(SDNode *N) {
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
assert(NOutVT.isVector() && "This type must be promoted to a vector type");
unsigned NumElems = N->getNumOperands();
EVT NOutVTElem = NOutVT.getVectorElementType();
SDLoc dl(N);
SmallVector<SDValue, 8> Ops;
Ops.reserve(NumElems);
for (unsigned i = 0; i != NumElems; ++i) {
SDValue Op;
// BUILD_VECTOR integer operand types are allowed to be larger than the
// result's element type. This may still be true after the promotion. For
    // example, we might be promoting (v?i1 = BV <i32>, <i32>, ...) to
    // (v?i16 = BV <i32>, <i32>, ...), and we can't any_extend <i32> to <i16>.
if (N->getOperand(i).getValueType().bitsLT(NOutVTElem))
Op = DAG.getNode(ISD::ANY_EXTEND, dl, NOutVTElem, N->getOperand(i));
else
Op = N->getOperand(i);
Ops.push_back(Op);
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, Ops);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SCALAR_TO_VECTOR(SDNode *N) {
SDLoc dl(N);
assert(!N->getOperand(0).getValueType().isVector() &&
"Input must be a scalar");
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
assert(NOutVT.isVector() && "This type must be promoted to a vector type");
EVT NOutVTElem = NOutVT.getVectorElementType();
SDValue Op = DAG.getNode(ISD::ANY_EXTEND, dl, NOutVTElem, N->getOperand(0));
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NOutVT, Op);
}
SDValue DAGTypeLegalizer::PromoteIntRes_CONCAT_VECTORS(SDNode *N) {
SDLoc dl(N);
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
assert(NOutVT.isVector() && "This type must be promoted to a vector type");
EVT InElemTy = OutVT.getVectorElementType();
EVT OutElemTy = NOutVT.getVectorElementType();
unsigned NumElem = N->getOperand(0).getValueType().getVectorNumElements();
unsigned NumOutElem = NOutVT.getVectorNumElements();
unsigned NumOperands = N->getNumOperands();
assert(NumElem * NumOperands == NumOutElem &&
"Unexpected number of elements");
// Take the elements from the first vector.
SmallVector<SDValue, 8> Ops(NumOutElem);
for (unsigned i = 0; i < NumOperands; ++i) {
SDValue Op = N->getOperand(i);
for (unsigned j = 0; j < NumElem; ++j) {
SDValue Ext = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, InElemTy, Op,
DAG.getConstant(j, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
Ops[i * NumElem + j] = DAG.getNode(ISD::ANY_EXTEND, dl, OutElemTy, Ext);
}
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, Ops);
}
SDValue DAGTypeLegalizer::PromoteIntRes_INSERT_VECTOR_ELT(SDNode *N) {
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
assert(NOutVT.isVector() && "This type must be promoted to a vector type");
EVT NOutVTElem = NOutVT.getVectorElementType();
SDLoc dl(N);
SDValue V0 = GetPromotedInteger(N->getOperand(0));
SDValue ConvElem = DAG.getNode(ISD::ANY_EXTEND, dl,
NOutVTElem, N->getOperand(1));
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NOutVT,
V0, ConvElem, N->getOperand(2));
}
SDValue DAGTypeLegalizer::PromoteIntOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDLoc dl(N);
SDValue V0 = GetPromotedInteger(N->getOperand(0));
SDValue V1 = DAG.getZExtOrTrunc(N->getOperand(1), dl,
TLI.getVectorIdxTy(DAG.getDataLayout()));
SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
V0->getValueType(0).getScalarType(), V0, V1);
  // EXTRACT_VECTOR_ELT can return types which are wider than the incoming
  // element types. If this is the case then we need to any-extend the
  // outgoing value rather than truncate it.
return DAG.getAnyExtOrTrunc(Ext, dl, N->getValueType(0));
}
SDValue DAGTypeLegalizer::PromoteIntOp_EXTRACT_SUBVECTOR(SDNode *N) {
SDLoc dl(N);
SDValue V0 = GetPromotedInteger(N->getOperand(0));
MVT InVT = V0.getValueType().getSimpleVT();
MVT OutVT = MVT::getVectorVT(InVT.getVectorElementType(),
N->getValueType(0).getVectorNumElements());
SDValue Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OutVT, V0, N->getOperand(1));
return DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), Ext);
}
SDValue DAGTypeLegalizer::PromoteIntOp_CONCAT_VECTORS(SDNode *N) {
SDLoc dl(N);
unsigned NumElems = N->getNumOperands();
EVT RetSclrTy = N->getValueType(0).getVectorElementType();
SmallVector<SDValue, 8> NewOps;
NewOps.reserve(NumElems);
// For each incoming vector
for (unsigned VecIdx = 0; VecIdx != NumElems; ++VecIdx) {
SDValue Incoming = GetPromotedInteger(N->getOperand(VecIdx));
EVT SclrTy = Incoming->getValueType(0).getVectorElementType();
unsigned NumElem = Incoming->getValueType(0).getVectorNumElements();
for (unsigned i=0; i<NumElem; ++i) {
// Extract element from incoming vector
SDValue Ex = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, SclrTy, Incoming,
DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue Tr = DAG.getNode(ISD::TRUNCATE, dl, RetSclrTy, Ex);
NewOps.push_back(Tr);
}
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, N->getValueType(0), NewOps);
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/InstrEmitter.cpp | //==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the Emit routines for the SelectionDAG class, which creates
// MachineInstrs based on the decisions of the SelectionDAG instruction
// selection.
//
//===----------------------------------------------------------------------===//
#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "instr-emitter"
/// MinRCSize - Smallest register class we allow when constraining virtual
/// registers. If satisfying all register class constraints would require
/// using a smaller register class, emit a COPY to a new virtual register
/// instead.
const unsigned MinRCSize = 4;
/// CountResults - The results of target nodes have register or immediate
/// operands first, then an optional chain, and optional glue operands (which do
/// not go into the resulting MachineInstr).
unsigned InstrEmitter::CountResults(SDNode *Node) {
unsigned N = Node->getNumValues();
while (N && Node->getValueType(N - 1) == MVT::Glue)
--N;
if (N && Node->getValueType(N - 1) == MVT::Other)
--N; // Skip over chain result.
return N;
}
/// countOperands - The inputs to target nodes have any actual inputs first,
/// followed by an optional chain operand, then an optional glue operand.
/// Compute the number of actual operands that will go into the resulting
/// MachineInstr.
///
/// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
/// the chain and glue. These operands may be implicit on the machine instr.
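///
/// For example, a typical call node's operands might look like
///   (callee, argument RegisterSDNodes..., regmask, chain, glue);
/// the trailing physreg and regmask operands are reported via NumImpUses.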
static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
unsigned &NumImpUses) {
unsigned N = Node->getNumOperands();
while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
--N;
if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
--N; // Ignore chain if it exists.
// Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
NumImpUses = N - NumExpUses;
for (unsigned I = N; I > NumExpUses; --I) {
if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
continue;
if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
if (TargetRegisterInfo::isPhysicalRegister(RN->getReg()))
continue;
NumImpUses = N - I;
break;
}
return N;
}
/// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
/// implicit physical register output.
void InstrEmitter::
EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
unsigned SrcReg, DenseMap<SDValue, unsigned> &VRBaseMap) {
unsigned VRBase = 0;
if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
// Just use the input register directly!
SDValue Op(Node, ResNo);
if (IsClone)
VRBaseMap.erase(Op);
bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
return;
}
// If the node is only used by a CopyToReg and the dest reg is a vreg, use
// the CopyToReg'd destination register instead of creating a new vreg.
bool MatchReg = true;
const TargetRegisterClass *UseRC = nullptr;
MVT VT = Node->getSimpleValueType(ResNo);
// Stick to the preferred register classes for legal types.
if (TLI->isTypeLegal(VT))
UseRC = TLI->getRegClassFor(VT);
if (!IsClone && !IsCloned)
for (SDNode *User : Node->uses()) {
bool Match = true;
if (User->getOpcode() == ISD::CopyToReg &&
User->getOperand(2).getNode() == Node &&
User->getOperand(2).getResNo() == ResNo) {
unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
VRBase = DestReg;
Match = false;
} else if (DestReg != SrcReg)
Match = false;
} else {
for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
SDValue Op = User->getOperand(i);
if (Op.getNode() != Node || Op.getResNo() != ResNo)
continue;
MVT VT = Node->getSimpleValueType(Op.getResNo());
if (VT == MVT::Other || VT == MVT::Glue)
continue;
Match = false;
if (User->isMachineOpcode()) {
const MCInstrDesc &II = TII->get(User->getMachineOpcode());
const TargetRegisterClass *RC = nullptr;
if (i+II.getNumDefs() < II.getNumOperands()) {
RC = TRI->getAllocatableClass(
TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
}
if (!UseRC)
UseRC = RC;
else if (RC) {
const TargetRegisterClass *ComRC =
TRI->getCommonSubClass(UseRC, RC);
// If multiple uses expect disjoint register classes, we emit
// copies in AddRegisterOperand.
if (ComRC)
UseRC = ComRC;
}
}
}
}
MatchReg &= Match;
if (VRBase)
break;
}
const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
// Figure out the register class to create for the destreg.
if (VRBase) {
DstRC = MRI->getRegClass(VRBase);
} else if (UseRC) {
assert(UseRC->hasType(VT) && "Incompatible phys register def and uses!");
DstRC = UseRC;
} else {
DstRC = TLI->getRegClassFor(VT);
}
// If all uses are reading from the src physical register and copying the
// register is either impossible or very expensive, then don't create a copy.
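  // (A negative copy cost is how a register class marks its registers as
  // extremely expensive or impossible to copy.)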
if (MatchReg && SrcRC->getCopyCost() < 0) {
VRBase = SrcReg;
} else {
// Create the reg, emit the copy.
VRBase = MRI->createVirtualRegister(DstRC);
BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
VRBase).addReg(SrcReg);
}
SDValue Op(Node, ResNo);
if (IsClone)
VRBaseMap.erase(Op);
bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
/// getDstOfCopyToRegUse - If the only use of the specified result number of
/// node is a CopyToReg, return its destination register. Return 0 otherwise.
unsigned InstrEmitter::getDstOfOnlyCopyToRegUse(SDNode *Node,
unsigned ResNo) const {
if (!Node->hasOneUse())
return 0;
SDNode *User = *Node->use_begin();
if (User->getOpcode() == ISD::CopyToReg &&
User->getOperand(2).getNode() == Node &&
User->getOperand(2).getResNo() == ResNo) {
unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
return Reg;
}
return 0;
}
void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
MachineInstrBuilder &MIB,
const MCInstrDesc &II,
bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap) {
assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
"IMPLICIT_DEF should have been handled as a special case elsewhere!");
unsigned NumResults = CountResults(Node);
for (unsigned i = 0; i < II.getNumDefs(); ++i) {
// If the specific node value is only used by a CopyToReg and the dest reg
// is a vreg in the same register class, use the CopyToReg'd destination
// register instead of creating a new vreg.
unsigned VRBase = 0;
const TargetRegisterClass *RC =
TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
// Always let the value type influence the used register class. The
// constraints on the instruction may be too lax to represent the value
// type correctly. For example, a 64-bit float (X86::FR64) can't live in
// the 32-bit float super-class (X86::FR32).
if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
const TargetRegisterClass *VTRC =
TLI->getRegClassFor(Node->getSimpleValueType(i));
if (RC)
VTRC = TRI->getCommonSubClass(RC, VTRC);
if (VTRC)
RC = VTRC;
}
if (II.OpInfo[i].isOptionalDef()) {
// Optional def must be a physical register.
unsigned NumResults = CountResults(Node);
VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
assert(TargetRegisterInfo::isPhysicalRegister(VRBase));
MIB.addReg(VRBase, RegState::Define);
}
if (!VRBase && !IsClone && !IsCloned)
for (SDNode *User : Node->uses()) {
if (User->getOpcode() == ISD::CopyToReg &&
User->getOperand(2).getNode() == Node &&
User->getOperand(2).getResNo() == i) {
unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
if (RegRC == RC) {
VRBase = Reg;
MIB.addReg(VRBase, RegState::Define);
break;
}
}
}
}
// Create the result registers for this node and add the result regs to
// the machine instruction.
if (VRBase == 0) {
assert(RC && "Isn't a register operand!");
VRBase = MRI->createVirtualRegister(RC);
MIB.addReg(VRBase, RegState::Define);
}
// If this def corresponds to a result of the SDNode insert the VRBase into
// the lookup map.
if (i < NumResults) {
SDValue Op(Node, i);
if (IsClone)
VRBaseMap.erase(Op);
bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
}
}
/// getVR - Return the virtual register corresponding to the specified result
/// of the specified node.
unsigned InstrEmitter::getVR(SDValue Op,
DenseMap<SDValue, unsigned> &VRBaseMap) {
if (Op.isMachineOpcode() &&
Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
// Add an IMPLICIT_DEF instruction before every use.
unsigned VReg = getDstOfOnlyCopyToRegUse(Op.getNode(), Op.getResNo());
// IMPLICIT_DEF can produce any type of result so its MCInstrDesc
// does not include operand register class info.
if (!VReg) {
const TargetRegisterClass *RC =
TLI->getRegClassFor(Op.getSimpleValueType());
VReg = MRI->createVirtualRegister(RC);
}
BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
return VReg;
}
DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op);
assert(I != VRBaseMap.end() && "Node emitted out of order - late");
return I->second;
}
/// AddRegisterOperand - Add the specified register as an operand to the
/// specified machine instr. Insert register copies if the register is
/// not in the required register class.
void
InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
SDValue Op,
unsigned IIOpNum,
const MCInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned) {
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue &&
"Chain and glue operands should occur at end of operand list!");
// Get/emit the operand.
unsigned VReg = getVR(Op, VRBaseMap);
assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
const MCInstrDesc &MCID = MIB->getDesc();
bool isOptDef = IIOpNum < MCID.getNumOperands() &&
MCID.OpInfo[IIOpNum].isOptionalDef();
// If the instruction requires a register in a different class, create
// a new virtual register and copy the value into it, but first attempt to
// shrink VReg's register class within reason. For example, if VReg == GR32
// and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
if (II) {
const TargetRegisterClass *DstRC = nullptr;
if (IIOpNum < II->getNumOperands())
DstRC = TRI->getAllocatableClass(TII->getRegClass(*II,IIOpNum,TRI,*MF));
if (DstRC && !MRI->constrainRegClass(VReg, DstRC, MinRCSize)) {
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
VReg = NewVReg;
}
}
// If this value has only one use, that use is a kill. This is a
// conservative approximation. InstrEmitter does trivial coalescing
// with CopyFromReg nodes, so don't emit kill flags for them.
// Avoid kill flags on Schedule cloned nodes, since there will be
// multiple uses.
// Tied operands are never killed, so we need to check that. And that
// means we need to determine the index of the operand.
bool isKill = Op.hasOneUse() &&
Op.getNode()->getOpcode() != ISD::CopyFromReg &&
!IsDebug &&
!(IsClone || IsCloned);
if (isKill) {
unsigned Idx = MIB->getNumOperands();
while (Idx > 0 &&
MIB->getOperand(Idx-1).isReg() &&
MIB->getOperand(Idx-1).isImplicit())
--Idx;
bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
if (isTied)
isKill = false;
}
MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
getDebugRegState(IsDebug));
}
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding.
void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
SDValue Op,
unsigned IIOpNum,
const MCInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned) {
if (Op.isMachineOpcode()) {
AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
IsDebug, IsClone, IsCloned);
} else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
MIB.addImm(C->getSExtValue());
} else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
MIB.addFPImm(F->getConstantFPValue());
} else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
// Turn additional physreg operands into implicit uses on non-variadic
// instructions. This is used by call and return instructions passing
// arguments in registers.
bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
MIB.addReg(R->getReg(), getImplRegState(Imp));
} else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
MIB.addRegMask(RM->getRegMask());
} else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
TGA->getTargetFlags());
} else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
MIB.addMBB(BBNode->getBasicBlock());
} else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
MIB.addFrameIndex(FI->getIndex());
} else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
int Offset = CP->getOffset();
unsigned Align = CP->getAlignment();
Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment.
if (Align == 0) {
Align = MF->getDataLayout().getPrefTypeAlignment(Type);
if (Align == 0) {
// Alignment of vector types. FIXME!
Align = MF->getDataLayout().getTypeAllocSize(Type);
}
}
unsigned Idx;
MachineConstantPool *MCP = MF->getConstantPool();
if (CP->isMachineConstantPoolEntry())
Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Align);
else
Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Align);
MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
} else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
} else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
MIB.addSym(SymNode->getMCSymbol());
} else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
MIB.addBlockAddress(BA->getBlockAddress(),
BA->getOffset(),
BA->getTargetFlags());
} else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
} else {
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue &&
"Chain and glue operands should occur at end of operand list!");
AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
IsDebug, IsClone, IsCloned);
}
}
unsigned InstrEmitter::ConstrainForSubReg(unsigned VReg, unsigned SubIdx,
MVT VT, DebugLoc DL) {
const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);
// RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
// within reason.
if (RC && RC != VRC)
RC = MRI->constrainRegClass(VReg, RC, MinRCSize);
// VReg has been adjusted. It can be used with SubIdx operands now.
if (RC)
return VReg;
// VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
// register instead.
RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT), SubIdx);
assert(RC && "No legal register class for VT supports that SubIdx");
unsigned NewReg = MRI->createVirtualRegister(RC);
BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
.addReg(VReg);
return NewReg;
}
/// EmitSubregNode - Generate machine code for subreg nodes.
///
void InstrEmitter::EmitSubregNode(SDNode *Node,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsClone, bool IsCloned) {
unsigned VRBase = 0;
unsigned Opc = Node->getMachineOpcode();
// If the node is only used by a CopyToReg and the dest reg is a vreg, use
// the CopyToReg'd destination register instead of creating a new vreg.
for (SDNode *User : Node->uses()) {
if (User->getOpcode() == ISD::CopyToReg &&
User->getOperand(2).getNode() == Node) {
unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
VRBase = DestReg;
break;
}
}
}
if (Opc == TargetOpcode::EXTRACT_SUBREG) {
// EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
// constraints on the %dst register, COPY can target all legal register
// classes.
unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
const TargetRegisterClass *TRC =
TLI->getRegClassFor(Node->getSimpleValueType(0));
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
MachineInstr *DefMI = MRI->getVRegDef(VReg);
unsigned SrcReg, DstReg, DefSubIdx;
if (DefMI &&
TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
SubIdx == DefSubIdx &&
TRC == MRI->getRegClass(SrcReg)) {
// Optimize these:
// r1025 = s/zext r1024, 4
// r1026 = extract_subreg r1025, 4
// to a copy
// r1026 = copy r1024
VRBase = MRI->createVirtualRegister(TRC);
BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
MRI->clearKillFlags(SrcReg);
} else {
// VReg may not support a SubIdx sub-register, and we may need to
// constrain its register class or issue a COPY to a compatible register
// class.
VReg = ConstrainForSubReg(VReg, SubIdx,
Node->getOperand(0).getSimpleValueType(),
Node->getDebugLoc());
// Create the destreg if it is missing.
if (VRBase == 0)
VRBase = MRI->createVirtualRegister(TRC);
// Create the extract_subreg machine instruction.
BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
TII->get(TargetOpcode::COPY), VRBase).addReg(VReg, 0, SubIdx);
}
} else if (Opc == TargetOpcode::INSERT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG) {
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
SDValue N2 = Node->getOperand(2);
unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
// Figure out the register class to create for the destreg. It should be
// the largest legal register class supporting SubIdx sub-registers.
// RegisterCoalescer will constrain it further if it decides to eliminate
// the INSERT_SUBREG instruction.
//
// %dst = INSERT_SUBREG %src, %sub, SubIdx
//
// is lowered by TwoAddressInstructionPass to:
//
// %dst = COPY %src
// %dst:SubIdx = COPY %sub
//
// There is no constraint on the %src register class.
//
const TargetRegisterClass *SRC = TLI->getRegClassFor(Node->getSimpleValueType(0));
SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");
if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
VRBase = MRI->createVirtualRegister(SRC);
// Create the insert_subreg or subreg_to_reg machine instruction.
MachineInstrBuilder MIB =
BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);
// If creating a subreg_to_reg, then the first input operand
// is an implicit value immediate, otherwise it's a register
if (Opc == TargetOpcode::SUBREG_TO_REG) {
const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
MIB.addImm(SD->getZExtValue());
} else
AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
IsClone, IsCloned);
    // Add the subregister being inserted.
AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
IsClone, IsCloned);
MIB.addImm(SubIdx);
MBB->insert(InsertPos, MIB);
} else
llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");
SDValue Op(Node, 0);
bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
/// COPY_TO_REGCLASS is just a normal copy, except that the destination
/// register is constrained to be in a particular register class.
///
void
InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
DenseMap<SDValue, unsigned> &VRBaseMap) {
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
// Create the new VReg in the destination class and emit a copy.
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
const TargetRegisterClass *DstRC =
TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
NewVReg).addReg(VReg);
SDValue Op(Node, 0);
bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
///
void InstrEmitter::EmitRegSequence(SDNode *Node,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsClone, bool IsCloned) {
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
unsigned NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
unsigned NumOps = Node->getNumOperands();
assert((NumOps & 1) == 1 &&
"REG_SEQUENCE must have an odd number of operands!");
for (unsigned i = 1; i != NumOps; ++i) {
SDValue Op = Node->getOperand(i);
if ((i & 1) == 0) {
RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
// Skip physical registers as they don't have a vreg to get and we'll
// insert copies for them in TwoAddressInstructionPass anyway.
if (!R || !TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
const TargetRegisterClass *SRC =
TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
if (SRC && SRC != RC) {
MRI->setRegClass(NewVReg, SRC);
RC = SRC;
}
}
}
AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
IsClone, IsCloned);
}
MBB->insert(InsertPos, MIB);
SDValue Op(Node, 0);
bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
/// EmitDbgValue - Generate machine instruction for a dbg_value node.
///
MachineInstr *
InstrEmitter::EmitDbgValue(SDDbgValue *SD,
DenseMap<SDValue, unsigned> &VRBaseMap) {
uint64_t Offset = SD->getOffset();
MDNode *Var = SD->getVariable();
MDNode *Expr = SD->getExpression();
DebugLoc DL = SD->getDebugLoc();
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
if (SD->getKind() == SDDbgValue::FRAMEIX) {
// Stack address; this needs to be lowered in target-dependent fashion.
// EmitTargetCodeForFrameDebugValue is responsible for allocation.
return BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE))
.addFrameIndex(SD->getFrameIx())
.addImm(Offset)
.addMetadata(Var)
.addMetadata(Expr);
}
// Otherwise, we're going to create an instruction here.
const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
if (SD->getKind() == SDDbgValue::SDNODE) {
SDNode *Node = SD->getSDNode();
SDValue Op = SDValue(Node, SD->getResNo());
// It's possible we replaced this SDNode with other(s) and therefore
// didn't generate code for it. It's better to catch these cases where
// they happen and transfer the debug info, but trying to guarantee that
// in all cases would be very fragile; this is a safeguard for any
// that were missed.
DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op);
if (I==VRBaseMap.end())
MIB.addReg(0U); // undef
else
AddOperand(MIB, Op, (*MIB).getNumOperands(), &II, VRBaseMap,
/*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
} else if (SD->getKind() == SDDbgValue::CONST) {
const Value *V = SD->getConst();
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64)
MIB.addCImm(CI);
else
MIB.addImm(CI->getSExtValue());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
MIB.addFPImm(CF);
} else {
// Could be an Undef. In any case insert an Undef so we can see what we
// dropped.
MIB.addReg(0U);
}
} else {
// Insert an Undef so we can see what we dropped.
MIB.addReg(0U);
}
// Indirect addressing is indicated by an Imm as the second parameter.
if (SD->isIndirect())
MIB.addImm(Offset);
else {
assert(Offset == 0 && "direct value cannot have an offset");
MIB.addReg(0U, RegState::Debug);
}
MIB.addMetadata(Var);
MIB.addMetadata(Expr);
return &*MIB;
}
/// EmitMachineNode - Generate machine code for a target-specific node and
/// needed dependencies.
///
void InstrEmitter::
EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap) {
unsigned Opc = Node->getMachineOpcode();
// Handle subreg insert/extract specially
if (Opc == TargetOpcode::EXTRACT_SUBREG ||
Opc == TargetOpcode::INSERT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG) {
EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
return;
}
// Handle COPY_TO_REGCLASS specially.
if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
EmitCopyToRegClassNode(Node, VRBaseMap);
return;
}
// Handle REG_SEQUENCE specially.
if (Opc == TargetOpcode::REG_SEQUENCE) {
EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
return;
}
if (Opc == TargetOpcode::IMPLICIT_DEF)
// We want a unique VR for each IMPLICIT_DEF use.
return;
const MCInstrDesc &II = TII->get(Opc);
unsigned NumResults = CountResults(Node);
unsigned NumDefs = II.getNumDefs();
const MCPhysReg *ScratchRegs = nullptr;
// Handle STACKMAP and PATCHPOINT specially and then use the generic code.
if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
// Stackmaps do not have arguments and do not preserve their calling
// convention. However, to simplify runtime support, they clobber the same
// scratch registers as AnyRegCC.
unsigned CC = CallingConv::AnyReg;
if (Opc == TargetOpcode::PATCHPOINT) {
CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
NumDefs = NumResults;
}
ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
}
unsigned NumImpUses = 0;
unsigned NodeOperands =
countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
bool HasPhysRegOuts = NumResults > NumDefs && II.getImplicitDefs()!=nullptr;
#ifndef NDEBUG
unsigned NumMIOperands = NodeOperands + NumResults;
if (II.isVariadic())
assert(NumMIOperands >= II.getNumOperands() &&
"Too few operands for a variadic node!");
else
assert(NumMIOperands >= II.getNumOperands() &&
NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
NumImpUses &&
"#operands for dag node doesn't match .td file!");
#endif
// Create the new machine instruction.
MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);
// Add result register values for things that are defined by this
// instruction.
if (NumResults)
CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);
// Emit all of the actual operands of this instruction, adding them to the
// instruction as appropriate.
bool HasOptPRefs = NumDefs > NumResults;
assert((!HasOptPRefs || !HasPhysRegOuts) &&
"Unable to cope with optional defs and phys regs defs!");
unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
for (unsigned i = NumSkip; i != NodeOperands; ++i)
AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);
// Add scratch registers as implicit def and early clobber
if (ScratchRegs)
for (unsigned i = 0; ScratchRegs[i]; ++i)
MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
RegState::EarlyClobber);
// Transfer all of the memory reference descriptions of this instruction.
MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
cast<MachineSDNode>(Node)->memoperands_end());
// Insert the instruction into position in the block. This needs to
// happen before any custom inserter hook is called so that the
// hook knows where in the block to insert the replacement code.
MBB->insert(InsertPos, MIB);
// The MachineInstr may also define physregs instead of virtregs. These
// physreg values can reach other instructions in different ways:
//
// 1. When there is a use of a Node value beyond the explicitly defined
// virtual registers, we emit a CopyFromReg for one of the implicitly
// defined physregs. This only happens when HasPhysRegOuts is true.
//
// 2. A CopyFromReg reading a physreg may be glued to this instruction.
//
// 3. A glued instruction may implicitly use a physreg.
//
// 4. A glued instruction may use a RegisterSDNode operand.
//
// Collect all the used physreg defs, and make sure that any unused physreg
// defs are marked as dead.
SmallVector<unsigned, 8> UsedRegs;
// Additional results must be physical register defs.
if (HasPhysRegOuts) {
for (unsigned i = NumDefs; i < NumResults; ++i) {
unsigned Reg = II.getImplicitDefs()[i - NumDefs];
if (!Node->hasAnyUseOfValue(i))
continue;
// This implicitly defined physreg has a use.
UsedRegs.push_back(Reg);
EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
}
}
// Scan the glue chain for any used physregs.
if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
if (F->getOpcode() == ISD::CopyFromReg) {
UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
continue;
} else if (F->getOpcode() == ISD::CopyToReg) {
// Skip CopyToReg nodes that are internal to the glue chain.
continue;
}
// Collect declared implicit uses.
const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
UsedRegs.append(MCID.getImplicitUses(),
MCID.getImplicitUses() + MCID.getNumImplicitUses());
// In addition to declared implicit uses, we must also check for
// direct RegisterSDNode operands.
for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
unsigned Reg = R->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
UsedRegs.push_back(Reg);
}
}
}
// Finally mark unused registers as dead.
if (!UsedRegs.empty() || II.getImplicitDefs())
MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);
// Run post-isel target hook to adjust this instruction if needed.
if (II.hasPostISelHook())
TLI->AdjustInstrPostInstrSelection(MIB, Node);
}
/// EmitSpecialNode - Generate machine code for a target-independent node and
/// needed dependencies.
void InstrEmitter::
EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap) {
switch (Node->getOpcode()) {
default:
#ifndef NDEBUG
Node->dump();
#endif
llvm_unreachable("This target-independent node should have been selected!");
case ISD::EntryToken:
llvm_unreachable("EntryToken should have been excluded from the schedule!");
case ISD::MERGE_VALUES:
case ISD::TokenFactor: // fall thru
break;
case ISD::CopyToReg: {
unsigned SrcReg;
SDValue SrcVal = Node->getOperand(2);
if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
SrcReg = R->getReg();
else
SrcReg = getVR(SrcVal, VRBaseMap);
unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
break;
BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
DestReg).addReg(SrcReg);
break;
}
case ISD::CopyFromReg: {
unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
break;
}
case ISD::EH_LABEL: {
MCSymbol *S = cast<EHLabelSDNode>(Node)->getLabel();
BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
TII->get(TargetOpcode::EH_LABEL)).addSym(S);
break;
}
case ISD::LIFETIME_START:
case ISD::LIFETIME_END: {
unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START) ?
TargetOpcode::LIFETIME_START : TargetOpcode::LIFETIME_END;
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Node->getOperand(1));
BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
.addFrameIndex(FI->getIndex());
break;
}
case ISD::INLINEASM: {
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
--NumOps; // Ignore the glue operand.
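    // The INLINEASM node's operands are laid out as: 0: input chain,
    // 1: asm string, 2: metadata (srcloc), 3: extra info, then groups of
    // (flag word, operands...), optionally followed by glue (see the
    // InlineAsm::Op_* constants).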
// Create the inline asm machine instruction.
MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(),
TII->get(TargetOpcode::INLINEASM));
// Add the asm string as an external symbol operand.
SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
MIB.addExternalSymbol(AsmStr);
// Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
// bits.
int64_t ExtraInfo =
cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
getZExtValue();
MIB.addImm(ExtraInfo);
    // Remember the operand index of the group flags.
SmallVector<unsigned, 8> GroupIdx;
// Remember registers that are part of early-clobber defs.
SmallVector<unsigned, 8> ECRegs;
// Add all of the operand registers to the instruction.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
GroupIdx.push_back(MIB->getNumOperands());
MIB.addImm(Flags);
++i; // Skip the ID value.
switch (InlineAsm::getKind(Flags)) {
default: llvm_unreachable("Bad flags!");
case InlineAsm::Kind_RegDef:
for (unsigned j = 0; j != NumVals; ++j, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
// FIXME: Add dead flags for physical and virtual registers defined.
// For now, mark physical register defs as implicit to help fast
// regalloc. This makes inline asm look a lot like calls.
MIB.addReg(Reg, RegState::Define |
getImplRegState(TargetRegisterInfo::isPhysicalRegister(Reg)));
}
break;
case InlineAsm::Kind_RegDefEarlyClobber:
case InlineAsm::Kind_Clobber:
for (unsigned j = 0; j != NumVals; ++j, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
MIB.addReg(Reg, RegState::Define | RegState::EarlyClobber |
getImplRegState(TargetRegisterInfo::isPhysicalRegister(Reg)));
ECRegs.push_back(Reg);
}
break;
case InlineAsm::Kind_RegUse: // Use of register.
case InlineAsm::Kind_Imm: // Immediate.
case InlineAsm::Kind_Mem: // Addressing mode.
// The addressing mode has been selected, just add all of the
// operands to the machine instruction.
for (unsigned j = 0; j != NumVals; ++j, ++i)
AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
/*IsDebug=*/false, IsClone, IsCloned);
// Manually set isTied bits.
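// GroupIdx[N] holds the MI operand index of group N's flag word, so the
// "+ 1" below addresses the first register operand within a group.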
if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
unsigned DefGroup = 0;
if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
unsigned DefIdx = GroupIdx[DefGroup] + 1;
unsigned UseIdx = GroupIdx.back() + 1;
for (unsigned j = 0; j != NumVals; ++j)
MIB->tieOperands(DefIdx + j, UseIdx + j);
}
}
break;
}
}
// GCC inline assembly allows input operands to also be early-clobber
// output operands (so long as the operand is written only after it's
// used), but this does not match the semantics of our early-clobber flag.
// If an early-clobber operand register is also an input operand register,
// then remove the early-clobber flag.
for (unsigned Reg : ECRegs) {
if (MIB->readsRegister(Reg, TRI)) {
MachineOperand *MO = MIB->findRegisterDefOperand(Reg, false, TRI);
assert(MO && "No def operand for clobbered register?");
MO->setIsEarlyClobber(false);
}
}
// Get the mdnode from the asm if it exists and add it to the instruction.
SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
if (MD)
MIB.addMetadata(MD);
MBB->insert(InsertPos, MIB);
break;
}
}
}
/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
/// at the given position in the given block.
InstrEmitter::InstrEmitter(MachineBasicBlock *mbb,
MachineBasicBlock::iterator insertpos)
: MF(mbb->getParent()), MRI(&MF->getRegInfo()),
TII(MF->getSubtarget().getInstrInfo()),
TRI(MF->getSubtarget().getRegisterInfo()),
TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
InsertPos(insertpos) {}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | //===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "function-lowering-info"
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
if (I->use_empty()) return false;
if (isa<PHINode>(I)) return true;
const BasicBlock *BB = I->getParent();
for (const User *U : I->users())
if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
return true;
return false;
}
static ISD::NodeType getPreferredExtendForValue(const Value *V) {
// When the source value is used by compare instructions, prefer SIGN_EXTEND
// if the signed predicates among its users outnumber the unsigned ones.
//
// This optimization removes some redundant sign- and zero-extension
// instructions and eventually exposes more machine CSE opportunities.
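// For example, if V feeds two signed compares (slt, sge) and one unsigned
// compare (ult), then NumOfSigned (2) > NumOfUnsigned (1) and SIGN_EXTEND
// is chosen.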
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
unsigned NumOfSigned = 0, NumOfUnsigned = 0;
for (const User *U : V->users()) {
if (const auto *CI = dyn_cast<CmpInst>(U)) {
NumOfSigned += CI->isSigned();
NumOfUnsigned += CI->isUnsigned();
}
}
if (NumOfSigned > NumOfUnsigned)
ExtendKind = ISD::SIGN_EXTEND;
return ExtendKind;
}
void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
SelectionDAG *DAG) {
Fn = &fn;
MF = &mf;
TLI = MF->getSubtarget().getTargetLowering();
RegInfo = &MF->getRegInfo();
MachineModuleInfo &MMI = MF->getMMI();
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
mf.getDataLayout());
CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
Fn->isVarArg(), Outs, Fn->getContext());
// Initialize the mapping of values to registers. This is only set up for
// instruction values that are used outside of the block that defines
// them.
Function::const_iterator BB = Fn->begin(), EB = Fn->end();
for (; BB != EB; ++BB)
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
I != E; ++I) {
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
// Static allocas can be folded into the initial stack frame adjustment.
if (AI->isStaticAlloca()) {
const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
Type *Ty = AI->getAllocatedType();
uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
AI->getAlignment());
TySize *= CUI->getZExtValue(); // Get total allocated size.
if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
StaticAllocaMap[AI] =
MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
} else {
unsigned Align =
std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(
AI->getAllocatedType()),
AI->getAlignment());
unsigned StackAlign =
MF->getSubtarget().getFrameLowering()->getStackAlignment();
if (Align <= StackAlign)
Align = 0;
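// An alignment at or below the natural stack alignment needs no special
// handling; it is dropped to 0 here and treated as 1 below.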
// Inform the Frame Information that we have variable-sized objects.
MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1, AI);
}
}
// Look for inline asm that clobbers the SP register.
if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
ImmutableCallSite CS(I);
if (isa<InlineAsm>(CS.getCalledValue())) {
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
std::vector<TargetLowering::AsmOperandInfo> Ops =
TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
for (size_t I = 0, E = Ops.size(); I != E; ++I) {
TargetLowering::AsmOperandInfo &Op = Ops[I];
if (Op.Type == InlineAsm::isClobber) {
// Clobbers don't have SDValue operands, hence SDValue().
TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
std::pair<unsigned, const TargetRegisterClass *> PhysReg =
TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
Op.ConstraintVT);
if (PhysReg.first == SP)
MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
}
}
}
}
// Look for calls to the @llvm.va_start intrinsic. We can omit some
// prologue boilerplate for variadic functions that don't examine their
// arguments.
if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::vastart)
MF->getFrameInfo()->setHasVAStart(true);
}
// If we have a musttail call in a variadic function, we need to ensure we
// forward implicit register parameters.
if (const auto *CI = dyn_cast<CallInst>(I)) {
if (CI->isMustTailCall() && Fn->isVarArg())
MF->getFrameInfo()->setHasMustTailInVarArgFunc(true);
}
// Mark values used outside their block as exported, by allocating
// a virtual register for them.
if (isUsedOutsideOfDefiningBlock(I))
if (!isa<AllocaInst>(I) ||
!StaticAllocaMap.count(cast<AllocaInst>(I)))
InitializeRegForValue(I);
// Collect llvm.dbg.declare information. This is done now instead of
// during the initial isel pass through the IR so that it is done
// in a predictable order.
if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
assert(DI->getVariable() && "Missing variable");
assert(DI->getDebugLoc() && "Missing location");
if (MMI.hasDebugInfo()) {
// Don't handle byval struct arguments or VLAs, for example.
// Non-byval arguments are handled here (they refer to the stack
// temporary alloca at this point).
const Value *Address = DI->getAddress();
if (Address) {
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
Address = BCI->getOperand(0);
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
DenseMap<const AllocaInst *, int>::iterator SI =
StaticAllocaMap.find(AI);
if (SI != StaticAllocaMap.end()) { // Check for VLAs.
int FI = SI->second;
MMI.setVariableDbgInfo(DI->getVariable(), DI->getExpression(),
FI, DI->getDebugLoc());
}
}
}
}
}
// Decide the preferred extend type for a value.
PreferredExtendType[I] = getPreferredExtendForValue(I);
}
// Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
// also creates the initial PHI MachineInstrs, though none of the input
// operands are populated.
for (BB = Fn->begin(); BB != EB; ++BB) {
MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
MBBMap[BB] = MBB;
MF->push_back(MBB);
// Transfer the address-taken flag. This is necessary because there could
// be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
// the first one should be marked.
if (BB->hasAddressTaken())
MBB->setHasAddressTaken();
// Create Machine PHI nodes for LLVM PHI nodes, lowering them as
// appropriate.
for (BasicBlock::const_iterator I = BB->begin();
const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
if (PN->use_empty()) continue;
// Skip empty types
if (PN->getType()->isEmptyTy())
continue;
DebugLoc DL = PN->getDebugLoc();
unsigned PHIReg = ValueMap[PN];
assert(PHIReg && "PHI node does not have an assigned virtual register!");
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*TLI, MF->getDataLayout(), PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
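// A value expanded to several registers (e.g. an i64 on a 32-bit target)
// gets one PHI machine instruction per register.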
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
for (unsigned i = 0; i != NumRegisters; ++i)
BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
PHIReg += NumRegisters;
}
}
}
// Mark landing pad blocks.
SmallVector<const LandingPadInst *, 4> LPads;
for (BB = Fn->begin(); BB != EB; ++BB) {
if (const auto *Invoke = dyn_cast<InvokeInst>(BB->getTerminator()))
MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad();
if (BB->isLandingPad())
LPads.push_back(BB->getLandingPadInst());
}
// If this is an MSVC EH personality, we need to do a bit more work.
EHPersonality Personality = EHPersonality::Unknown;
if (Fn->hasPersonalityFn())
Personality = classifyEHPersonality(Fn->getPersonalityFn());
if (!isMSVCEHPersonality(Personality))
return;
if (Personality == EHPersonality::MSVC_Win64SEH ||
Personality == EHPersonality::MSVC_X86SEH) {
addSEHHandlersForLPads(LPads);
}
WinEHFuncInfo &EHInfo = MMI.getWinEHFuncInfo(&fn);
if (Personality == EHPersonality::MSVC_CXX) {
const Function *WinEHParentFn = MMI.getWinEHParent(&fn);
calculateWinCXXEHStateNumbers(WinEHParentFn, EHInfo);
}
// Copy the state numbers to LandingPadInfo for the current function, which
// could be a handler or the parent. This should happen for 32-bit SEH and
// C++ EH.
if (Personality == EHPersonality::MSVC_CXX ||
Personality == EHPersonality::MSVC_X86SEH) {
for (const LandingPadInst *LP : LPads) {
MachineBasicBlock *LPadMBB = MBBMap[LP->getParent()];
MMI.addWinEHState(LPadMBB, EHInfo.LandingPadStateMap[LP]);
}
}
}
void FunctionLoweringInfo::addSEHHandlersForLPads(
ArrayRef<const LandingPadInst *> LPads) {
MachineModuleInfo &MMI = MF->getMMI();
// Iterate over all landing pads with llvm.eh.actions calls.
for (const LandingPadInst *LP : LPads) {
const IntrinsicInst *ActionsCall =
dyn_cast<IntrinsicInst>(LP->getNextNode());
if (!ActionsCall ||
ActionsCall->getIntrinsicID() != Intrinsic::eh_actions)
continue;
// Parse the llvm.eh.actions call we found.
MachineBasicBlock *LPadMBB = MBBMap[LP->getParent()];
SmallVector<std::unique_ptr<ActionHandler>, 4> Actions;
parseEHActions(ActionsCall, Actions);
// Iterate EH actions from most to least precedence, which means
// iterating in reverse.
for (auto I = Actions.rbegin(), E = Actions.rend(); I != E; ++I) {
ActionHandler *Action = I->get();
if (auto *CH = dyn_cast<CatchHandler>(Action)) {
const auto *Filter =
dyn_cast<Function>(CH->getSelector()->stripPointerCasts());
assert((Filter || CH->getSelector()->isNullValue()) &&
"expected function or catch-all");
const auto *RecoverBA =
cast<BlockAddress>(CH->getHandlerBlockOrFunc());
MMI.addSEHCatchHandler(LPadMBB, Filter, RecoverBA);
} else {
assert(isa<CleanupHandler>(Action));
const auto *Fini = cast<Function>(Action->getHandlerBlockOrFunc());
MMI.addSEHCleanupHandler(LPadMBB, Fini);
}
}
}
}
/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
assert(CatchInfoFound.size() == CatchInfoLost.size() &&
"Not all catch info was assigned to a landing pad!");
MBBMap.clear();
ValueMap.clear();
StaticAllocaMap.clear();
#ifndef NDEBUG
CatchInfoLost.clear();
CatchInfoFound.clear();
#endif
LiveOutRegInfo.clear();
VisitedBBs.clear();
ArgDbgValues.clear();
ByValArgFrameIndexMap.clear();
RegFixups.clear();
StatepointStackSlots.clear();
StatepointRelocatedValues.clear();
PreferredExtendType.clear();
}
/// CreateReg - Allocate a single virtual register for the given type.
unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
return RegInfo->createVirtualRegister(
MF->getSubtarget().getTargetLowering()->getRegClassFor(VT));
}
/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
unsigned FirstReg = 0;
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
EVT ValueVT = ValueVTs[Value];
MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);
unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
for (unsigned i = 0; i != NumRegs; ++i) {
unsigned R = CreateReg(RegisterVT);
if (!FirstReg) FirstReg = R;
}
}
return FirstReg;
}
/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is extended to
/// the larger bit width by zero extension. The bit width must be no smaller
/// than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
if (!LiveOutRegInfo.inBounds(Reg))
return nullptr;
LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
if (!LOI->IsValid)
return nullptr;
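// If the cached info is narrower than the requested width, widen it
// conservatively: zero-extending the masks keeps known bits correct, but
// sign-bit knowledge does not survive, so NumSignBits drops to 1.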
if (BitWidth > LOI->KnownZero.getBitWidth()) {
LOI->NumSignBits = 1;
LOI->KnownZero = LOI->KnownZero.zextOrTrunc(BitWidth);
LOI->KnownOne = LOI->KnownOne.zextOrTrunc(BitWidth);
}
return LOI;
}
/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
Type *Ty = PN->getType();
if (!Ty->isIntegerTy() || Ty->isVectorTy())
return;
SmallVector<EVT, 1> ValueVTs;
ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
assert(ValueVTs.size() == 1 &&
"PHIs with non-vector integer types should have a single VT.");
EVT IntVT = ValueVTs[0];
if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
return;
IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
unsigned BitWidth = IntVT.getSizeInBits();
unsigned DestReg = ValueMap[PN];
if (!TargetRegisterInfo::isVirtualRegister(DestReg))
return;
LiveOutRegInfo.grow(DestReg);
LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];
Value *V = PN->getIncomingValue(0);
if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
DestLOI.NumSignBits = 1;
APInt Zero(BitWidth, 0);
DestLOI.KnownZero = Zero;
DestLOI.KnownOne = Zero;
return;
}
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt Val = CI->getValue().zextOrTrunc(BitWidth);
DestLOI.NumSignBits = Val.getNumSignBits();
DestLOI.KnownZero = ~Val;
DestLOI.KnownOne = Val;
} else {
assert(ValueMap.count(V) && "V should have been placed in ValueMap when its "
"CopyToReg node was created.");
unsigned SrcReg = ValueMap[V];
if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
DestLOI.IsValid = false;
return;
}
const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
if (!SrcLOI) {
DestLOI.IsValid = false;
return;
}
DestLOI = *SrcLOI;
}
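// Merge the LiveOutInfo of the remaining incoming values below. A bit stays
// known only if all predecessors agree; e.g. merging constants 0b0100 and
// 0b0110 leaves KnownZero = 0b1001 and KnownOne = 0b0100 (bit 1 unknown).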
assert(DestLOI.KnownZero.getBitWidth() == BitWidth &&
DestLOI.KnownOne.getBitWidth() == BitWidth &&
"Masks should have the same bit width as the type.");
for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *V = PN->getIncomingValue(i);
if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
DestLOI.NumSignBits = 1;
APInt Zero(BitWidth, 0);
DestLOI.KnownZero = Zero;
DestLOI.KnownOne = Zero;
return;
}
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt Val = CI->getValue().zextOrTrunc(BitWidth);
DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
DestLOI.KnownZero &= ~Val;
DestLOI.KnownOne &= Val;
continue;
}
assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
"its CopyToReg node was created.");
unsigned SrcReg = ValueMap[V];
if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
DestLOI.IsValid = false;
return;
}
const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
if (!SrcLOI) {
DestLOI.IsValid = false;
return;
}
DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
DestLOI.KnownZero &= SrcLOI->KnownZero;
DestLOI.KnownOne &= SrcLOI->KnownOne;
}
}
/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides previous frame index entry for this argument,
/// if any.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
int FI) {
ByValArgFrameIndexMap[A] = FI;
}
/// getArgumentFrameIndex - Get frame index for the byval argument.
/// If the argument does not have any assigned frame index then 0 is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
DenseMap<const Argument *, int>::iterator I =
ByValArgFrameIndexMap.find(A);
if (I != ByValArgFrameIndexMap.end())
return I->second;
DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
return 0;
}
/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
/// being passed to this variadic function, and set the MachineModuleInfo's
/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
/// reference to _fltused on Windows, which will link in MSVCRT's
/// floating-point support.
void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
MachineModuleInfo *MMI)
{
FunctionType *FT = cast<FunctionType>(
I.getCalledValue()->getType()->getContainedType(0));
if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
Type* T = I.getArgOperand(i)->getType();
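// post_order walks T's contained types, so a float nested inside a struct
// or array argument is detected as well.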
for (auto i : post_order(T)) {
if (i->isFloatingPointTy()) {
MMI->setUsesVAFloatArgument(true);
return;
}
}
}
}
}
/// AddLandingPadInfo - Extract the exception handling information from the
/// landingpad instruction and add them to the specified machine module info.
void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
MachineBasicBlock *MBB) {
MMI.addPersonality(
MBB,
cast<Function>(
I.getParent()->getParent()->getPersonalityFn()->stripPointerCasts()));
if (I.isCleanup())
MMI.addCleanup(MBB);
// FIXME: New EH - Add the clauses in reverse order. This isn't 100% correct,
// but we need to do it this way because of how the DWARF EH emitter
// processes the clauses.
for (unsigned i = I.getNumClauses(); i != 0; --i) {
Value *Val = I.getClause(i - 1);
if (I.isCatch(i - 1)) {
MMI.addCatchTypeInfo(MBB,
dyn_cast<GlobalValue>(Val->stripPointerCasts()));
} else {
// Add filters in a list.
Constant *CVal = cast<Constant>(Val);
SmallVector<const GlobalValue*, 4> FilterList;
for (User::op_iterator
II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II)
FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
MMI.addFilterTypeInfo(MBB, FilterList);
}
}
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/CMakeLists.txt | set(LLVM_OPTIONAL_SOURCES legalizevectorops.cpp) # HLSL Change - ignore file
add_llvm_library(LLVMSelectionDAG
DAGCombiner.cpp
FastISel.cpp
FunctionLoweringInfo.cpp
InstrEmitter.cpp
LegalizeDAG.cpp
LegalizeFloatTypes.cpp
LegalizeIntegerTypes.cpp
LegalizeTypes.cpp
LegalizeTypesGeneric.cpp
LegalizeVectorOps.cpp
LegalizeVectorTypes.cpp
ResourcePriorityQueue.cpp
ScheduleDAGFast.cpp
ScheduleDAGRRList.cpp
ScheduleDAGSDNodes.cpp
SelectionDAG.cpp
SelectionDAGBuilder.cpp
SelectionDAGDumper.cpp
SelectionDAGISel.cpp
SelectionDAGPrinter.cpp
StatepointLowering.cpp
ScheduleDAGVLIW.cpp
TargetLowering.cpp
TargetSelectionDAGInfo.cpp
)
add_dependencies(LLVMSelectionDAG intrinsics_gen)
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/InstrEmitter.h | //===- InstrEmitter.h - Emit MachineInstrs for the SelectionDAG -*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This declares the Emit routines for the SelectionDAG class, which creates
// MachineInstrs based on the decisions of the SelectionDAG instruction
// selection.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_INSTREMITTER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_INSTREMITTER_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
namespace llvm {
class MachineInstrBuilder;
class MCInstrDesc;
class SDDbgValue;
class LLVM_LIBRARY_VISIBILITY InstrEmitter {
MachineFunction *MF;
MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const TargetLowering *TLI;
MachineBasicBlock *MBB;
MachineBasicBlock::iterator InsertPos;
/// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
/// implicit physical register output.
void EmitCopyFromReg(SDNode *Node, unsigned ResNo,
bool IsClone, bool IsCloned,
unsigned SrcReg,
DenseMap<SDValue, unsigned> &VRBaseMap);
/// getDstOfOnlyCopyToRegUse - If the only use of the specified result number of
/// node is a CopyToReg, return its destination register. Return 0 otherwise.
unsigned getDstOfOnlyCopyToRegUse(SDNode *Node,
unsigned ResNo) const;
void CreateVirtualRegisters(SDNode *Node,
MachineInstrBuilder &MIB,
const MCInstrDesc &II,
bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap);
/// getVR - Return the virtual register corresponding to the specified result
/// of the specified node.
unsigned getVR(SDValue Op,
DenseMap<SDValue, unsigned> &VRBaseMap);
/// AddRegisterOperand - Add the specified register as an operand to the
/// specified machine instr. Insert register copies if the register is
/// not in the required register class.
void AddRegisterOperand(MachineInstrBuilder &MIB,
SDValue Op,
unsigned IIOpNum,
const MCInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned);
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding. IIOpNum and II are used for
/// assertions only.
void AddOperand(MachineInstrBuilder &MIB,
SDValue Op,
unsigned IIOpNum,
const MCInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned);
/// ConstrainForSubReg - Try to constrain VReg to a register class that
/// supports SubIdx sub-registers. Emit a copy if that isn't possible.
/// Return the virtual register to use.
unsigned ConstrainForSubReg(unsigned VReg, unsigned SubIdx,
MVT VT, DebugLoc DL);
/// EmitSubregNode - Generate machine code for subreg nodes.
///
void EmitSubregNode(SDNode *Node, DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsClone, bool IsCloned);
/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
/// COPY_TO_REGCLASS is just a normal copy, except that the destination
/// register is constrained to be in a particular register class.
///
void EmitCopyToRegClassNode(SDNode *Node,
DenseMap<SDValue, unsigned> &VRBaseMap);
/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
///
void EmitRegSequence(SDNode *Node, DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsClone, bool IsCloned);
public:
/// CountResults - The results of target nodes have register or immediate
/// operands first, then an optional chain, and optional flag operands
/// (which do not go into the machine instrs.)
static unsigned CountResults(SDNode *Node);
/// EmitDbgValue - Generate machine instruction for a dbg_value node.
///
MachineInstr *EmitDbgValue(SDDbgValue *SD,
DenseMap<SDValue, unsigned> &VRBaseMap);
/// EmitNode - Generate machine code for a node and needed dependencies.
///
void EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap) {
if (Node->isMachineOpcode())
EmitMachineNode(Node, IsClone, IsCloned, VRBaseMap);
else
EmitSpecialNode(Node, IsClone, IsCloned, VRBaseMap);
}
/// getBlock - Return the current basic block.
MachineBasicBlock *getBlock() { return MBB; }
/// getInsertPos - Return the current insertion position.
MachineBasicBlock::iterator getInsertPos() { return InsertPos; }
/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
/// at the given position in the given block.
InstrEmitter(MachineBasicBlock *mbb, MachineBasicBlock::iterator insertpos);
private:
void EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap);
void EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | //===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "isel"
/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;
static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
cl::desc("Generate low-precision inline sequences "
"for some float libcalls"),
cl::location(LimitFloatPrecision),
cl::init(0));
static cl::opt<bool>
EnableFMFInDAG("enable-fmf-dag", cl::init(false), cl::Hidden,
cl::desc("Enable fast-math-flags for DAG nodes"));
// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V);
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
const SDValue *Parts,
unsigned NumParts, MVT PartVT, EVT ValueVT,
const Value *V,
ISD::NodeType AssertOp = ISD::DELETED_NODE) {
if (ValueVT.isVector())
return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
PartVT, ValueVT, V);
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Val = Parts[0];
if (NumParts > 1) {
// Assemble the value from multiple parts.
if (ValueVT.isInteger()) {
unsigned PartBits = PartVT.getSizeInBits();
unsigned ValueBits = ValueVT.getSizeInBits();
// Assemble the power of 2 part.
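// e.g. an i96 value arriving in three i32 parts: RoundParts = 2, so the
// first two parts are paired into an i64 and the odd i32 tail is merged in
// below.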
unsigned RoundParts = NumParts & (NumParts - 1) ?
1 << Log2_32(NumParts) : NumParts;
unsigned RoundBits = PartBits * RoundParts;
EVT RoundVT = RoundBits == ValueBits ?
ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
SDValue Lo, Hi;
EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
if (RoundParts > 2) {
Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
PartVT, HalfVT, V);
Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
RoundParts / 2, PartVT, HalfVT, V);
} else {
Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
}
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
if (RoundParts < NumParts) {
// Assemble the trailing non-power-of-2 part.
unsigned OddParts = NumParts - RoundParts;
EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
Hi = getCopyFromParts(DAG, DL,
Parts + RoundParts, OddParts, PartVT, OddVT, V);
// Combine the round and odd parts.
Lo = Val;
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
Hi =
DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
DAG.getConstant(Lo.getValueType().getSizeInBits(), DL,
TLI.getPointerTy(DAG.getDataLayout())));
Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
}
} else if (PartVT.isFloatingPoint()) {
// FP split into multiple FP parts (for ppcf128)
assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
"Unexpected split");
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
} else {
// FP split into integer parts (soft fp)
assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
!PartVT.isVector() && "Unexpected split");
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
}
}
// There is now one part, held in Val. Correct it to match ValueVT.
EVT PartEVT = Val.getValueType();
if (PartEVT == ValueVT)
return Val;
if (PartEVT.isInteger() && ValueVT.isInteger()) {
if (ValueVT.bitsLT(PartEVT)) {
// For a truncate, see if we have any information to
// indicate whether the truncated bits will always be
// zero or sign-extension.
if (AssertOp != ISD::DELETED_NODE)
Val = DAG.getNode(AssertOp, DL, PartEVT, Val,
DAG.getValueType(ValueVT));
return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
}
return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
}
if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
// FP_ROUND's are always exact here.
if (ValueVT.bitsLT(Val.getValueType()))
return DAG.getNode(
ISD::FP_ROUND, DL, ValueVT, Val,
DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
}
if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
llvm_unreachable("Unknown mismatch!");
}
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
const Twine &ErrMsg) {
const Instruction *I = dyn_cast_or_null<Instruction>(V);
if (!I)
return Ctx.emitError(ErrMsg);
const char *AsmError = ", possible invalid constraint for vector type";
if (const CallInst *CI = dyn_cast<CallInst>(I))
if (isa<InlineAsm>(CI->getCalledValue()))
return Ctx.emitError(I, ErrMsg + AsmError);
return Ctx.emitError(I, ErrMsg);
}
/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V) {
assert(ValueVT.isVector() && "Not a vector value");
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Val = Parts[0];
// Handle a multi-element vector.
if (NumParts > 1) {
EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
unsigned NumRegs =
TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
NumIntermediates, RegisterVT);
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
assert(RegisterVT.getSizeInBits() ==
Parts[0].getSimpleValueType().getSizeInBits() &&
"Part type sizes don't match!");
// Assemble the parts into intermediate operands.
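// e.g. a <8 x i32> value on a target whose widest legal vector is <4 x i32>
// arrives as two <4 x i32> intermediates that are concatenated below.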
SmallVector<SDValue, 8> Ops(NumIntermediates);
if (NumIntermediates == NumParts) {
// If the register was not expanded, truncate or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
PartVT, IntermediateVT, V);
} else if (NumParts > 0) {
// If the intermediate type was expanded, build the intermediate
// operands from the parts.
assert(NumParts % NumIntermediates == 0 &&
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
PartVT, IntermediateVT, V);
}
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
// intermediate operands.
Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
: ISD::BUILD_VECTOR,
DL, ValueVT, Ops);
}
// There is now one part, held in Val. Correct it to match ValueVT.
EVT PartEVT = Val.getValueType();
if (PartEVT == ValueVT)
return Val;
if (PartEVT.isVector()) {
// If the element type of the source/dest vectors are the same, but the
// parts vector has more elements than the value vector, then we have a
// vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
// elements we want.
if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
"Cannot narrow, it would be a lossy transformation");
return DAG.getNode(
ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
// Vector/Vector bitcast.
if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
"Cannot handle this kind of promotion");
// Promoted vector extract
bool Smaller = ValueVT.bitsLE(PartEVT);
return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
DL, ValueVT, Val);
}
// Trivial bitcast if the types are the same size and the destination
// vector type is legal.
if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
TLI.isTypeLegal(ValueVT))
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
// Handle cases such as i8 -> <1 x i1>
if (ValueVT.getVectorNumElements() != 1) {
diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
"non-trivial scalar-to-vector conversion");
return DAG.getUNDEF(ValueVT);
}
if (ValueVT.getVectorNumElements() == 1 &&
ValueVT.getVectorElementType() != PartEVT) {
bool Smaller = ValueVT.bitsLE(PartEVT);
Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
DL, ValueVT.getScalarType(), Val);
}
return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
}
static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V);
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
EVT ValueVT = Val.getValueType();
// Handle the vector case separately.
if (ValueVT.isVector())
return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
unsigned PartBits = PartVT.getSizeInBits();
unsigned OrigNumParts = NumParts;
assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
"Copying to an illegal type!");
if (NumParts == 0)
return;
assert(!ValueVT.isVector() && "Vector case handled elsewhere");
EVT PartEVT = PartVT;
if (PartEVT == ValueVT) {
assert(NumParts == 1 && "No-op copy with multiple parts!");
Parts[0] = Val;
return;
}
if (NumParts * PartBits > ValueVT.getSizeInBits()) {
// If the parts cover more bits than the value has, promote the value.
if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
assert(NumParts == 1 && "Do not know what to promote to!");
Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
} else {
assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
ValueVT.isInteger() &&
"Unknown mismatch!");
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
if (PartVT == MVT::x86mmx)
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
}
} else if (PartBits == ValueVT.getSizeInBits()) {
// Different types of the same size.
assert(NumParts == 1 && PartEVT != ValueVT);
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
// If the parts cover fewer bits than the value has, truncate the value.
assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
ValueVT.isInteger() &&
"Unknown mismatch!");
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
if (PartVT == MVT::x86mmx)
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
}
// The value may have changed - recompute ValueVT.
ValueVT = Val.getValueType();
assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
"Failed to tile the value with PartVT!");
if (NumParts == 1) {
if (PartEVT != ValueVT)
diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
"scalar-to-vector conversion failed");
Parts[0] = Val;
return;
}
// Expand the value into multiple parts.
if (NumParts & (NumParts - 1)) {
// The number of parts is not a power of 2. Split off and copy the tail.
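// e.g. splitting an i96 value into three i32 parts: RoundParts = 2, the high
// 32 bits are shifted down and copied out as the odd tail, and the remaining
// i64 is bisected below with EXTRACT_ELEMENT.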
assert(PartVT.isInteger() && ValueVT.isInteger() &&
"Do not know what to expand to!");
unsigned RoundParts = 1 << Log2_32(NumParts);
unsigned RoundBits = RoundParts * PartBits;
unsigned OddParts = NumParts - RoundParts;
SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
DAG.getIntPtrConstant(RoundBits, DL));
getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
if (DAG.getDataLayout().isBigEndian())
// The odd parts were reversed by getCopyToParts - unreverse them.
std::reverse(Parts + RoundParts, Parts + NumParts);
NumParts = RoundParts;
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
}
// The number of parts is a power of 2. Repeatedly bisect the value using
// EXTRACT_ELEMENT.
Parts[0] = DAG.getNode(ISD::BITCAST, DL,
EVT::getIntegerVT(*DAG.getContext(),
ValueVT.getSizeInBits()),
Val);
for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
for (unsigned i = 0; i < NumParts; i += StepSize) {
unsigned ThisBits = StepSize * PartBits / 2;
EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
SDValue &Part0 = Parts[i];
SDValue &Part1 = Parts[i+StepSize/2];
Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
if (ThisBits == PartBits && ThisVT != PartVT) {
Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
}
}
}
if (DAG.getDataLayout().isBigEndian())
std::reverse(Parts, Parts + OrigNumParts);
}
/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V) {
EVT ValueVT = Val.getValueType();
assert(ValueVT.isVector() && "Not a vector");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (NumParts == 1) {
EVT PartEVT = PartVT;
if (PartEVT == ValueVT) {
// Nothing to do.
} else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
// Bitconvert vector->vector case.
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (PartVT.isVector() &&
PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
EVT ElementVT = PartVT.getVectorElementType();
// Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
// undef elements.
SmallVector<SDValue, 16> Ops;
for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
Ops.push_back(DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, DL, ElementVT, Val,
DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
for (unsigned i = ValueVT.getVectorNumElements(),
e = PartVT.getVectorNumElements(); i != e; ++i)
Ops.push_back(DAG.getUNDEF(ElementVT));
Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, Ops);
// FIXME: Use CONCAT for 2x -> 4x.
//SDValue UndefElts = DAG.getUNDEF(VectorTy);
//Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
} else if (PartVT.isVector() &&
PartEVT.getVectorElementType().bitsGE(
ValueVT.getVectorElementType()) &&
PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
// Promoted vector extract
bool Smaller = PartEVT.bitsLE(ValueVT);
Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
DL, PartVT, Val);
} else {
// Vector -> scalar conversion.
assert(ValueVT.getVectorNumElements() == 1 &&
"Only trivial vector-to-scalar conversions should get here!");
Val = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
bool Smaller = ValueVT.bitsLE(PartVT);
Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
DL, PartVT, Val);
}
Parts[0] = Val;
return;
}
// Handle a multi-element vector.
EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
IntermediateVT,
NumIntermediates, RegisterVT);
unsigned NumElements = ValueVT.getVectorNumElements();
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
// Split the vector into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i) {
if (IntermediateVT.isVector())
Ops[i] =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
DAG.getConstant(i * (NumElements / NumIntermediates), DL,
TLI.getVectorIdxTy(DAG.getDataLayout())));
else
Ops[i] = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
// Split the intermediate operands into legal parts.
if (NumParts == NumIntermediates) {
// If the register was not expanded, promote or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
} else if (NumParts > 0) {
// If the intermediate type was expanded, split the value into
// legal parts.
assert(NumIntermediates != 0 && "division by zero");
assert(NumParts % NumIntermediates == 0 &&
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
}
}
RegsForValue::RegsForValue() {}
RegsForValue::RegsForValue(const SmallVector<unsigned, 4> ®s, MVT regvt,
EVT valuevt)
: ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
const DataLayout &DL, unsigned Reg, Type *Ty) {
ComputeValueVTs(TLI, DL, Ty, ValueVTs);
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
EVT ValueVT = ValueVTs[Value];
unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
RegVTs.push_back(RegisterVT);
Reg += NumRegs;
}
}
/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
/// this value and returns the result as a ValueVT value. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
FunctionLoweringInfo &FuncInfo,
SDLoc dl,
SDValue &Chain, SDValue *Flag,
const Value *V) const {
// A Value with type {} or [0 x %t] needs no registers.
if (ValueVTs.empty())
return SDValue();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Assemble the legal parts into the final values.
SmallVector<SDValue, 4> Values(ValueVTs.size());
SmallVector<SDValue, 8> Parts;
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
// Copy the legal parts from the registers.
EVT ValueVT = ValueVTs[Value];
unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
MVT RegisterVT = RegVTs[Value];
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue P;
if (!Flag) {
P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
} else {
P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
*Flag = P.getValue(2);
}
Chain = P.getValue(1);
Parts[i] = P;
// If the source register was virtual and if we know something about it,
// add an assert node.
if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
!RegisterVT.isInteger() || RegisterVT.isVector())
continue;
const FunctionLoweringInfo::LiveOutInfo *LOI =
FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
if (!LOI)
continue;
unsigned RegSize = RegisterVT.getSizeInBits();
unsigned NumSignBits = LOI->NumSignBits;
unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();
if (NumZeroBits == RegSize) {
// The current value is a zero.
// Explicitly express that as it would be easier for
// optimizations to kick in.
Parts[i] = DAG.getConstant(0, dl, RegisterVT);
continue;
}
// FIXME: We capture more information than the dag can represent. For
// now, just use the tightest assertzext/assertsext possible.
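// Walk the candidate widths from narrow to wide below and take the first
// one the known bits justify; e.g. with a 32-bit register and
// NumSignBits == 25, NumSignBits > RegSize-8 holds, giving AssertSext
// from i8.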
bool isSExt = true;
EVT FromVT(MVT::Other);
if (NumSignBits == RegSize)
isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
else if (NumZeroBits >= RegSize-1)
isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
else if (NumSignBits > RegSize-8)
isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
else if (NumZeroBits >= RegSize-8)
isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
else if (NumSignBits > RegSize-16)
isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
else if (NumZeroBits >= RegSize-16)
isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
else if (NumSignBits > RegSize-32)
isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
else if (NumZeroBits >= RegSize-32)
isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
else
continue;
// Add an assertion node.
assert(FromVT != MVT::Other);
Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
RegisterVT, P, DAG.getValueType(FromVT));
}
Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
NumRegs, RegisterVT, ValueVT, V);
Part += NumRegs;
Parts.clear();
}
return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}
/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
SDValue &Chain, SDValue *Flag, const Value *V,
ISD::NodeType PreferredExtendType) const {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
ISD::NodeType ExtendKind = PreferredExtendType;
// Get the list of the value's legal parts.
unsigned NumRegs = Regs.size();
SmallVector<SDValue, 8> Parts(NumRegs);
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
EVT ValueVT = ValueVTs[Value];
unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
MVT RegisterVT = RegVTs[Value];
if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
ExtendKind = ISD::ZERO_EXTEND;
getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
&Parts[Part], NumParts, RegisterVT, V, ExtendKind);
Part += NumParts;
}
// Copy the parts into the registers.
SmallVector<SDValue, 8> Chains(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue Part;
if (!Flag) {
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
} else {
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
*Flag = Part.getValue(1);
}
Chains[i] = Part.getValue(0);
}
if (NumRegs == 1 || Flag)
// If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
// flagged to it. That is, the CopyToReg nodes and the user are considered
// a single scheduling unit. If we create a TokenFactor and return it as
// chain, then the TokenFactor is both a predecessor (operand) of the
// user as well as a successor (the TF operands are flagged to the user).
// c1, f1 = CopyToReg
// c2, f2 = CopyToReg
// c3 = TokenFactor c1, c2
// ...
// = op c3, ..., f2
Chain = Chains[NumRegs-1];
else
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list. This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
unsigned MatchingIdx, SDLoc dl,
SelectionDAG &DAG,
std::vector<SDValue> &Ops) const {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
if (HasMatching)
Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
else if (!Regs.empty() &&
TargetRegisterInfo::isVirtualRegister(Regs.front())) {
// Put the register class of the virtual registers in the flag word. That
// way, later passes can recompute register class constraints for inline
// assembly as well as normal instructions.
// Don't do this for tied operands that can use the regclass information
// from the def.
const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
}
SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
Ops.push_back(Res);
unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
MVT RegisterVT = RegVTs[Value];
for (unsigned i = 0; i != NumRegs; ++i) {
assert(Reg < Regs.size() && "Mismatch in # registers expected");
unsigned TheReg = Regs[Reg++];
Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
// If we clobbered the stack pointer, MFI should know about it.
assert(DAG.getMachineFunction().getFrameInfo()->
hasOpaqueSPAdjustment());
}
}
}
}
void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
const TargetLibraryInfo *li) {
AA = &aa;
GFI = gfi;
LibInfo = li;
DL = &DAG.getDataLayout();
Context = DAG.getContext();
LPadToCallSiteMap.clear();
}
/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGBuilder::clear() {
NodeMap.clear();
UnusedArgNodeMap.clear();
PendingLoads.clear();
PendingExports.clear();
CurInst = nullptr;
HasTailCall = false;
SDNodeOrder = LowestSDNodeOrder;
StatepointLowering.clear();
}
/// clearDanglingDebugInfo - Clear the dangling debug information
/// map. This function is separated from the clear so that debug
/// information that is dangling in a basic block can be properly
/// resolved in a different basic block. This allows the
/// SelectionDAG to resolve dangling debug information attached
/// to PHI nodes.
void SelectionDAGBuilder::clearDanglingDebugInfo() {
DanglingDebugInfoMap.clear();
}
/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
///
SDValue SelectionDAGBuilder::getRoot() {
if (PendingLoads.empty())
return DAG.getRoot();
if (PendingLoads.size() == 1) {
SDValue Root = PendingLoads[0];
DAG.setRoot(Root);
PendingLoads.clear();
return Root;
}
// Otherwise, we have to make a token factor node.
SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
PendingLoads);
PendingLoads.clear();
DAG.setRoot(Root);
return Root;
}
/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue SelectionDAGBuilder::getControlRoot() {
SDValue Root = DAG.getRoot();
if (PendingExports.empty())
return Root;
// Turn all of the CopyToReg chains into one factored node.
if (Root.getOpcode() != ISD::EntryToken) {
unsigned i = 0, e = PendingExports.size();
for (; i != e; ++i) {
assert(PendingExports[i].getNode()->getNumOperands() > 1);
if (PendingExports[i].getNode()->getOperand(0) == Root)
break; // Don't add the root if we already indirectly depend on it.
}
if (i == e)
PendingExports.push_back(Root);
}
Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
PendingExports);
PendingExports.clear();
DAG.setRoot(Root);
return Root;
}
void SelectionDAGBuilder::visit(const Instruction &I) {
// Set up outgoing PHI node register values before emitting the terminator.
if (isa<TerminatorInst>(&I))
HandlePHINodesInSuccessorBlocks(I.getParent());
++SDNodeOrder;
CurInst = &I;
visit(I.getOpcode(), I);
if (!isa<TerminatorInst>(&I) && !HasTailCall)
CopyToExportRegsIfNeeded(&I);
CurInst = nullptr;
}
void SelectionDAGBuilder::visitPHI(const PHINode &) {
llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}
void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
// Note: this doesn't use InstVisitor, because it has to work with
// ConstantExpr's in addition to instructions.
switch (Opcode) {
default: llvm_unreachable("Unknown instruction type encountered!");
// Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
}
}
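// Illustrative expansion of the switch above (one case per opcode listed in
// Instruction.def); for example, the Add entry expands to:
//   case Instruction::Add: visitAdd((const BinaryOperator&)I); break;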
// resolveDanglingDebugInfo - If we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
SDValue Val) {
DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
if (DDI.getDI()) {
const DbgValueInst *DI = DDI.getDI();
DebugLoc dl = DDI.getdl();
unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
DILocalVariable *Variable = DI->getVariable();
DIExpression *Expr = DI->getExpression();
assert(Variable->isValidLocationForIntrinsic(dl) &&
"Expected inlined-at fields to agree");
uint64_t Offset = DI->getOffset();
// A dbg.value for an alloca is always indirect.
bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
SDDbgValue *SDV;
if (Val.getNode()) {
if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, Offset, IsIndirect,
Val)) {
SDV = DAG.getDbgValue(Variable, Expr, Val.getNode(), Val.getResNo(),
IsIndirect, Offset, dl, DbgSDNodeOrder);
DAG.AddDbgValue(SDV, Val.getNode(), false);
}
} else
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
DanglingDebugInfoMap[V] = DanglingDebugInfo();
}
}
/// getCopyFromRegs - If there was a virtual register allocated for the value V,
/// emit a CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
SDValue Result;
if (It != FuncInfo.ValueMap.end()) {
unsigned InReg = It->second;
RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
DAG.getDataLayout(), InReg, Ty);
SDValue Chain = DAG.getEntryNode();
Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
resolveDanglingDebugInfo(V, Result);
}
return Result;
}
/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
// If we already have an SDValue for this value, use it. It's important
// to do this first, so that we don't create a CopyFromReg if we already
// have a regular SDValue.
SDValue &N = NodeMap[V];
if (N.getNode()) return N;
// If there's a virtual register allocated and initialized for this
// value, use it.
SDValue copyFromReg = getCopyFromRegs(V, V->getType());
if (copyFromReg.getNode()) {
return copyFromReg;
}
// Otherwise create a new SDValue and remember it.
SDValue Val = getValueImpl(V);
NodeMap[V] = Val;
resolveDanglingDebugInfo(V, Val);
return Val;
}
// Return true if an SDValue exists for the given Value
bool SelectionDAGBuilder::findValue(const Value *V) const {
return (NodeMap.find(V) != NodeMap.end()) ||
(FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}
/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
// If we already have an SDValue for this value, use it.
SDValue &N = NodeMap[V];
if (N.getNode()) {
if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
// Remove the debug location from the node as the node is about to be used
// in a location which may differ from the original debug location. This
// is relevant to Constant and ConstantFP nodes because they can appear
// as constant expressions inside PHI nodes.
N->setDebugLoc(DebugLoc());
}
return N;
}
// Otherwise create a new SDValue and remember it.
SDValue Val = getValueImpl(V);
NodeMap[V] = Val;
resolveDanglingDebugInfo(V, Val);
return Val;
}
/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (const Constant *C = dyn_cast<Constant>(V)) {
EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
return DAG.getConstant(*CI, getCurSDLoc(), VT);
if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
if (isa<ConstantPointerNull>(C)) {
unsigned AS = V->getType()->getPointerAddressSpace();
return DAG.getConstant(0, getCurSDLoc(),
TLI.getPointerTy(DAG.getDataLayout(), AS));
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
return DAG.getUNDEF(VT);
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
visit(CE->getOpcode(), *CE);
SDValue N1 = NodeMap[V];
assert(N1.getNode() && "visit didn't populate the NodeMap!");
return N1;
}
if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
SmallVector<SDValue, 4> Constants;
for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
OI != OE; ++OI) {
SDNode *Val = getValue(*OI).getNode();
// If the operand is an empty aggregate, there are no values.
if (!Val) continue;
// Add each leaf value from the operand to the Constants list
// to form a flattened list of all the values.
for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
Constants.push_back(SDValue(Val, i));
}
return DAG.getMergeValues(Constants, getCurSDLoc());
}
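    // Illustrative example for the aggregate case above: the constant
    // {i32 1, {i32 2, i32 3}} flattens to the three leaf values 1, 2 and 3
    // before being merged into a single node.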
if (const ConstantDataSequential *CDS =
dyn_cast<ConstantDataSequential>(C)) {
SmallVector<SDValue, 4> Ops;
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
      // Add each leaf value from the element to the Ops list
      // to form a flattened list of all the values.
for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
Ops.push_back(SDValue(Val, i));
}
if (isa<ArrayType>(CDS->getType()))
return DAG.getMergeValues(Ops, getCurSDLoc());
return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
VT, Ops);
}
if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
"Unknown struct or array constant!");
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
unsigned NumElts = ValueVTs.size();
if (NumElts == 0)
return SDValue(); // empty struct
SmallVector<SDValue, 4> Constants(NumElts);
for (unsigned i = 0; i != NumElts; ++i) {
EVT EltVT = ValueVTs[i];
if (isa<UndefValue>(C))
Constants[i] = DAG.getUNDEF(EltVT);
else if (EltVT.isFloatingPoint())
Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
else
Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
}
return DAG.getMergeValues(Constants, getCurSDLoc());
}
if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
return DAG.getBlockAddress(BA, VT);
VectorType *VecTy = cast<VectorType>(V->getType());
unsigned NumElements = VecTy->getNumElements();
// Now that we know the number and type of the elements, get that number of
// elements into the Ops array based on what kind of constant it is.
SmallVector<SDValue, 16> Ops;
if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
for (unsigned i = 0; i != NumElements; ++i)
Ops.push_back(getValue(CV->getOperand(i)));
} else {
assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
EVT EltVT =
TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
SDValue Op;
if (EltVT.isFloatingPoint())
Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
else
Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
Ops.assign(NumElements, Op);
}
// Create a BUILD_VECTOR node.
return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops);
}
// If this is a static alloca, generate it as the frameindex instead of
// computation.
if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end())
return DAG.getFrameIndex(SI->second,
TLI.getPointerTy(DAG.getDataLayout()));
}
// If this is an instruction which fast-isel has deferred, select it now.
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
Inst->getType());
SDValue Chain = DAG.getEntryNode();
return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
}
llvm_unreachable("Can't get register for value!");
}
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
auto &DL = DAG.getDataLayout();
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
SmallVector<SDValue, 8> OutVals;
if (!FuncInfo.CanLowerReturn) {
unsigned DemoteReg = FuncInfo.DemoteRegister;
const Function *F = I.getParent()->getParent();
// Emit a store of the return value through the virtual register.
// Leave Outs empty so that LowerReturn won't try to load return
// registers the usual way.
SmallVector<EVT, 1> PtrValueVTs;
ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
PtrValueVTs);
SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
SDValue RetOp = getValue(I.getOperand(0));
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
SmallVector<SDValue, 4> Chains(NumValues);
for (unsigned i = 0; i != NumValues; ++i) {
SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
RetPtr.getValueType(), RetPtr,
DAG.getIntPtrConstant(Offsets[i],
getCurSDLoc()));
Chains[i] =
DAG.getStore(Chain, getCurSDLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + i),
// FIXME: better loc info would be nice.
Add, MachinePointerInfo(), false, false, 0);
}
Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
MVT::Other, Chains);
} else if (I.getNumOperands() != 0) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues) {
SDValue RetOp = getValue(I.getOperand(0));
const Function *F = I.getParent()->getParent();
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
Attribute::SExt))
ExtendKind = ISD::SIGN_EXTEND;
else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
Attribute::ZExt))
ExtendKind = ISD::ZERO_EXTEND;
LLVMContext &Context = F->getContext();
bool RetInReg = F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
Attribute::InReg);
for (unsigned j = 0; j != NumValues; ++j) {
EVT VT = ValueVTs[j];
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
VT = TLI.getTypeForExtArgOrReturn(Context, VT, ExtendKind);
unsigned NumParts = TLI.getNumRegisters(Context, VT);
MVT PartVT = TLI.getRegisterType(Context, VT);
SmallVector<SDValue, 4> Parts(NumParts);
getCopyToParts(DAG, getCurSDLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
&Parts[0], NumParts, PartVT, &I, ExtendKind);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
if (RetInReg)
Flags.setInReg();
// Propagate extension type if any
if (ExtendKind == ISD::SIGN_EXTEND)
Flags.setSExt();
else if (ExtendKind == ISD::ZERO_EXTEND)
Flags.setZExt();
for (unsigned i = 0; i < NumParts; ++i) {
Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
VT, /*isfixed=*/true, 0, 0));
OutVals.push_back(Parts[i]);
}
}
}
}
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
CallingConv::ID CallConv =
DAG.getMachineFunction().getFunction()->getCallingConv();
Chain = DAG.getTargetLoweringInfo().LowerReturn(
Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
// Verify that the target's LowerReturn behaved as expected.
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
"LowerReturn didn't return a valid chain!");
// Update the DAG with the new chain value resulting from return lowering.
DAG.setRoot(Chain);
}
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
// Skip empty types
if (V->getType()->isEmptyTy())
return;
DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
if (VMI != FuncInfo.ValueMap.end()) {
assert(!V->use_empty() && "Unused value assigned virtual registers!");
CopyValueToVirtualRegister(V, VMI->second);
}
}
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
// No need to export constants.
if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
// Already exported?
if (FuncInfo.isExportedInst(V)) return;
unsigned Reg = FuncInfo.InitializeRegForValue(V);
CopyValueToVirtualRegister(V, Reg);
}
bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
const BasicBlock *FromBB) {
// The operands of the setcc have to be in this block. We don't know
// how to export them from some other block.
if (const Instruction *VI = dyn_cast<Instruction>(V)) {
// Can export from current BB.
if (VI->getParent() == FromBB)
return true;
// Is already exported, noop.
return FuncInfo.isExportedInst(V);
}
// If this is an argument, we can export it if the BB is the entry block or
// if it is already exported.
if (isa<Argument>(V)) {
if (FromBB == &FromBB->getParent()->getEntryBlock())
return true;
// Otherwise, can only export this if it is already exported.
return FuncInfo.isExportedInst(V);
}
// Otherwise, constants can always be exported.
return true;
}
/// Return the branch probability calculated by BranchProbabilityInfo for IR blocks.
uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const {
BranchProbabilityInfo *BPI = FuncInfo.BPI;
if (!BPI)
return 0;
const BasicBlock *SrcBB = Src->getBasicBlock();
const BasicBlock *DstBB = Dst->getBasicBlock();
return BPI->getEdgeWeight(SrcBB, DstBB);
}
void SelectionDAGBuilder::
addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
uint32_t Weight /* = 0 */) {
if (!Weight)
Weight = getEdgeWeight(Src, Dst);
Src->addSuccessor(Dst, Weight);
}
static bool InBlock(const Value *V, const BasicBlock *BB) {
if (const Instruction *I = dyn_cast<Instruction>(V))
return I->getParent() == BB;
return true;
}
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
///
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB,
uint32_t TWeight,
uint32_t FWeight) {
const BasicBlock *BB = CurBB->getBasicBlock();
// If the leaf of the tree is a comparison, merge the condition into
// the caseblock.
if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
// The operands of the cmp have to be in this block. We don't know
// how to export them from some other block. If this is the first block
// of the sequence, no exporting is needed.
if (CurBB == SwitchBB ||
(isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
ISD::CondCode Condition;
if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
Condition = getICmpCondCode(IC->getPredicate());
} else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
Condition = getFCmpCondCode(FC->getPredicate());
if (TM.Options.NoNaNsFPMath)
Condition = getFCmpCodeWithoutNaN(Condition);
} else {
(void)Condition; // silence warning.
llvm_unreachable("Unknown compare instruction");
}
CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
TBB, FBB, CurBB, TWeight, FWeight);
SwitchCases.push_back(CB);
return;
}
}
// Create a CaseBlock record representing this branch.
CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
nullptr, TBB, FBB, CurBB, TWeight, FWeight);
SwitchCases.push_back(CB);
}
/// Scale down both weights to fit into uint32_t.
static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
uint32_t Scale = (NewMax / UINT32_MAX) + 1;
NewTrue = NewTrue / Scale;
NewFalse = NewFalse / Scale;
}
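// Worked example for ScaleWeights (illustrative): with NewTrue = 2^33 and
// NewFalse = 2^31, Scale = 2^33 / UINT32_MAX + 1 = 3, so the weights become
// 2^33/3 and 2^31/3 -- both fit in uint32_t and their ratio is preserved.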
/// FindMergedConditions - If Cond is an expression like (X & Y) or (X | Y),
/// recursively lower it as a sequence of conditional branches, creating a new
/// block for the right-hand operand; otherwise emit a single merged branch.
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB,
unsigned Opc, uint32_t TWeight,
uint32_t FWeight) {
// If this node is not part of the or/and tree, emit it as a branch.
const Instruction *BOp = dyn_cast<Instruction>(Cond);
if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
(unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
BOp->getParent() != CurBB->getBasicBlock() ||
!InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
!InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
TWeight, FWeight);
return;
}
// Create TmpBB after CurBB.
MachineFunction::iterator BBI = CurBB;
MachineFunction &MF = DAG.getMachineFunction();
MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
CurBB->getParent()->insert(++BBI, TmpBB);
if (Opc == Instruction::Or) {
// Codegen X | Y as:
// BB1:
// jmp_if_X TBB
// jmp TmpBB
// TmpBB:
// jmp_if_Y TBB
// jmp FBB
//
// We have flexibility in setting Prob for BB1 and Prob for TmpBB.
// The requirement is that
// TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
// = TrueProb for original BB.
// Assuming the original weights are A and B, one choice is to set BB1's
// weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
// assumes that
// TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
// Another choice is to assume TrueProb for BB1 equals to TrueProb for
// TmpBB, but the math is more complicated.
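    // Worked check (illustrative): with A = B = 1, BB1 gets weights 1 and 3
    // and TmpBB gets weights 1 and 2, so the overall TrueProb is
    // 1/4 + (3/4)*(1/3) = 1/2, matching the original A/(A+B).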
uint64_t NewTrueWeight = TWeight;
uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight;
ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the LHS condition.
FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
NewTrueWeight, NewFalseWeight);
NewTrueWeight = TWeight;
NewFalseWeight = 2 * (uint64_t)FWeight;
ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the RHS condition into TmpBB.
FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
NewTrueWeight, NewFalseWeight);
} else {
assert(Opc == Instruction::And && "Unknown merge op!");
// Codegen X & Y as:
// BB1:
// jmp_if_X TmpBB
// jmp FBB
// TmpBB:
// jmp_if_Y TBB
// jmp FBB
//
// This requires creation of TmpBB after CurBB.
// We have flexibility in setting Prob for BB1 and Prob for TmpBB.
// The requirement is that
// FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
// = FalseProb for original BB.
// Assuming the original weights are A and B, one choice is to set BB1's
// weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
// assumes that
// FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight;
uint64_t NewFalseWeight = FWeight;
ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the LHS condition.
FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
NewTrueWeight, NewFalseWeight);
NewTrueWeight = 2 * (uint64_t)TWeight;
NewFalseWeight = FWeight;
ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the RHS condition into TmpBB.
FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
NewTrueWeight, NewFalseWeight);
}
}
/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
if (Cases.size() != 2) return true;
// If this is two comparisons of the same values or'd or and'd together, they
// will get folded into a single comparison, so don't emit two blocks.
if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
Cases[0].CmpRHS == Cases[1].CmpRHS) ||
(Cases[0].CmpRHS == Cases[1].CmpLHS &&
Cases[0].CmpLHS == Cases[1].CmpRHS)) {
return false;
}
// Handle: (X != null) | (Y != null) --> (X|Y) != 0
// Handle: (X == null) & (Y == null) --> (X|Y) == 0
if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
Cases[0].CC == Cases[1].CC &&
isa<Constant>(Cases[0].CmpRHS) &&
cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
return false;
if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
return false;
}
return true;
}
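// Illustrative example of the null-test fold above: for
//   br (or (icmp ne X, null), (icmp ne Y, null)), T, F
// the two cases share a null RHS and SETNE, so returning false lets later
// lowering emit a single (X|Y) != 0 test instead of two branch blocks.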
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
MachineBasicBlock *BrMBB = FuncInfo.MBB;
// Update machine-CFG edges.
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
if (I.isUnconditional()) {
// Update machine-CFG edges.
BrMBB->addSuccessor(Succ0MBB);
// If this is not a fall-through branch or optimizations are switched off,
// emit the branch.
if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Succ0MBB)));
return;
}
// If this condition is one of the special cases we handle, do special stuff
// now.
const Value *CondVal = I.getCondition();
MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
// If this is a series of conditions that are or'd or and'd together, emit
// this as a sequence of branches instead of setcc's with and/or operations.
// As long as jumps are not expensive, this should improve performance.
// For example, instead of something like:
// cmp A, B
// C = seteq
// cmp D, E
// F = setle
// or C, F
// jnz foo
// Emit:
// cmp A, B
// je foo
// cmp D, E
// jle foo
//
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
if (!DAG.getTargetLoweringInfo().isJumpExpensive() &&
BOp->hasOneUse() && (BOp->getOpcode() == Instruction::And ||
BOp->getOpcode() == Instruction::Or)) {
FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB),
getEdgeWeight(BrMBB, Succ1MBB));
// If the compares in later blocks need to use values not currently
// exported from this block, export them now. This block should always
// be the first entry.
assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
// Allow some cases to be rejected.
if (ShouldEmitAsBranches(SwitchCases)) {
for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
}
// Emit the branch for this block.
visitSwitchCase(SwitchCases[0], BrMBB);
SwitchCases.erase(SwitchCases.begin());
return;
}
// Okay, we decided not to do this, remove any inserted MBB's and clear
// SwitchCases.
for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
FuncInfo.MF->erase(SwitchCases[i].ThisBB);
SwitchCases.clear();
}
}
// Create a CaseBlock record representing this branch.
CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
nullptr, Succ0MBB, Succ1MBB, BrMBB);
// Use visitSwitchCase to actually insert the fast branch sequence for this
// cond branch.
visitSwitchCase(CB, BrMBB);
}
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
MachineBasicBlock *SwitchBB) {
SDValue Cond;
SDValue CondLHS = getValue(CB.CmpLHS);
SDLoc dl = getCurSDLoc();
// Build the setcc now.
if (!CB.CmpMHS) {
// Fold "(X == true)" to X and "(X == false)" to !X to
// handle common cases produced by branch lowering.
if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
CB.CC == ISD::SETEQ)
Cond = CondLHS;
else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
CB.CC == ISD::SETEQ) {
SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
} else
Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
} else {
assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
SDValue CmpOp = getValue(CB.CmpMHS);
EVT VT = CmpOp.getValueType();
if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
ISD::SETLE);
} else {
SDValue SUB = DAG.getNode(ISD::SUB, dl,
VT, CmpOp, DAG.getConstant(Low, dl, VT));
Cond = DAG.getSetCC(dl, MVT::i1, SUB,
DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
}
}
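  // Illustrative example of the range test above: for a case range [5, 9],
  // "x in [5,9]" lowers to the single unsigned compare (x - 5) <=u 4.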
// Update successor info
addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
// TrueBB and FalseBB are always different unless the incoming IR is
// degenerate. This only happens when running llc on weird IR.
if (CB.TrueBB != CB.FalseBB)
addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
// If the lhs block is the next block, invert the condition so that we can
// fall through to the lhs instead of the rhs block.
if (CB.TrueBB == NextBlock(SwitchBB)) {
std::swap(CB.TrueBB, CB.FalseBB);
SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
}
SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
MVT::Other, getControlRoot(), Cond,
DAG.getBasicBlock(CB.TrueBB));
// Insert the false branch. Do this even if it's a fall through branch,
// this makes it easier to do DAG optimizations which require inverting
// the branch condition.
BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
DAG.getBasicBlock(CB.FalseBB));
DAG.setRoot(BrCond);
}
/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
// Emit the code for the jump table
assert(JT.Reg != -1U && "Should lower JT Header first!");
EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
JT.Reg, PTy);
SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
MVT::Other, Index.getValue(1),
Table, Index);
DAG.setRoot(BrJumpTable);
}
/// visitJumpTableHeader - This function emits the code needed to produce an
/// index into the JumpTable from the value being switched on.
void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
JumpTableHeader &JTH,
MachineBasicBlock *SwitchBB) {
SDLoc dl = getCurSDLoc();
  // Subtract the lowest switch case value from the value being switched on and
  // conditionally branch to the default MBB if the result is greater than the
  // difference between the smallest and largest cases.
SDValue SwitchOp = getValue(JTH.SValue);
EVT VT = SwitchOp.getValueType();
SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(JTH.First, dl, VT));
// The SDNode we just created, which holds the value being switched on minus
// the smallest case value, needs to be copied to a virtual register so it
// can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // may therefore require extension or truncation.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
unsigned JumpTableReg =
FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
JumpTableReg, SwitchOp);
JT.Reg = JumpTableReg;
// Emit the range check for the jump table, and branch to the default block
// for the switch statement if the value being switched on exceeds the largest
// case in the switch.
SDValue CMP = DAG.getSetCC(
dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
Sub.getValueType()),
Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
MVT::Other, CopyTo, CMP,
DAG.getBasicBlock(JT.Default));
// Avoid emitting unnecessary branches to the next block.
if (JT.MBB != NextBlock(SwitchBB))
BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
DAG.getBasicBlock(JT.MBB));
DAG.setRoot(BrCond);
}
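// Illustrative sketch: for switch cases {10, 11, 13}, JTH.First = 10 and
// JTH.Last = 13, so the header emits
//   %idx = sub %val, 10
//   brcond (setugt %idx, 3), %default
// and the jump table block then indexes a four-entry table with %idx.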
/// Codegen a new tail for a stack protector check ParentMBB which has had its
/// tail spliced into a stack protector check success bb.
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
MachineBasicBlock *ParentBB) {
// First create the loads to the guard/stack slot for the comparison.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
int FI = MFI->getStackProtectorIndex();
const Value *IRGuard = SPD.getGuard();
SDValue GuardPtr = getValue(IRGuard);
SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
unsigned Align = DL->getPrefTypeAlignment(IRGuard->getType());
SDValue Guard;
SDLoc dl = getCurSDLoc();
// If GuardReg is set and useLoadStackGuardNode returns true, retrieve the
// guard value from the virtual register holding the value. Otherwise, emit a
// volatile load to retrieve the stack guard value.
unsigned GuardReg = SPD.getGuardReg();
if (GuardReg && TLI.useLoadStackGuardNode())
Guard = DAG.getCopyFromReg(DAG.getEntryNode(), dl, GuardReg,
PtrTy);
else
Guard = DAG.getLoad(PtrTy, dl, DAG.getEntryNode(),
GuardPtr, MachinePointerInfo(IRGuard, 0),
true, false, false, Align);
SDValue StackSlot = DAG.getLoad(PtrTy, dl, DAG.getEntryNode(),
StackSlotPtr,
MachinePointerInfo::getFixedStack(FI),
true, false, false, Align);
// Perform the comparison via a subtract/getsetcc.
EVT VT = Guard.getValueType();
SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot);
SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(),
Sub.getValueType()),
Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
// If the sub is not 0, then we know the guard/stackslot do not equal, so
// branch to failure MBB.
SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
MVT::Other, StackSlot.getOperand(0),
Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
// Otherwise branch to success MBB.
SDValue Br = DAG.getNode(ISD::BR, dl,
MVT::Other, BrCond,
DAG.getBasicBlock(SPD.getSuccessMBB()));
DAG.setRoot(Br);
}
/// Codegen the failure basic block for a stack protector check.
///
/// A failure stack protector machine basic block consists simply of a call to
/// __stack_chk_fail().
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void
SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Chain =
TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
nullptr, 0, false, getCurSDLoc(), false, false).second;
DAG.setRoot(Chain);
}
/// visitBitTestHeader - This function emits the code needed to produce a
/// value suitable for "bit tests".
void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
MachineBasicBlock *SwitchBB) {
SDLoc dl = getCurSDLoc();
// Subtract the minimum value
SDValue SwitchOp = getValue(B.SValue);
EVT VT = SwitchOp.getValueType();
SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(B.First, dl, VT));
// Check range
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue RangeCmp = DAG.getSetCC(
dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
Sub.getValueType()),
Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
// Determine the type of the test operands.
bool UsePtrType = false;
if (!TLI.isTypeLegal(VT))
UsePtrType = true;
else {
for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
        // Switch table case ranges are encoded into a series of masks.
        // Just use the pointer type; it's guaranteed to fit.
UsePtrType = true;
break;
}
}
if (UsePtrType) {
VT = TLI.getPointerTy(DAG.getDataLayout());
Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
}
B.RegVT = VT.getSimpleVT();
B.Reg = FuncInfo.CreateReg(B.RegVT);
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
MachineBasicBlock* MBB = B.Cases[0].ThisBB;
addSuccessorWithWeight(SwitchBB, B.Default);
addSuccessorWithWeight(SwitchBB, MBB);
SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
MVT::Other, CopyTo, RangeCmp,
DAG.getBasicBlock(B.Default));
// Avoid emitting unnecessary branches to the next block.
if (MBB != NextBlock(SwitchBB))
BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
DAG.getBasicBlock(MBB));
DAG.setRoot(BrRange);
}
/// visitBitTestCase - This function produces one "bit test".
void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
MachineBasicBlock* NextMBB,
uint32_t BranchWeightToNext,
unsigned Reg,
BitTestCase &B,
MachineBasicBlock *SwitchBB) {
SDLoc dl = getCurSDLoc();
MVT VT = BB.RegVT;
SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
SDValue Cmp;
unsigned PopCount = countPopulation(B.Mask);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (PopCount == 1) {
// Testing for a single bit; just compare the shift count with what it
// would need to be to shift a 1 bit in that position.
Cmp = DAG.getSetCC(
dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
ISD::SETEQ);
} else if (PopCount == BB.Range) {
// There is only one zero bit in the range, test for it directly.
Cmp = DAG.getSetCC(
dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
ISD::SETNE);
} else {
// Make desired shift
SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
DAG.getConstant(1, dl, VT), ShiftOp);
// Emit bit tests and jumps
SDValue AndOp = DAG.getNode(ISD::AND, dl,
VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
Cmp = DAG.getSetCC(
dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
}
// The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
// The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
MVT::Other, getControlRoot(),
Cmp, DAG.getBasicBlock(B.TargetBB));
// Avoid emitting unnecessary branches to the next block.
if (NextMBB != NextBlock(SwitchBB))
BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
DAG.getBasicBlock(NextMBB));
DAG.setRoot(BrAnd);
}
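// Illustrative example: a case set whose mask is 0b1010 (two bits set, not
// covering the whole range) takes the general path above:
//   %bit = shl 1, %idx
//   %and = and %bit, 0b1010
//   brcond (setne %and, 0), %target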
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
// Retrieve successors.
MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
const Value *Callee(I.getCalledValue());
const Function *Fn = dyn_cast<Function>(Callee);
if (isa<InlineAsm>(Callee))
visitInlineAsm(&I);
else if (Fn && Fn->isIntrinsic()) {
switch (Fn->getIntrinsicID()) {
default:
llvm_unreachable("Cannot invoke this intrinsic");
case Intrinsic::donothing:
// Ignore invokes to @llvm.donothing: jump directly to the next BB.
break;
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
visitPatchpoint(&I, LandingPad);
break;
case Intrinsic::experimental_gc_statepoint:
LowerStatepoint(ImmutableStatepoint(&I), LandingPad);
break;
}
} else
LowerCallTo(&I, getValue(Callee), false, LandingPad);
// If the value of the invoke is used outside of its defining block, make it
// available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during the call to LowerStatepoint.
if (!isStatepoint(I)) {
CopyToExportRegsIfNeeded(&I);
}
// Update successor info
addSuccessorWithWeight(InvokeMBB, Return);
addSuccessorWithWeight(InvokeMBB, LandingPad);
// Drop into normal successor.
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Return)));
}
void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
}
void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
assert(FuncInfo.MBB->isLandingPad() &&
"Call to landingpad not in landing pad!");
MachineBasicBlock *MBB = FuncInfo.MBB;
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
AddLandingPadInfo(LP, MMI, MBB);
// If there aren't registers to copy the values into (e.g., during SjLj
// exceptions), then don't bother to create these DAG nodes.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.getExceptionPointerRegister() == 0 &&
TLI.getExceptionSelectorRegister() == 0)
return;
SmallVector<EVT, 2> ValueVTs;
SDLoc dl = getCurSDLoc();
ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
// Get the two live-in registers as SDValues. The physregs have already been
// copied into virtual registers.
SDValue Ops[2];
if (FuncInfo.ExceptionPointerVirtReg) {
Ops[0] = DAG.getZExtOrTrunc(
DAG.getCopyFromReg(DAG.getEntryNode(), dl,
FuncInfo.ExceptionPointerVirtReg,
TLI.getPointerTy(DAG.getDataLayout())),
dl, ValueVTs[0]);
} else {
Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
}
Ops[1] = DAG.getZExtOrTrunc(
DAG.getCopyFromReg(DAG.getEntryNode(), dl,
FuncInfo.ExceptionSelectorVirtReg,
TLI.getPointerTy(DAG.getDataLayout())),
dl, ValueVTs[1]);
// Merge into one.
SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
DAG.getVTList(ValueVTs), Ops);
setValue(&LP, Res);
}
void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
#ifndef NDEBUG
for (const CaseCluster &CC : Clusters)
assert(CC.Low == CC.High && "Input clusters must be single-case");
#endif
std::sort(Clusters.begin(), Clusters.end(),
[](const CaseCluster &a, const CaseCluster &b) {
return a.Low->getValue().slt(b.Low->getValue());
});
// Merge adjacent clusters with the same destination.
const unsigned N = Clusters.size();
unsigned DstIndex = 0;
for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
CaseCluster &CC = Clusters[SrcIndex];
const ConstantInt *CaseVal = CC.Low;
MachineBasicBlock *Succ = CC.MBB;
if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
(CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
// If this case has the same successor and is a neighbour, merge it into
// the previous cluster.
Clusters[DstIndex - 1].High = CaseVal;
Clusters[DstIndex - 1].Weight += CC.Weight;
assert(Clusters[DstIndex - 1].Weight >= CC.Weight && "Weight overflow!");
} else {
std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
sizeof(Clusters[SrcIndex]));
}
}
Clusters.resize(DstIndex);
}
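// Illustrative example: the single-case clusters {1->A, 2->A, 3->B, 4->B}
// sort and merge into the two ranges {[1,2]->A, [3,4]->B}.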
void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
MachineBasicBlock *Last) {
// Update JTCases.
for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
if (JTCases[i].first.HeaderBB == First)
JTCases[i].first.HeaderBB = Last;
// Update BitTestCases.
for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
if (BitTestCases[i].Parent == First)
BitTestCases[i].Parent = Last;
}
void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
// Update machine-CFG edges with unique successors.
SmallSet<BasicBlock*, 32> Done;
for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
BasicBlock *BB = I.getSuccessor(i);
bool Inserted = Done.insert(BB).second;
if (!Inserted)
continue;
MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
addSuccessorWithWeight(IndirectBrMBB, Succ);
}
DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
MVT::Other, getControlRoot(),
getValue(I.getAddress())));
}
void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
if (DAG.getTarget().Options.TrapUnreachable)
DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
}
void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
Type *Ty = I.getType();
if (isa<Constant>(I.getOperand(0)) &&
I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
SDValue Op2 = getValue(I.getOperand(1));
setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
Op2.getValueType(), Op2));
return;
}
visitBinary(I, ISD::FSUB);
}
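// Illustrative example: "fsub float -0.0, %x" takes the FNEG path above,
// while "fsub float 0.0, %x" does not (it would flip the sign of a zero
// input) and falls through to an ordinary FSUB node.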
void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
bool nuw = false;
bool nsw = false;
bool exact = false;
FastMathFlags FMF;
if (const OverflowingBinaryOperator *OFBinOp =
dyn_cast<const OverflowingBinaryOperator>(&I)) {
nuw = OFBinOp->hasNoUnsignedWrap();
nsw = OFBinOp->hasNoSignedWrap();
}
if (const PossiblyExactOperator *ExactOp =
dyn_cast<const PossiblyExactOperator>(&I))
exact = ExactOp->isExact();
if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I))
FMF = FPOp->getFastMathFlags();
SDNodeFlags Flags;
Flags.setExact(exact);
Flags.setNoSignedWrap(nsw);
Flags.setNoUnsignedWrap(nuw);
if (EnableFMFInDAG) {
Flags.setAllowReciprocal(FMF.allowReciprocal());
Flags.setNoInfs(FMF.noInfs());
Flags.setNoNaNs(FMF.noNaNs());
Flags.setNoSignedZeros(FMF.noSignedZeros());
Flags.setUnsafeAlgebra(FMF.unsafeAlgebra());
}
SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
Op1, Op2, &Flags);
setValue(&I, BinNodeValue);
}
void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
Op2.getValueType(), DAG.getDataLayout());
// Coerce the shift amount to the right type if we can.
if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
unsigned ShiftSize = ShiftTy.getSizeInBits();
unsigned Op2Size = Op2.getValueType().getSizeInBits();
SDLoc DL = getCurSDLoc();
// If the operand is smaller than the shift count type, promote it.
if (ShiftSize > Op2Size)
Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
// If the operand is larger than the shift count type but the shift
// count type has enough bits to represent any shift value, truncate
// it now. This is a common case and it exposes the truncate to
// optimization early.
else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
// Otherwise we'll need to temporarily settle for some other convenient
// type. Type legalization will make adjustments once the shiftee is split.
else
Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
}
bool nuw = false;
bool nsw = false;
bool exact = false;
if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
if (const OverflowingBinaryOperator *OFBinOp =
dyn_cast<const OverflowingBinaryOperator>(&I)) {
nuw = OFBinOp->hasNoUnsignedWrap();
nsw = OFBinOp->hasNoSignedWrap();
}
if (const PossiblyExactOperator *ExactOp =
dyn_cast<const PossiblyExactOperator>(&I))
exact = ExactOp->isExact();
}
SDNodeFlags Flags;
Flags.setExact(exact);
Flags.setNoSignedWrap(nsw);
Flags.setNoUnsignedWrap(nuw);
SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
&Flags);
setValue(&I, Res);
}
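// Illustrative example: shifting an i64 by an i8 amount on a target whose
// shift-amount type is i32 hits the promote path above and zero-extends the
// amount to i32 before the shift node is built.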
void SelectionDAGBuilder::visitSDiv(const User &I) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
SDNodeFlags Flags;
Flags.setExact(isa<PossiblyExactOperator>(&I) &&
cast<PossiblyExactOperator>(&I)->isExact());
setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
Op2, &Flags));
}
void SelectionDAGBuilder::visitICmp(const User &I) {
ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
predicate = IC->getPredicate();
else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
predicate = ICmpInst::Predicate(IC->getPredicate());
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Opcode = getICmpCondCode(predicate);
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
}
void SelectionDAGBuilder::visitFCmp(const User &I) {
FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
predicate = FC->getPredicate();
else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
predicate = FCmpInst::Predicate(FC->getPredicate());
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Condition = getFCmpCondCode(predicate);
if (TM.Options.NoNaNsFPMath)
Condition = getFCmpCodeWithoutNaN(Condition);
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
}
void SelectionDAGBuilder::visitSelect(const User &I) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0) return;
SmallVector<SDValue, 4> Values(NumValues);
SDValue Cond = getValue(I.getOperand(0));
SDValue LHSVal = getValue(I.getOperand(1));
SDValue RHSVal = getValue(I.getOperand(2));
auto BaseOps = {Cond};
ISD::NodeType OpCode = Cond.getValueType().isVector() ?
ISD::VSELECT : ISD::SELECT;
  // Min/max matching is only viable if all output VTs are the same.
  if (std::equal(ValueVTs.begin() + 1, ValueVTs.end(), ValueVTs.begin())) {
Value *LHS, *RHS;
SelectPatternFlavor SPF = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
ISD::NodeType Opc = ISD::DELETED_NODE;
switch (SPF) {
case SPF_UMAX: Opc = ISD::UMAX; break;
case SPF_UMIN: Opc = ISD::UMIN; break;
case SPF_SMAX: Opc = ISD::SMAX; break;
case SPF_SMIN: Opc = ISD::SMIN; break;
default: break;
}
EVT VT = ValueVTs[0];
LLVMContext &Ctx = *DAG.getContext();
auto &TLI = DAG.getTargetLoweringInfo();
while (TLI.getTypeAction(Ctx, VT) == TargetLoweringBase::TypeSplitVector)
VT = TLI.getTypeToTransformTo(Ctx, VT);
if (Opc != ISD::DELETED_NODE && TLI.isOperationLegalOrCustom(Opc, VT) &&
// If the underlying comparison instruction is used by any other instruction,
// the consumed instructions won't be destroyed, so it is not profitable
// to convert to a min/max.
cast<SelectInst>(&I)->getCondition()->hasOneUse()) {
OpCode = Opc;
LHSVal = getValue(LHS);
RHSVal = getValue(RHS);
BaseOps = {};
}
}
for (unsigned i = 0; i != NumValues; ++i) {
SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
Ops);
}
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
DAG.getVTList(ValueVTs), Values));
}
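// Illustrative example: the IR pair "%c = icmp slt %a, %b" /
// "%s = select %c, %a, %b" matches SPF_SMIN above and lowers to a single
// ISD::SMIN node when the target marks SMIN legal or custom for the type
// and %c has no other users.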
void SelectionDAGBuilder::visitTrunc(const User &I) {
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitZExt(const User &I) {
// ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // ZExt also can't be a cast to bool for the same reason, so there's nothing much to do.
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitSExt(const User &I) {
// SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // SExt also can't be a cast to bool for the same reason, so there's nothing much to do.
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPTrunc(const User &I) {
// FPTrunc is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
SDLoc dl = getCurSDLoc();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
DAG.getTargetConstant(
0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
}
void SelectionDAGBuilder::visitFPExt(const User &I) {
// FPExt is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToUI(const User &I) {
// FPToUI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToSI(const User &I) {
// FPToSI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitUIToFP(const User &I) {
// UIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitSIToFP(const User &I) {
// SIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitPtrToInt(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}
void SelectionDAGBuilder::visitIntToPtr(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}
void SelectionDAGBuilder::visitBitCast(const User &I) {
SDValue N = getValue(I.getOperand(0));
SDLoc dl = getCurSDLoc();
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
  // BitCast assures us that source and destination are the same size, so this
  // is either a BITCAST or a no-op.
if (DestVT != N.getValueType())
setValue(&I, DAG.getNode(ISD::BITCAST, dl,
DestVT, N)); // convert types.
// Check if the original LLVM IR Operand was a ConstantInt, because getValue()
// might fold any kind of constant expression to an integer constant and that
  // is not what we are looking for. Only recognize a bitcast of a genuine
// constant integer as an opaque constant.
else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
/*isOpaque*/true));
else
setValue(&I, N); // noop cast.
}
void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const Value *SV = I.getOperand(0);
SDValue N = getValue(SV);
EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
unsigned SrcAS = SV->getType()->getPointerAddressSpace();
unsigned DestAS = I.getType()->getPointerAddressSpace();
if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
setValue(&I, N);
}
void SelectionDAGBuilder::visitInsertElement(const User &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue InVec = getValue(I.getOperand(0));
SDValue InVal = getValue(I.getOperand(1));
SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
TLI.getVectorIdxTy(DAG.getDataLayout()));
setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
TLI.getValueType(DAG.getDataLayout(), I.getType()),
InVec, InVal, InIdx));
}
void SelectionDAGBuilder::visitExtractElement(const User &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue InVec = getValue(I.getOperand(0));
SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
TLI.getVectorIdxTy(DAG.getDataLayout()));
setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
TLI.getValueType(DAG.getDataLayout(), I.getType()),
InVec, InIdx));
}
// Utility for visitShuffleVector - Return true if every element in Mask,
// beginning at position Pos and ending at Pos+Size, either falls within the
// sequential range [Low, Low+Size) or is undef.
static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
unsigned Pos, unsigned Size, int Low) {
for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
if (Mask[i] >= 0 && Mask[i] != Low)
return false;
return true;
}
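// Illustrative example: Mask = <4, -1, 6, 7> with Pos = 0, Size = 4 and
// Low = 4 returns true -- the undef (-1) entry is skipped and the remaining
// entries match the expected consecutive values at their positions.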
void SelectionDAGBuilder::visitShuffleVector(const User &I) {
SDValue Src1 = getValue(I.getOperand(0));
SDValue Src2 = getValue(I.getOperand(1));
SmallVector<int, 8> Mask;
ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
unsigned MaskNumElts = Mask.size();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
EVT SrcVT = Src1.getValueType();
unsigned SrcNumElts = SrcVT.getVectorNumElements();
if (SrcNumElts == MaskNumElts) {
setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
&Mask[0]));
return;
}
// Normalize the shuffle vector since mask and vector length don't match.
if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
// Mask is longer than the source vectors, and its length is a multiple of
// the source vector length. We can use concatenation to make the mask and
// vector lengths match.
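// For example (illustrative IR): shuffling two <2 x i32> vectors with mask
// <0, 1, 2, 3> selects all of Src1 followed by all of Src2 and lowers
// directly to CONCAT_VECTORS(Src1, Src2); mask <2, 3, 0, 1> is the same
// concatenation with the operands swapped.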
if (SrcNumElts*2 == MaskNumElts) {
// First check for Src1 in low and Src2 in high
if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
// The shuffle is concatenating two vectors together.
setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
VT, Src1, Src2));
return;
}
// Then check for Src2 in low and Src1 in high
if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
// The shuffle is concatenating two vectors together.
setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
VT, Src2, Src1));
return;
}
}
// Pad both vectors with undefs to make them the same length as the mask.
unsigned NumConcat = MaskNumElts / SrcNumElts;
bool Src1U = Src1.getOpcode() == ISD::UNDEF;
bool Src2U = Src2.getOpcode() == ISD::UNDEF;
SDValue UndefVal = DAG.getUNDEF(SrcVT);
SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
MOps1[0] = Src1;
MOps2[0] = Src2;
Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
getCurSDLoc(), VT, MOps1);
Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
getCurSDLoc(), VT, MOps2);
// Readjust mask for new input vector length.
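// For example, with SrcNumElts == 2 and MaskNumElts == 4 the padded Src2
// now starts at element 4 instead of element 2, so a mask entry of 2 (the
// first element of the original Src2) becomes 2 - (2 - 4) == 4.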
SmallVector<int, 8> MappedOps;
for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
if (Idx >= (int)SrcNumElts)
Idx -= SrcNumElts - MaskNumElts;
MappedOps.push_back(Idx);
}
setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
&MappedOps[0]));
return;
}
if (SrcNumElts > MaskNumElts) {
// Analyze the access pattern of the vector to see if we can extract
// two subvectors and do the shuffle. The analysis is done by calculating
// the range of elements the mask accesses on both vectors.
int MinRange[2] = { static_cast<int>(SrcNumElts),
static_cast<int>(SrcNumElts)};
int MaxRange[2] = {-1, -1};
for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
unsigned Input = 0;
if (Idx < 0)
continue;
if (Idx >= (int)SrcNumElts) {
Input = 1;
Idx -= SrcNumElts;
}
if (Idx > MaxRange[Input])
MaxRange[Input] = Idx;
if (Idx < MinRange[Input])
MinRange[Input] = Idx;
}
// Check if the access is smaller than the vector size and whether we can
// find a reasonable extract index.
int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Cannot extract.
int StartIdx[2]; // StartIdx to extract from
for (unsigned Input = 0; Input < 2; ++Input) {
if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
RangeUse[Input] = 0; // Unused
StartIdx[Input] = 0;
continue;
}
// Find a good start index that is a multiple of the mask length. Then
// see if the rest of the elements are in range.
StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
StartIdx[Input] + MaskNumElts <= SrcNumElts)
RangeUse[Input] = 1; // Extract from a multiple of the mask length.
}
if (RangeUse[0] == 0 && RangeUse[1] == 0) {
setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
return;
}
if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
// Extract appropriate subvector and generate a vector shuffle
for (unsigned Input = 0; Input < 2; ++Input) {
SDValue &Src = Input == 0 ? Src1 : Src2;
if (RangeUse[Input] == 0)
Src = DAG.getUNDEF(VT);
else {
SDLoc dl = getCurSDLoc();
Src = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, VT, Src,
DAG.getConstant(StartIdx[Input], dl,
TLI.getVectorIdxTy(DAG.getDataLayout())));
}
}
// Calculate new mask.
SmallVector<int, 8> MappedOps;
for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
if (Idx >= 0) {
if (Idx < (int)SrcNumElts)
Idx -= StartIdx[0];
else
Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
}
MappedOps.push_back(Idx);
}
setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
&MappedOps[0]));
return;
}
}
// We can't use either concat vectors or extract subvectors, so fall back to
// replacing the shuffle with extract and build vector.
EVT EltVT = VT.getVectorElementType();
EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
SDLoc dl = getCurSDLoc();
SmallVector<SDValue,8> Ops;
for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
SDValue Res;
if (Idx < 0) {
Res = DAG.getUNDEF(EltVT);
} else {
SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
EltVT, Src, DAG.getConstant(Idx, dl, IdxVT));
}
Ops.push_back(Res);
}
setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops));
}
void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Value *Op1 = I.getOperand(1);
Type *AggTy = I.getType();
Type *ValTy = Op1->getType();
bool IntoUndef = isa<UndefValue>(Op0);
bool FromUndef = isa<UndefValue>(Op1);
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 4> AggValueVTs;
ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
SmallVector<EVT, 4> ValValueVTs;
ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
unsigned NumAggValues = AggValueVTs.size();
unsigned NumValValues = ValValueVTs.size();
SmallVector<SDValue, 4> Values(NumAggValues);
// Ignore an insertvalue that produces an empty object
if (!NumAggValues) {
setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
return;
}
SDValue Agg = getValue(Op0);
unsigned i = 0;
// Copy the beginning value(s) from the original aggregate.
for (; i != LinearIndex; ++i)
Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
SDValue(Agg.getNode(), Agg.getResNo() + i);
// Copy values from the inserted value(s).
if (NumValValues) {
SDValue Val = getValue(Op1);
for (; i != LinearIndex + NumValValues; ++i)
Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
}
// Copy remaining value(s) from the original aggregate.
for (; i != NumAggValues; ++i)
Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
SDValue(Agg.getNode(), Agg.getResNo() + i);
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
DAG.getVTList(AggValueVTs), Values));
}
void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
const Value *Op0 = I.getOperand(0);
Type *AggTy = Op0->getType();
Type *ValTy = I.getType();
bool OutOfUndef = isa<UndefValue>(Op0);
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 4> ValValueVTs;
ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
unsigned NumValValues = ValValueVTs.size();
// Ignore a extractvalue that produces an empty object
if (!NumValValues) {
setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
return;
}
SmallVector<SDValue, 4> Values(NumValValues);
SDValue Agg = getValue(Op0);
// Copy out the selected value(s).
for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
Values[i - LinearIndex] =
OutOfUndef ?
DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
SDValue(Agg.getNode(), Agg.getResNo() + i);
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
DAG.getVTList(ValValueVTs), Values));
}
void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
Value *Op0 = I.getOperand(0);
// Note that the pointer operand may be a vector of pointers. Take the scalar
// element which holds a pointer.
Type *Ty = Op0->getType()->getScalarType();
unsigned AS = Ty->getPointerAddressSpace();
SDValue N = getValue(Op0);
SDLoc dl = getCurSDLoc();
// Normalize Vector GEP - all scalar operands should be converted to
// splat vectors.
unsigned VectorWidth = I.getType()->isVectorTy() ?
cast<VectorType>(I.getType())->getVectorNumElements() : 0;
if (VectorWidth && !N.getValueType().isVector()) {
MVT VT = MVT::getVectorVT(N.getValueType().getSimpleVT(), VectorWidth);
SmallVector<SDValue, 16> Ops(VectorWidth, N);
N = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
const Value *Idx = *OI;
if (StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
if (Field) {
// N = N + Offset
uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
DAG.getConstant(Offset, dl, N.getValueType()));
}
Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
MVT PtrTy =
DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout(), AS);
unsigned PtrSize = PtrTy.getSizeInBits();
APInt ElementSize(PtrSize, DL->getTypeAllocSize(Ty));
// If this is a scalar constant or a splat vector of constants,
// handle it quickly.
const auto *CI = dyn_cast<ConstantInt>(Idx);
if (!CI && isa<ConstantDataVector>(Idx) &&
cast<ConstantDataVector>(Idx)->getSplatValue())
CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
if (CI) {
if (CI->isZero())
continue;
APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
SDValue OffsVal = VectorWidth ?
DAG.getConstant(Offs, dl, MVT::getVectorVT(PtrTy, VectorWidth)) :
DAG.getConstant(Offs, dl, PtrTy);
N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal);
continue;
}
// N = N + Idx * ElementSize;
SDValue IdxN = getValue(Idx);
if (!IdxN.getValueType().isVector() && VectorWidth) {
MVT VT = MVT::getVectorVT(IdxN.getValueType().getSimpleVT(), VectorWidth);
SmallVector<SDValue, 16> Ops(VectorWidth, IdxN);
IdxN = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
// If the index is smaller or larger than intptr_t, truncate or extend
// it.
IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
// If this is a multiply by a power of two, turn it into a shl
// immediately. This is a very common case.
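// For example, indexing an array of i64 (ElementSize == 8) becomes
// IdxN << 3 rather than IdxN * 8.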
if (ElementSize != 1) {
if (ElementSize.isPowerOf2()) {
unsigned Amt = ElementSize.logBase2();
IdxN = DAG.getNode(ISD::SHL, dl,
N.getValueType(), IdxN,
DAG.getConstant(Amt, dl, IdxN.getValueType()));
} else {
SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
IdxN = DAG.getNode(ISD::MUL, dl,
N.getValueType(), IdxN, Scale);
}
}
N = DAG.getNode(ISD::ADD, dl,
N.getValueType(), N, IdxN);
}
}
setValue(&I, N);
}
void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
// If this is a fixed sized alloca in the entry block of the function,
// allocate it statically on the stack.
if (FuncInfo.StaticAllocaMap.count(&I))
return; // getValue will auto-populate this.
SDLoc dl = getCurSDLoc();
Type *Ty = I.getAllocatedType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
auto &DL = DAG.getDataLayout();
uint64_t TySize = DL.getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
SDValue AllocSize = getValue(I.getArraySize());
EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
if (AllocSize.getValueType() != IntPtr)
AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
AllocSize,
DAG.getConstant(TySize, dl, IntPtr));
// Handle alignment. If the requested alignment is less than or equal to
// the stack alignment, ignore it. If the size is greater than or equal to
// the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
unsigned StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlignment();
if (Align <= StackAlign)
Align = 0;
// Round the size of the allocation up to the stack alignment size
// by adding StackAlign-1 to the size.
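// For example, with StackAlign == 16 a request for 20 bytes becomes
// (20 + 15) & ~15 == 32.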
AllocSize = DAG.getNode(ISD::ADD, dl,
AllocSize.getValueType(), AllocSize,
DAG.getIntPtrConstant(StackAlign - 1, dl));
// Mask out the low bits for alignment purposes.
AllocSize = DAG.getNode(ISD::AND, dl,
AllocSize.getValueType(), AllocSize,
DAG.getIntPtrConstant(~(uint64_t)(StackAlign - 1),
dl));
SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align, dl) };
SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
setValue(&I, DSA);
DAG.setRoot(DSA.getValue(1));
assert(FuncInfo.MF->getFrameInfo()->hasVarSizedObjects());
}
void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
if (I.isAtomic())
return visitAtomicLoad(I);
const Value *SV = I.getOperand(0);
SDValue Ptr = getValue(SV);
Type *Ty = I.getType();
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
// The IR notion of invariant_load only guarantees that all *non-faulting*
// invariant loads result in the same value. The MI notion of invariant load
// guarantees that the load can be legally moved to any location within its
// containing function. The MI notion of invariant_load is stronger than the
// IR notion of invariant_load -- an MI invariant_load is an IR invariant_load
// with a guarantee that the location being loaded from is dereferenceable
// throughout the function's lifetime.
bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr &&
isDereferenceablePointer(SV, *DAG.getTarget().getDataLayout());
unsigned Alignment = I.getAlignment();
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
return;
SDValue Root;
bool ConstantMemory = false;
if (isVolatile || NumValues > MaxParallelChains)
// Serialize volatile loads with other side effects.
Root = getRoot();
else if (AA->pointsToConstantMemory(
MemoryLocation(SV, AA->getTypeStoreSize(Ty), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
Root = DAG.getEntryNode();
ConstantMemory = true;
} else {
// Do not serialize non-volatile loads against each other.
Root = DAG.getRoot();
}
SDLoc dl = getCurSDLoc();
if (isVolatile)
Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
EVT PtrVT = Ptr.getValueType();
unsigned ChainI = 0;
for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
// Serializing loads here may result in excessive register pressure, and
// TokenFactor places arbitrary choke points on the scheduler. SD scheduling
// could recover a bit by hoisting nodes upward in the chain by recognizing
// they are side-effect free or do not alias. The optimizer should really
// avoid this case by converting large object/array copies to llvm.memcpy
// (MaxParallelChains should always remain as a failsafe).
if (ChainI == MaxParallelChains) {
assert(PendingLoads.empty() && "PendingLoads must be serialized first");
SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
makeArrayRef(Chains.data(), ChainI));
Root = Chain;
ChainI = 0;
}
SDValue A = DAG.getNode(ISD::ADD, dl,
PtrVT, Ptr,
DAG.getConstant(Offsets[i], dl, PtrVT));
SDValue L = DAG.getLoad(ValueVTs[i], dl, Root,
A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
isNonTemporal, isInvariant, Alignment, AAInfo,
Ranges);
Values[i] = L;
Chains[ChainI] = L.getValue(1);
}
if (!ConstantMemory) {
SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
makeArrayRef(Chains.data(), ChainI));
if (isVolatile)
DAG.setRoot(Chain);
else
PendingLoads.push_back(Chain);
}
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
DAG.getVTList(ValueVTs), Values));
}
void SelectionDAGBuilder::visitStore(const StoreInst &I) {
if (I.isAtomic())
return visitAtomicStore(I);
const Value *SrcV = I.getOperand(0);
const Value *PtrV = I.getOperand(1);
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
SrcV->getType(), ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
return;
// Get the lowered operands. Note that we do this after
// checking if NumValues is zero, because with zero values
// the operands won't have entries in the map.
SDValue Src = getValue(SrcV);
SDValue Ptr = getValue(PtrV);
SDValue Root = getRoot();
SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
EVT PtrVT = Ptr.getValueType();
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
unsigned Alignment = I.getAlignment();
SDLoc dl = getCurSDLoc();
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
unsigned ChainI = 0;
for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
// See visitLoad comments.
if (ChainI == MaxParallelChains) {
SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
makeArrayRef(Chains.data(), ChainI));
Root = Chain;
ChainI = 0;
}
SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
DAG.getConstant(Offsets[i], dl, PtrVT));
SDValue St = DAG.getStore(Root, dl,
SDValue(Src.getNode(), Src.getResNo() + i),
Add, MachinePointerInfo(PtrV, Offsets[i]),
isVolatile, isNonTemporal, Alignment, AAInfo);
Chains[ChainI] = St;
}
SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
makeArrayRef(Chains.data(), ChainI));
DAG.setRoot(StoreNode);
}
void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
SDLoc sdl = getCurSDLoc();
// llvm.masked.store.*(Src0, Ptr, alignment, Mask)
Value *PtrOperand = I.getArgOperand(1);
SDValue Ptr = getValue(PtrOperand);
SDValue Src0 = getValue(I.getArgOperand(0));
SDValue Mask = getValue(I.getArgOperand(3));
EVT VT = Src0.getValueType();
unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
MachineMemOperand *MMO =
DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(PtrOperand),
MachineMemOperand::MOStore, VT.getStoreSize(),
Alignment, AAInfo);
SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
MMO, false);
DAG.setRoot(StoreNode);
setValue(&I, StoreNode);
}
// Gather/scatter receive a vector of pointers.
// This vector of pointers may be representable as a base pointer + a vector
// of indices; whether it is depends on the GEP and on the instruction
// preceding the GEP that calculates the indices.
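// An illustrative IR pattern this recognizes (names are hypothetical):
// %ins = insertelement <16 x float*> undef, float* %base, i32 0
// %splat = shufflevector <16 x float*> %ins, <16 x float*> undef,
// <16 x i32> zeroinitializer
// %ptrs = getelementptr float, <16 x float*> %splat, <16 x i32> %inds
// Here Base becomes %base and Index becomes %inds.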
static bool getUniformBase(Value *& Ptr, SDValue& Base, SDValue& Index,
SelectionDAGBuilder* SDB) {
assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
if (!Gep || Gep->getNumOperands() > 2)
return false;
ShuffleVectorInst *ShuffleInst =
dyn_cast<ShuffleVectorInst>(Gep->getPointerOperand());
if (!ShuffleInst || !ShuffleInst->getMask()->isNullValue() ||
cast<Instruction>(ShuffleInst->getOperand(0))->getOpcode() !=
Instruction::InsertElement)
return false;
Ptr = cast<InsertElementInst>(ShuffleInst->getOperand(0))->getOperand(1);
SelectionDAG& DAG = SDB->DAG;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Check if the Ptr is inside the current basic block.
// If not, look for the shuffle instruction.
if (SDB->findValue(Ptr))
Base = SDB->getValue(Ptr);
else if (SDB->findValue(ShuffleInst)) {
SDValue ShuffleNode = SDB->getValue(ShuffleInst);
SDLoc sdl = ShuffleNode;
Base = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, sdl,
ShuffleNode.getValueType().getScalarType(), ShuffleNode,
DAG.getConstant(0, sdl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDB->setValue(Ptr, Base);
}
else
return false;
Value *IndexVal = Gep->getOperand(1);
if (SDB->findValue(IndexVal)) {
Index = SDB->getValue(IndexVal);
if (SExtInst* Sext = dyn_cast<SExtInst>(IndexVal)) {
IndexVal = Sext->getOperand(0);
if (SDB->findValue(IndexVal))
Index = SDB->getValue(IndexVal);
}
return true;
}
return false;
}
void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
SDLoc sdl = getCurSDLoc();
// llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
Value *Ptr = I.getArgOperand(1);
SDValue Src0 = getValue(I.getArgOperand(0));
SDValue Mask = getValue(I.getArgOperand(3));
EVT VT = Src0.getValueType();
unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
SDValue Base;
SDValue Index;
Value *BasePtr = Ptr;
bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
MachineMemOperand::MOStore, VT.getStoreSize(),
Alignment, AAInfo);
if (!UniformBase) {
Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
}
SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index };
SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
Ops, MMO);
DAG.setRoot(Scatter);
setValue(&I, Scatter);
}
void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) {
SDLoc sdl = getCurSDLoc();
// @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
Value *PtrOperand = I.getArgOperand(0);
SDValue Ptr = getValue(PtrOperand);
SDValue Src0 = getValue(I.getArgOperand(3));
SDValue Mask = getValue(I.getArgOperand(2));
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
SDValue InChain = DAG.getRoot();
if (AA->pointsToConstantMemory(MemoryLocation(
PtrOperand, AA->getTypeStoreSize(I.getType()), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
InChain = DAG.getEntryNode();
}
MachineMemOperand *MMO =
DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(PtrOperand),
MachineMemOperand::MOLoad, VT.getStoreSize(),
Alignment, AAInfo, Ranges);
SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
ISD::NON_EXTLOAD);
SDValue OutChain = Load.getValue(1);
DAG.setRoot(OutChain);
setValue(&I, Load);
}
void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
SDLoc sdl = getCurSDLoc();
// @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
Value *Ptr = I.getArgOperand(0);
SDValue Src0 = getValue(I.getArgOperand(3));
SDValue Mask = getValue(I.getArgOperand(2));
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
SDValue Root = DAG.getRoot();
SDValue Base;
SDValue Index;
Value *BasePtr = Ptr;
bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
bool ConstantMemory = false;
if (UniformBase &&
AA->pointsToConstantMemory(
MemoryLocation(BasePtr, AA->getTypeStoreSize(I.getType()), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
Root = DAG.getEntryNode();
ConstantMemory = true;
}
MachineMemOperand *MMO =
DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
MachineMemOperand::MOLoad, VT.getStoreSize(),
Alignment, AAInfo, Ranges);
if (!UniformBase) {
Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
}
SDValue Ops[] = { Root, Src0, Mask, Base, Index };
SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO);
SDValue OutChain = Gather.getValue(1);
if (!ConstantMemory)
PendingLoads.push_back(OutChain);
setValue(&I, Gather);
}
void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering SuccessOrder = I.getSuccessOrdering();
AtomicOrdering FailureOrder = I.getFailureOrdering();
SynchronizationScope Scope = I.getSynchScope();
SDValue InChain = getRoot();
MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
SDValue L = DAG.getAtomicCmpSwap(
ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
/*Alignment=*/ 0, SuccessOrder, FailureOrder, Scope);
SDValue OutChain = L.getValue(2);
setValue(&I, L);
DAG.setRoot(OutChain);
}
void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
SDLoc dl = getCurSDLoc();
ISD::NodeType NT;
switch (I.getOperation()) {
default: llvm_unreachable("Unknown atomicrmw operation");
case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
}
AtomicOrdering Order = I.getOrdering();
SynchronizationScope Scope = I.getSynchScope();
SDValue InChain = getRoot();
SDValue L =
DAG.getAtomic(NT, dl,
getValue(I.getValOperand()).getSimpleValueType(),
InChain,
getValue(I.getPointerOperand()),
getValue(I.getValOperand()),
I.getPointerOperand(),
/* Alignment=*/ 0, Order, Scope);
SDValue OutChain = L.getValue(1);
setValue(&I, L);
DAG.setRoot(OutChain);
}
void SelectionDAGBuilder::visitFence(const FenceInst &I) {
SDLoc dl = getCurSDLoc();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Ops[3];
Ops[0] = getRoot();
Ops[1] = DAG.getConstant(I.getOrdering(), dl,
TLI.getPointerTy(DAG.getDataLayout()));
Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
TLI.getPointerTy(DAG.getDataLayout()));
DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
}
void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering Order = I.getOrdering();
SynchronizationScope Scope = I.getSynchScope();
SDValue InChain = getRoot();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
if (I.getAlignment() < VT.getSizeInBits() / 8)
report_fatal_error("Cannot generate unaligned atomic load");
MachineMemOperand *MMO =
DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
MachineMemOperand::MOVolatile |
MachineMemOperand::MOLoad,
VT.getStoreSize(),
I.getAlignment() ? I.getAlignment() :
DAG.getEVTAlignment(VT));
InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
SDValue L =
DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
getValue(I.getPointerOperand()), MMO,
Order, Scope);
SDValue OutChain = L.getValue(1);
setValue(&I, L);
DAG.setRoot(OutChain);
}
void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering Order = I.getOrdering();
SynchronizationScope Scope = I.getSynchScope();
SDValue InChain = getRoot();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT =
TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
if (I.getAlignment() < VT.getSizeInBits() / 8)
report_fatal_error("Cannot generate unaligned atomic store");
SDValue OutChain =
DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
InChain,
getValue(I.getPointerOperand()),
getValue(I.getValueOperand()),
I.getPointerOperand(), I.getAlignment(),
Order, Scope);
DAG.setRoot(OutChain);
}
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
unsigned Intrinsic) {
bool HasChain = !I.doesNotAccessMemory();
bool OnlyLoad = HasChain && I.onlyReadsMemory();
// Build the operand list.
SmallVector<SDValue, 8> Ops;
if (HasChain) { // If this intrinsic has side-effects, chainify it.
if (OnlyLoad) {
// We don't need to serialize loads against other loads.
Ops.push_back(DAG.getRoot());
} else {
Ops.push_back(getRoot());
}
}
// Info is set by getTgtMemIntrinsic.
TargetLowering::IntrinsicInfo Info;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
// Add the intrinsic ID as an integer operand if it's not a target intrinsic.
if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
Info.opc == ISD::INTRINSIC_W_CHAIN)
Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
TLI.getPointerTy(DAG.getDataLayout())));
// Add all operands of the call to the operand list.
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
SDValue Op = getValue(I.getArgOperand(i));
Ops.push_back(Op);
}
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
if (HasChain)
ValueVTs.push_back(MVT::Other);
SDVTList VTs = DAG.getVTList(ValueVTs);
// Create the node.
SDValue Result;
if (IsTgtIntrinsic) {
// This is a target intrinsic that touches memory.
Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
VTs, Ops, Info.memVT,
MachinePointerInfo(Info.ptrVal, Info.offset),
Info.align, Info.vol,
Info.readMem, Info.writeMem, Info.size);
} else if (!HasChain) {
Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
} else if (!I.getType()->isVoidTy()) {
Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
} else {
Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
}
if (HasChain) {
SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
if (OnlyLoad)
PendingLoads.push_back(Chain);
else
DAG.setRoot(Chain);
}
if (!I.getType()->isVoidTy()) {
if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
}
setValue(&I, Result);
}
}
/// GetSignificand - Get the significand and build it into a floating-point
/// number with exponent of 1:
///
/// Op = (Op & 0x007fffff) | 0x3f800000;
///
/// where Op is the raw i32 bit pattern of the floating-point value.
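/// For example, for Op == bitcast(8.0f) == 0x41000000 this computes
/// (0x41000000 & 0x007fffff) | 0x3f800000 == 0x3f800000 == 1.0f.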
static SDValue
GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) {
SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
DAG.getConstant(0x007fffff, dl, MVT::i32));
SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
DAG.getConstant(0x3f800000, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
}
/// GetExponent - Get the exponent:
///
/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
///
/// where Op is the raw i32 bit pattern of the floating-point value.
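/// For example, for Op == bitcast(8.0f) == 0x41000000 this computes
/// ((0x41000000 & 0x7f800000) >> 23) - 127 == 130 - 127 == 3, i.e. 3.0f.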
static SDValue
GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
SDLoc dl) {
SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
DAG.getConstant(0x7f800000, dl, MVT::i32));
SDValue t1 = DAG.getNode(
ISD::SRL, dl, MVT::i32, t0,
DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
DAG.getConstant(127, dl, MVT::i32));
return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
}
/// getF32Constant - Get 32-bit floating point constant.
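/// For example, getF32Constant(DAG, 0x3f800000, dl) materializes 1.0f.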
static SDValue
getF32Constant(SelectionDAG &DAG, unsigned Flt, SDLoc dl) {
return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)), dl,
MVT::f32);
}
static SDValue getLimitedPrecisionExp2(SDValue t0, SDLoc dl,
SelectionDAG &DAG) {
// IntegerPartOfX = (int32_t)t0;
SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
// FractionalPartOfX = t0 - (float)IntegerPartOfX;
SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
// IntegerPartOfX <<= 23;
IntegerPartOfX = DAG.getNode(
ISD::SHL, dl, MVT::i32, IntegerPartOfX,
DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
DAG.getDataLayout())));
SDValue TwoToFractionalPartOfX;
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
//
// TwoToFractionalPartOfX =
// 0.997535578f +
// (0.735607626f + 0.252464424f * x) * x;
//
// error 0.0144103317, which is 6 bits
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0x3e814304, dl));
SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3f3c50c8, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f7f5e7e, dl));
} else if (LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
//
// TwoToFractionalPartOfX =
// 0.999892986f +
// (0.696457318f +
// (0.224338339f + 0.792043434e-1f * x) * x) * x;
//
// error 0.000107046256, which is 13 to 14 bits
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0x3da235e3, dl));
SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3e65b8f3, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f324b07, dl));
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3f7ff8fd, dl));
} else { // LimitFloatPrecision <= 18
// For floating-point precision of 18:
//
// TwoToFractionalPartOfX =
// 0.999999982f +
// (0.693148872f +
// (0.240227044f +
// (0.554906021e-1f +
// (0.961591928e-2f +
// (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
// error 2.47208000*10^(-7), which is better than 18 bits
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0x3924b03e, dl));
SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3ab24b87, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3c1d8c17, dl));
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3d634a1d, dl));
SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
getF32Constant(DAG, 0x3e75fe14, dl));
SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
getF32Constant(DAG, 0x3f317234, dl));
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
getF32Constant(DAG, 0x3f800000, dl));
}
// Add the exponent into the result in integer domain.
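// Because TwoToFractionalPartOfX lies in [1, 2), adding IntegerPartOfX << 23
// to its bit pattern scales it by 2^IntegerPartOfX (assuming the exponent
// field does not overflow).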
SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
}
/// expandExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
// Put the exponent in the right bit position for later addition to the
// final result:
//
// #define LOG2OFe 1.4426950f
// t0 = Op * LOG2OFe
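// since exp(Op) == exp2(Op * log2(e)).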
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
getF32Constant(DAG, 0x3fb8aa3b, dl));
return getLimitedPrecisionExp2(t0, dl, DAG);
}
// No special expansion.
return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
}
/// expandLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Scale the exponent by log(2) [0.69314718f].
SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
getF32Constant(DAG, 0x3f317218, dl));
// Get the significand and build it into a floating-point number with
// exponent of 1.
SDValue X = GetSignificand(DAG, Op1, dl);
SDValue LogOfMantissa;
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
//
// LogofMantissa =
// -1.1609546f +
// (1.4034025f - 0.23903021f * x) * x;
//
// error 0.0034276066, which is better than 8 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0xbe74c456, dl));
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
getF32Constant(DAG, 0x3fb3a2b1, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3f949a29, dl));
} else if (LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
//
// LogOfMantissa =
// -1.7417939f +
// (2.8212026f +
// (-1.4699568f +
// (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
//
// error 0.000061011436, which is 14 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0xbd67b6d6, dl));
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
getF32Constant(DAG, 0x3ee4f4b8, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3fbc278b, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x40348e95, dl));
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3fdef31a, dl));
} else { // LimitFloatPrecision <= 18
// For floating-point precision of 18:
//
// LogOfMantissa =
// -2.1072184f +
// (4.2372794f +
// (-3.7029485f +
// (2.2781945f +
// (-0.87823314f +
// (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
//
// error 0.0000023660568, which is better than 18 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0xbc91e5ac, dl));
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
getF32Constant(DAG, 0x3e4350aa, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3f60d3e3, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x4011cdf0, dl));
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
getF32Constant(DAG, 0x406cfd1c, dl));
SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
getF32Constant(DAG, 0x408797cb, dl));
SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
getF32Constant(DAG, 0x4006dcab, dl));
}
return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
}
// No special expansion.
return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
}
/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Get the exponent.
SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
// Get the significand and build it into a floating-point number with
// exponent of 1.
SDValue X = GetSignificand(DAG, Op1, dl);
// Different possible minimax approximations of significand in
// floating-point for various degrees of accuracy over [1,2].
SDValue Log2ofMantissa;
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
//
// Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
//
// error 0.0049451742, which is more than 7 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0xbeb08fe0, dl));
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
getF32Constant(DAG, 0x40019463, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3fd6633d, dl));
} else if (LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
//
// Log2ofMantissa =
// -2.51285454f +
// (4.07009056f +
// (-2.12067489f +
// (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
//
// error 0.0000876136000, which is better than 13 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0xbda7262e, dl));
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
getF32Constant(DAG, 0x3f25280b, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x4007b923, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x40823e2f, dl));
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
getF32Constant(DAG, 0x4020d29c, dl));
} else { // LimitFloatPrecision <= 18
// For floating-point precision of 18:
//
// Log2ofMantissa =
// -3.0400495f +
// (6.1129976f +
// (-5.3420409f +
// (3.2865683f +
// (-1.2669343f +
// (0.27515199f -
// 0.25691327e-1f * x) * x) * x) * x) * x) * x;
//
// error 0.0000018516, which is better than 18 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0xbcd2769e, dl));
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
getF32Constant(DAG, 0x3e8ce0b9, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3fa22ae7, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x40525723, dl));
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
getF32Constant(DAG, 0x40aaf200, dl));
SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
getF32Constant(DAG, 0x40c39dad, dl));
SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
getF32Constant(DAG, 0x4042902c, dl));
}
return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
}
// No special expansion.
return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
}
/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Scale the exponent by log10(2) [0.30102999f].
SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
getF32Constant(DAG, 0x3e9a209a, dl));
// Get the significand and build it into a floating-point number with
// exponent of 1.
SDValue X = GetSignificand(DAG, Op1, dl);
SDValue Log10ofMantissa;
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
//
// Log10ofMantissa =
// -0.50419619f +
// (0.60948995f - 0.10380950f * x) * x;
//
// error 0.0014886165, which is 6 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0xbdd49a13, dl));
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
getF32Constant(DAG, 0x3f1c0789, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3f011300, dl));
} else if (LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
//
// Log10ofMantissa =
// -0.64831180f +
// (0.91751397f +
// (-0.31664806f + 0.47637168e-1f * x) * x) * x;
//
// error 0.00019228036, which is better than 12 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0x3d431f31, dl));
SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
getF32Constant(DAG, 0x3ea21fb2, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3f6ae232, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f25f7c3, dl));
} else { // LimitFloatPrecision <= 18
// For floating-point precision of 18:
//
// Log10ofMantissa =
// -0.84299375f +
// (1.5327582f +
// (-1.0688956f +
// (0.49102474f +
// (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
//
// error 0.0000037995730, which is better than 18 bits
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
getF32Constant(DAG, 0x3c5d51ce, dl));
SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
getF32Constant(DAG, 0x3e00685a, dl));
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3efb6798, dl));
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f88d192, dl));
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3fc4316c, dl));
SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
getF32Constant(DAG, 0x3f57ce70, dl));
}
return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
}
// No special expansion.
return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
}
/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
return getLimitedPrecisionExp2(Op, dl, DAG);
// No special expansion.
return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
}
/// expandPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS,
SelectionDAG &DAG, const TargetLowering &TLI) {
bool IsExp10 = false;
if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
APFloat Ten(10.0f);
IsExp10 = LHSC->isExactlyValue(Ten);
}
}
if (IsExp10) {
// Put the exponent in the right bit position for later addition to the
// final result:
//
// #define LOG2OF10 3.3219281f
// t0 = Op * LOG2OF10;
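// since pow(10.0f, RHS) == exp2(RHS * log2(10)).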
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
getF32Constant(DAG, 0x40549a78, dl));
return getLimitedPrecisionExp2(t0, dl, DAG);
}
// No special expansion.
return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
}
/// ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS,
SelectionDAG &DAG) {
// If RHS is a constant, we can expand this out to a multiplication tree,
// otherwise we end up lowering to a call to __powidf2 (for example). When
// optimizing for size, we only want to do this if the expansion would produce
// a small number of multiplies, otherwise we do the full expansion.
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
// Get the exponent as a positive value.
unsigned Val = RHSC->getSExtValue();
if ((int)Val < 0) Val = -Val;
// powi(x, 0) -> 1.0
if (Val == 0)
return DAG.getConstantFP(1.0, DL, LHS.getValueType());
const Function *F = DAG.getMachineFunction().getFunction();
if (!F->hasFnAttribute(Attribute::OptimizeForSize) ||
// If optimizing for size, don't insert too many multiplies. This
// inserts up to 5 multiplies.
countPopulation(Val) + Log2_32(Val) < 7) {
// We use the simple binary decomposition method to generate the multiply
// sequence. There are more optimal ways to do this (for example,
// powi(x,15) generates one more multiply than it should), but this has
// the benefit of being both really simple and much better than a libcall.
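// For example, powi(x, 13) with 13 == 0b1101 computes x^13 as
// x * x^4 * x^8, where x^2, x^4 and x^8 come from successive squarings.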
SDValue Res; // Logically starts equal to 1.0
SDValue CurSquare = LHS;
while (Val) {
if (Val & 1) {
if (Res.getNode())
Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
else
Res = CurSquare; // 1.0*CurSquare.
}
CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
CurSquare, CurSquare);
Val >>= 1;
}
// If the original was negative, invert the result, producing 1/(x*x*x).
if (RHSC->getSExtValue() < 0)
Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
return Res;
}
}
// Otherwise, expand to a libcall.
return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
}
// getTruncatedArgReg - Find the underlying register used for a truncated
// argument.
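// For example, given N == truncate(AssertZext(CopyFromReg(..., %vreg))),
// this returns %vreg.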
static unsigned getTruncatedArgReg(const SDValue &N) {
if (N.getOpcode() != ISD::TRUNCATE)
return 0;
const SDValue &Ext = N.getOperand(0);
if (Ext.getOpcode() == ISD::AssertZext ||
Ext.getOpcode() == ISD::AssertSext) {
const SDValue &CFR = Ext.getOperand(0);
if (CFR.getOpcode() == ISD::CopyFromReg)
return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
if (CFR.getOpcode() == ISD::TRUNCATE)
return getTruncatedArgReg(CFR);
}
return 0;
}
/// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
/// argument, create the corresponding DBG_VALUE machine instruction for it now.
/// At the end of instruction selection, they will be inserted into the entry BB.
bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
const Value *V, DILocalVariable *Variable, DIExpression *Expr,
DILocation *DL, int64_t Offset, bool IsIndirect, const SDValue &N) {
const Argument *Arg = dyn_cast<Argument>(V);
if (!Arg)
return false;
MachineFunction &MF = DAG.getMachineFunction();
const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
// Ignore inlined function arguments here.
//
// FIXME: Should we be checking DL->inlinedAt() to determine this?
if (!Variable->getScope()->getSubprogram()->describes(MF.getFunction()))
return false;
Optional<MachineOperand> Op;
// Some arguments' frame index is recorded during argument lowering.
if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
Op = MachineOperand::CreateFI(FI);
if (!Op && N.getNode()) {
unsigned Reg;
if (N.getOpcode() == ISD::CopyFromReg)
Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
else
Reg = getTruncatedArgReg(N);
if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
MachineRegisterInfo &RegInfo = MF.getRegInfo();
unsigned PR = RegInfo.getLiveInPhysReg(Reg);
if (PR)
Reg = PR;
}
if (Reg)
Op = MachineOperand::CreateReg(Reg, false);
}
if (!Op) {
// Check if ValueMap has reg number.
DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
if (VMI != FuncInfo.ValueMap.end())
Op = MachineOperand::CreateReg(VMI->second, false);
}
if (!Op && N.getNode())
// Check if frame index is available.
if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
if (FrameIndexSDNode *FINode =
dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
Op = MachineOperand::CreateFI(FINode->getIndex());
if (!Op)
return false;
assert(Variable->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
if (Op->isReg())
FuncInfo.ArgDbgValues.push_back(
BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
Op->getReg(), Offset, Variable, Expr));
else
FuncInfo.ArgDbgValues.push_back(
BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE))
.addOperand(*Op)
.addImm(Offset)
.addMetadata(Variable)
.addMetadata(Expr));
return true;
}
// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
!defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif
/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise lower it and return null.
const char *
SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDLoc sdl = getCurSDLoc();
DebugLoc dl = getCurDebugLoc();
SDValue Res;
switch (Intrinsic) {
default:
// By default, turn this into a target intrinsic node.
visitTargetIntrinsic(I, Intrinsic);
return nullptr;
case Intrinsic::vastart: visitVAStart(I); return nullptr;
case Intrinsic::vaend: visitVAEnd(I); return nullptr;
case Intrinsic::vacopy: visitVACopy(I); return nullptr;
case Intrinsic::returnaddress:
setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
TLI.getPointerTy(DAG.getDataLayout()),
getValue(I.getArgOperand(0))));
return nullptr;
case Intrinsic::frameaddress:
setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
TLI.getPointerTy(DAG.getDataLayout()),
getValue(I.getArgOperand(0))));
return nullptr;
case Intrinsic::read_register: {
Value *Reg = I.getArgOperand(0);
SDValue Chain = getRoot();
SDValue RegName =
DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
Res = DAG.getNode(ISD::READ_REGISTER, sdl,
DAG.getVTList(VT, MVT::Other), Chain, RegName);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
return nullptr;
}
case Intrinsic::write_register: {
Value *Reg = I.getArgOperand(0);
Value *RegValue = I.getArgOperand(1);
SDValue Chain = getRoot();
SDValue RegName =
DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
RegName, getValue(RegValue)));
return nullptr;
}
case Intrinsic::setjmp:
return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
case Intrinsic::longjmp:
return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
case Intrinsic::memcpy: {
// FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
SDValue Op1 = getValue(I.getArgOperand(0));
SDValue Op2 = getValue(I.getArgOperand(1));
SDValue Op3 = getValue(I.getArgOperand(2));
unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
if (!Align)
Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
false, isTC,
MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1)));
updateDAGForMaybeTailCall(MC);
return nullptr;
}
case Intrinsic::memset: {
// FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
SDValue Op1 = getValue(I.getArgOperand(0));
SDValue Op2 = getValue(I.getArgOperand(1));
SDValue Op3 = getValue(I.getArgOperand(2));
unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
if (!Align)
Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
isTC, MachinePointerInfo(I.getArgOperand(0)));
updateDAGForMaybeTailCall(MS);
return nullptr;
}
case Intrinsic::memmove: {
// FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
SDValue Op1 = getValue(I.getArgOperand(0));
SDValue Op2 = getValue(I.getArgOperand(1));
SDValue Op3 = getValue(I.getArgOperand(2));
unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
if (!Align)
Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
isTC, MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1)));
updateDAGForMaybeTailCall(MM);
return nullptr;
}
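// Sketch of the IR handled by the dbg.declare case below; this version
// wraps the address in metadata, and !var/!expr stand in for the
// DILocalVariable/DIExpression nodes:
//   call void @llvm.dbg.declare(metadata i32* %x.addr,
//                               metadata !var, metadata !expr)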
case Intrinsic::dbg_declare: {
const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
DILocalVariable *Variable = DI.getVariable();
DIExpression *Expression = DI.getExpression();
const Value *Address = DI.getAddress();
assert(Variable && "Missing variable");
if (!Address) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return nullptr;
}
// Check if address has undef value.
if (isa<UndefValue>(Address) ||
(Address->use_empty() && !isa<Argument>(Address))) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return nullptr;
}
SDValue &N = NodeMap[Address];
if (!N.getNode() && isa<Argument>(Address))
// Check unused arguments map.
N = UnusedArgNodeMap[Address];
SDDbgValue *SDV;
if (N.getNode()) {
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
Address = BCI->getOperand(0);
// Parameters are handled specially.
bool isParameter = Variable->getTag() == dwarf::DW_TAG_arg_variable ||
isa<Argument>(Address);
const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
if (isParameter && !AI) {
FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
if (FINode)
// Byval parameter. We have a frame index at this point.
SDV = DAG.getFrameIndexDbgValue(
Variable, Expression, FINode->getIndex(), 0, dl, SDNodeOrder);
else {
// Address is an argument, so try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false,
N);
return nullptr;
}
} else if (AI)
SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
true, 0, dl, SDNodeOrder);
else {
// Can't do anything with other non-AI cases yet.
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
DEBUG(Address->dump());
return nullptr;
}
DAG.AddDbgValue(SDV, N.getNode(), isParameter);
} else {
// If Address is an argument then try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false,
N)) {
// If the variable is pinned by an alloca in a dominating basic block,
// use the StaticAllocaMap.
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
if (AI->getParent() != DI.getParent()) {
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
SDV = DAG.getFrameIndexDbgValue(Variable, Expression, SI->second,
0, dl, SDNodeOrder);
DAG.AddDbgValue(SDV, nullptr, false);
return nullptr;
}
}
}
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
return nullptr;
}
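// Sketch for the dbg.value case below; this version still carries an
// explicit i64 offset operand:
//   call void @llvm.dbg.value(metadata i32 %x, i64 0,
//                             metadata !var, metadata !expr)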
case Intrinsic::dbg_value: {
const DbgValueInst &DI = cast<DbgValueInst>(I);
assert(DI.getVariable() && "Missing variable");
DILocalVariable *Variable = DI.getVariable();
DIExpression *Expression = DI.getExpression();
uint64_t Offset = DI.getOffset();
const Value *V = DI.getValue();
if (!V)
return nullptr;
SDDbgValue *SDV;
if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl,
SDNodeOrder);
DAG.AddDbgValue(SDV, nullptr, false);
} else {
// Do not use getValue() in here; we don't want to generate code at
// this point if it hasn't been done yet.
SDValue N = NodeMap[V];
if (!N.getNode() && isa<Argument>(V))
// Check unused arguments map.
N = UnusedArgNodeMap[V];
if (N.getNode()) {
// A dbg.value for an alloca is always indirect.
bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
if (!EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset,
IsIndirect, N)) {
SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
IsIndirect, Offset, dl, SDNodeOrder);
DAG.AddDbgValue(SDV, N.getNode(), false);
}
} else if (!V->use_empty() ) {
// Do not call getValue(V) yet, as we don't want to generate code.
// Remember it for later.
DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
DanglingDebugInfoMap[V] = DDI;
} else {
// We may expand this to cover more cases. One case where we have no
// data available is an unreferenced parameter.
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
// Build a debug info table entry.
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
V = BCI->getOperand(0);
const AllocaInst *AI = dyn_cast<AllocaInst>(V);
// Don't handle byval struct arguments or VLAs, for example.
if (!AI) {
DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
return nullptr;
}
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI == FuncInfo.StaticAllocaMap.end())
return nullptr; // VLAs.
return nullptr;
}
case Intrinsic::eh_typeid_for: {
// Find the type id for the given typeinfo.
GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
Res = DAG.getConstant(TypeID, sdl, MVT::i32);
setValue(&I, Res);
return nullptr;
}
case Intrinsic::eh_return_i32:
case Intrinsic::eh_return_i64:
DAG.getMachineFunction().getMMI().setCallsEHReturn(true);
DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
MVT::Other,
getControlRoot(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
return nullptr;
case Intrinsic::eh_unwind_init:
DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
return nullptr;
case Intrinsic::eh_dwarf_cfa: {
SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
TLI.getPointerTy(DAG.getDataLayout()));
SDValue Offset = DAG.getNode(ISD::ADD, sdl,
CfaArg.getValueType(),
DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl,
CfaArg.getValueType()),
CfaArg);
SDValue FA = DAG.getNode(
ISD::FRAMEADDR, sdl, TLI.getPointerTy(DAG.getDataLayout()),
DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(),
FA, Offset));
return nullptr;
}
case Intrinsic::eh_sjlj_callsite: {
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
MMI.setCurrentCallSite(CI->getZExtValue());
return nullptr;
}
case Intrinsic::eh_sjlj_functioncontext: {
// Get and store the index of the function context.
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
AllocaInst *FnCtx =
cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
int FI = FuncInfo.StaticAllocaMap[FnCtx];
MFI->setFunctionContextIndex(FI);
return nullptr;
}
case Intrinsic::eh_sjlj_setjmp: {
SDValue Ops[2];
Ops[0] = getRoot();
Ops[1] = getValue(I.getArgOperand(0));
SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
DAG.getVTList(MVT::i32, MVT::Other), Ops);
setValue(&I, Op.getValue(0));
DAG.setRoot(Op.getValue(1));
return nullptr;
}
case Intrinsic::eh_sjlj_longjmp: {
DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
getRoot(), getValue(I.getArgOperand(0))));
return nullptr;
}
case Intrinsic::masked_gather:
visitMaskedGather(I);
return nullptr;
case Intrinsic::masked_load:
visitMaskedLoad(I);
return nullptr;
case Intrinsic::masked_scatter:
visitMaskedScatter(I);
return nullptr;
case Intrinsic::masked_store:
visitMaskedStore(I);
return nullptr;
#if 0 // HLSL Change - remove platform intrinsics
case Intrinsic::x86_mmx_pslli_w:
case Intrinsic::x86_mmx_pslli_d:
case Intrinsic::x86_mmx_pslli_q:
case Intrinsic::x86_mmx_psrli_w:
case Intrinsic::x86_mmx_psrli_d:
case Intrinsic::x86_mmx_psrli_q:
case Intrinsic::x86_mmx_psrai_w:
case Intrinsic::x86_mmx_psrai_d: {
SDValue ShAmt = getValue(I.getArgOperand(1));
if (isa<ConstantSDNode>(ShAmt)) {
visitTargetIntrinsic(I, Intrinsic);
return nullptr;
}
unsigned NewIntrinsic = 0;
EVT ShAmtVT = MVT::v2i32;
switch (Intrinsic) {
case Intrinsic::x86_mmx_pslli_w:
NewIntrinsic = Intrinsic::x86_mmx_psll_w;
break;
case Intrinsic::x86_mmx_pslli_d:
NewIntrinsic = Intrinsic::x86_mmx_psll_d;
break;
case Intrinsic::x86_mmx_pslli_q:
NewIntrinsic = Intrinsic::x86_mmx_psll_q;
break;
case Intrinsic::x86_mmx_psrli_w:
NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
break;
case Intrinsic::x86_mmx_psrli_d:
NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
break;
case Intrinsic::x86_mmx_psrli_q:
NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
break;
case Intrinsic::x86_mmx_psrai_w:
NewIntrinsic = Intrinsic::x86_mmx_psra_w;
break;
case Intrinsic::x86_mmx_psrai_d:
NewIntrinsic = Intrinsic::x86_mmx_psra_d;
break;
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
}
// The vector shift intrinsics that take scalar shift amounts use 32-bit
// values, but the SSE2/MMX shift instructions read 64 bits. Set the upper
// 32 bits to zero.
// We must do this early because v2i32 is not a legal type.
SDValue ShOps[2];
ShOps[0] = ShAmt;
ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, ShOps);
EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
getValue(I.getArgOperand(0)), ShAmt);
setValue(&I, Res);
return nullptr;
}
#endif // HLSL Change - remove platform intrinsics
case Intrinsic::convertff:
case Intrinsic::convertfsi:
case Intrinsic::convertfui:
case Intrinsic::convertsif:
case Intrinsic::convertuif:
case Intrinsic::convertss:
case Intrinsic::convertsu:
case Intrinsic::convertus:
case Intrinsic::convertuu: {
ISD::CvtCode Code = ISD::CVT_INVALID;
switch (Intrinsic) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::convertff: Code = ISD::CVT_FF; break;
case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
case Intrinsic::convertss: Code = ISD::CVT_SS; break;
case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
case Intrinsic::convertus: Code = ISD::CVT_US; break;
case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
}
EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
const Value *Op1 = I.getArgOperand(0);
Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1),
DAG.getValueType(DestVT),
DAG.getValueType(getValue(Op1).getValueType()),
getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2)),
Code);
setValue(&I, Res);
return nullptr;
}
case Intrinsic::powi:
setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)), DAG));
return nullptr;
case Intrinsic::log:
setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
return nullptr;
case Intrinsic::log2:
setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
return nullptr;
case Intrinsic::log10:
setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
return nullptr;
case Intrinsic::exp:
setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
return nullptr;
case Intrinsic::exp2:
setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
return nullptr;
case Intrinsic::pow:
setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)), DAG, TLI));
return nullptr;
case Intrinsic::sqrt:
case Intrinsic::fabs:
case Intrinsic::sin:
case Intrinsic::cos:
case Intrinsic::floor:
case Intrinsic::ceil:
case Intrinsic::trunc:
case Intrinsic::rint:
case Intrinsic::nearbyint:
case Intrinsic::round: {
unsigned Opcode;
switch (Intrinsic) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
case Intrinsic::fabs: Opcode = ISD::FABS; break;
case Intrinsic::sin: Opcode = ISD::FSIN; break;
case Intrinsic::cos: Opcode = ISD::FCOS; break;
case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
case Intrinsic::rint: Opcode = ISD::FRINT; break;
case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
case Intrinsic::round: Opcode = ISD::FROUND; break;
}
setValue(&I, DAG.getNode(Opcode, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0))));
return nullptr;
}
case Intrinsic::minnum:
setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
return nullptr;
case Intrinsic::maxnum:
setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
return nullptr;
case Intrinsic::copysign:
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
return nullptr;
case Intrinsic::fma:
setValue(&I, DAG.getNode(ISD::FMA, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2))));
return nullptr;
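// llvm.fmuladd may be lowered either to a single fused multiply-add or to
// separate FMUL/FADD nodes. Sketch: when fusion is allowed and profitable,
//   %r = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
// becomes one ISD::FMA node; otherwise it becomes (a * b) + c, unfused.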
case Intrinsic::fmuladd: {
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
TLI.isFMAFasterThanFMulAndFAdd(VT)) {
setValue(&I, DAG.getNode(ISD::FMA, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2))));
} else {
SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)));
SDValue Add = DAG.getNode(ISD::FADD, sdl,
getValue(I.getArgOperand(0)).getValueType(),
Mul,
getValue(I.getArgOperand(2)));
setValue(&I, Add);
}
return nullptr;
}
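// The fp16 conversion intrinsics traffic in i16 bit patterns, hence the
// BITCAST pairing below. Sketch:
//   %h = call i16 @llvm.convert.to.fp16.f32(float %x)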
case Intrinsic::convert_to_fp16:
setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
getValue(I.getArgOperand(0)),
DAG.getTargetConstant(0, sdl,
MVT::i32))));
return nullptr;
case Intrinsic::convert_from_fp16:
setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
TLI.getValueType(DAG.getDataLayout(), I.getType()),
DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
getValue(I.getArgOperand(0)))));
return nullptr;
case Intrinsic::pcmarker: {
SDValue Tmp = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
return nullptr;
}
case Intrinsic::readcyclecounter: {
SDValue Op = getRoot();
Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
DAG.getVTList(MVT::i64, MVT::Other), Op);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
return nullptr;
}
case Intrinsic::bswap:
setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0))));
return nullptr;
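// For the two count-zeros cases below, the second operand is the
// 'is_zero_undef' flag. Sketch:
//   %r = call i32 @llvm.cttz.i32(i32 %x, i1 true)
// An i1 true selects the *_ZERO_UNDEF node, i1 false the plain one.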
case Intrinsic::cttz: {
SDValue Arg = getValue(I.getArgOperand(0));
ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
sdl, Ty, Arg));
return nullptr;
}
case Intrinsic::ctlz: {
SDValue Arg = getValue(I.getArgOperand(0));
ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
sdl, Ty, Arg));
return nullptr;
}
case Intrinsic::ctpop: {
SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
return nullptr;
}
case Intrinsic::stacksave: {
SDValue Op = getRoot();
Res = DAG.getNode(
ISD::STACKSAVE, sdl,
DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
return nullptr;
}
case Intrinsic::stackrestore: {
Res = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
return nullptr;
}
case Intrinsic::stackprotector: {
// Emit code into the DAG to store the stack guard onto the stack.
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
SDValue Src, Chain = getRoot();
const Value *Ptr = cast<LoadInst>(I.getArgOperand(0))->getPointerOperand();
const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr);
// See if Ptr is a bitcast. If it is, look through it and see if we can get
// global variable __stack_chk_guard.
if (!GV)
if (const Operator *BC = dyn_cast<Operator>(Ptr))
if (BC->getOpcode() == Instruction::BitCast)
GV = dyn_cast<GlobalVariable>(BC->getOperand(0));
if (GV && TLI.useLoadStackGuardNode()) {
// Emit a LOAD_STACK_GUARD node.
MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD,
sdl, PtrTy, Chain);
MachinePointerInfo MPInfo(GV);
MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
unsigned Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOInvariant;
*MemRefs = MF.getMachineMemOperand(MPInfo, Flags,
PtrTy.getSizeInBits() / 8,
DAG.getEVTAlignment(PtrTy));
Node->setMemRefs(MemRefs, MemRefs + 1);
// Copy the guard value to a virtual register so that it can be
// retrieved in the epilogue.
Src = SDValue(Node, 0);
const TargetRegisterClass *RC =
TLI.getRegClassFor(Src.getSimpleValueType());
unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
SPDescriptor.setGuardReg(Reg);
Chain = DAG.getCopyToReg(Chain, sdl, Reg, Src);
} else {
Src = getValue(I.getArgOperand(0)); // The guard's value.
}
AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
int FI = FuncInfo.StaticAllocaMap[Slot];
MFI->setStackProtectorIndex(FI);
SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
// Store the stack protector onto the stack.
Res = DAG.getStore(Chain, sdl, Src, FIN,
MachinePointerInfo::getFixedStack(FI),
true, false, 0);
setValue(&I, Res);
DAG.setRoot(Res);
return nullptr;
}
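// llvm.objectsize's second operand selects the fallback when the size is
// unknown: i1 false yields -1 (maximum) and i1 true yields 0 (minimum).
// Sketch: %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false)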
case Intrinsic::objectsize: {
// If we don't know by now, we're never going to know.
ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
assert(CI && "Non-constant type in __builtin_object_size?");
SDValue Arg = getValue(I.getCalledValue());
EVT Ty = Arg.getValueType();
if (CI->isZero())
Res = DAG.getConstant(-1ULL, sdl, Ty);
else
Res = DAG.getConstant(0, sdl, Ty);
setValue(&I, Res);
return nullptr;
}
case Intrinsic::annotation:
case Intrinsic::ptr_annotation:
// Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
return nullptr;
case Intrinsic::assume:
case Intrinsic::var_annotation:
// Discard annotate attributes and assumptions
return nullptr;
case Intrinsic::init_trampoline: {
const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
SDValue Ops[6];
Ops[0] = getRoot();
Ops[1] = getValue(I.getArgOperand(0));
Ops[2] = getValue(I.getArgOperand(1));
Ops[3] = getValue(I.getArgOperand(2));
Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
Ops[5] = DAG.getSrcValue(F);
Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
DAG.setRoot(Res);
return nullptr;
}
case Intrinsic::adjust_trampoline: {
setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
TLI.getPointerTy(DAG.getDataLayout()),
getValue(I.getArgOperand(0))));
return nullptr;
}
case Intrinsic::gcroot:
if (GFI) {
const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
GFI->addStackRoot(FI->getIndex(), TypeMap);
}
return nullptr;
case Intrinsic::gcread:
case Intrinsic::gcwrite:
llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
case Intrinsic::flt_rounds:
setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
return nullptr;
case Intrinsic::expect: {
// Just replace __builtin_expect(exp, c) with EXP.
setValue(&I, getValue(I.getArgOperand(0)));
return nullptr;
}
case Intrinsic::debugtrap:
case Intrinsic::trap: {
StringRef TrapFuncName =
I.getAttributes()
.getAttribute(AttributeSet::FunctionIndex, "trap-func-name")
.getValueAsString();
if (TrapFuncName.empty()) {
ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
ISD::TRAP : ISD::DEBUGTRAP;
DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
return nullptr;
}
TargetLowering::ArgListTy Args;
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(sdl).setChain(getRoot()).setCallee(
CallingConv::C, I.getType(),
DAG.getExternalSymbol(TrapFuncName.data(),
TLI.getPointerTy(DAG.getDataLayout())),
std::move(Args), 0);
std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
DAG.setRoot(Result.second);
return nullptr;
}
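// Sketch of the IR for the overflow-checked arithmetic cases below:
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue { i32, i1 } %s, 0
//   %o = extractvalue { i32, i1 } %s, 1
// The struct's two fields map onto the two results of the ISD node.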
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow: {
ISD::NodeType Op;
switch (Intrinsic) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
}
SDValue Op1 = getValue(I.getArgOperand(0));
SDValue Op2 = getValue(I.getArgOperand(1));
SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
return nullptr;
}
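// llvm.prefetch takes (addr, rw, locality, cache type); rw==0 requests a
// read prefetch and rw==1 a write prefetch, as encoded below. Sketch:
//   call void @llvm.prefetch(i8* %p, i32 0, i32 3, i32 1)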
case Intrinsic::prefetch: {
SDValue Ops[5];
unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
Ops[0] = getRoot();
Ops[1] = getValue(I.getArgOperand(0));
Ops[2] = getValue(I.getArgOperand(1));
Ops[3] = getValue(I.getArgOperand(2));
Ops[4] = getValue(I.getArgOperand(3));
DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
DAG.getVTList(MVT::Other), Ops,
EVT::getIntegerVT(*Context, 8),
MachinePointerInfo(I.getArgOperand(0)),
0, /* align */
false, /* volatile */
rw==0, /* read */
rw==1)); /* write */
return nullptr;
}
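// Lifetime markers delimit the live range of a static alloca. Sketch:
//   call void @llvm.lifetime.start(i64 16, i8* %p)
//   ...
//   call void @llvm.lifetime.end(i64 16, i8* %p)
// Only the pointer operand matters here; the size is not inspected below.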
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
// Stack coloring is not enabled in O0, discard region information.
if (TM.getOptLevel() == CodeGenOpt::None)
return nullptr;
SmallVector<Value *, 4> Allocas;
GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL);
for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {
AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
// Could not find an Alloca.
if (!LifetimeObject)
continue;
// First check that the Alloca is static, otherwise it won't have a
// valid frame index.
auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
if (SI == FuncInfo.StaticAllocaMap.end())
return nullptr;
int FI = SI->second;
SDValue Ops[2];
Ops[0] = getRoot();
Ops[1] =
DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()), true);
unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
DAG.setRoot(Res);
}
return nullptr;
}
case Intrinsic::invariant_start:
// Discard region information.
setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
return nullptr;
case Intrinsic::invariant_end:
// Discard region information.
return nullptr;
case Intrinsic::stackprotectorcheck: {
// Do not actually emit anything for this basic block. Instead we initialize
// the stack protector descriptor and export the guard variable so we can
// access it in FinishBasicBlock.
const BasicBlock *BB = I.getParent();
SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I);
ExportFromCurrentBlock(SPDescriptor.getGuard());
// Flush our exports since we are going to process a terminator.
(void)getControlRoot();
return nullptr;
}
case Intrinsic::clear_cache:
return TLI.getClearCacheBuiltinName();
case Intrinsic::eh_actions:
setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
return nullptr;
case Intrinsic::donothing:
// ignore
return nullptr;
case Intrinsic::experimental_stackmap: {
visitStackmap(I);
return nullptr;
}
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64: {
visitPatchpoint(&I);
return nullptr;
}
case Intrinsic::experimental_gc_statepoint: {
visitStatepoint(I);
return nullptr;
}
case Intrinsic::experimental_gc_result_int:
case Intrinsic::experimental_gc_result_float:
case Intrinsic::experimental_gc_result_ptr:
case Intrinsic::experimental_gc_result: {
visitGCResult(I);
return nullptr;
}
case Intrinsic::experimental_gc_relocate: {
visitGCRelocate(I);
return nullptr;
}
case Intrinsic::instrprof_increment:
llvm_unreachable("instrprof failed to lower an increment");
case Intrinsic::localescape: {
MachineFunction &MF = DAG.getMachineFunction();
const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
// Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
// is the same on all targets.
for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
if (isa<ConstantPointerNull>(Arg))
continue; // Skip null pointers. They represent a hole in index space.
AllocaInst *Slot = cast<AllocaInst>(Arg);
assert(FuncInfo.StaticAllocaMap.count(Slot) &&
"can only escape static allocas");
int FI = FuncInfo.StaticAllocaMap[Slot];
MCSymbol *FrameAllocSym =
MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
GlobalValue::getRealLinkageName(MF.getName()), Idx);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
TII->get(TargetOpcode::LOCAL_ESCAPE))
.addSym(FrameAllocSym)
.addFrameIndex(FI);
}
return nullptr;
}
case Intrinsic::localrecover: {
// i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
MachineFunction &MF = DAG.getMachineFunction();
MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
// Get the symbol that defines the frame offset.
auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
unsigned IdxVal = unsigned(Idx->getLimitedValue(INT_MAX));
MCSymbol *FrameAllocSym =
MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
GlobalValue::getRealLinkageName(Fn->getName()), IdxVal);
// Create a MCSymbol for the label to avoid any target lowering
// that would make this PC relative.
SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
SDValue OffsetVal =
DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
// Add the offset to the FP.
Value *FP = I.getArgOperand(1);
SDValue FPVal = getValue(FP);
SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
setValue(&I, Add);
return nullptr;
}
case Intrinsic::eh_begincatch:
case Intrinsic::eh_endcatch:
llvm_unreachable("begin/end catch intrinsics not lowered in codegen");
case Intrinsic::eh_exceptioncode: {
unsigned Reg = TLI.getExceptionPointerRegister();
assert(Reg && "cannot get exception code on this platform");
MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
assert(FuncInfo.MBB->isLandingPad() && "eh.exceptioncode in non-lpad");
unsigned VReg = FuncInfo.MBB->addLiveIn(Reg, PtrRC);
SDValue N =
DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
setValue(&I, N);
return nullptr;
}
}
}
std::pair<SDValue, SDValue>
SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
MachineBasicBlock *LandingPad) {
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
MCSymbol *BeginLabel = nullptr;
if (LandingPad) {
// Insert a label before the invoke call to mark the try range. This can be
// used to detect deletion of the invoke via the MachineModuleInfo.
BeginLabel = MMI.getContext().createTempSymbol();
// For SjLj, keep track of which landing pads go with which invokes
// so as to maintain the ordering of pads in the LSDA.
unsigned CallSiteIndex = MMI.getCurrentCallSite();
if (CallSiteIndex) {
MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex);
// Now that the call site is handled, stop tracking it.
MMI.setCurrentCallSite(0);
}
// Both PendingLoads and PendingExports must be flushed here;
// this call might not return.
(void)getRoot();
DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
CLI.setChain(getRoot());
}
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
assert((CLI.IsTailCall || Result.second.getNode()) &&
"Non-null chain expected with non-tail call!");
assert((Result.second.getNode() || !Result.first.getNode()) &&
"Null value expected with tail call!");
if (!Result.second.getNode()) {
// As a special case, a null chain means that a tail call has been emitted
// and the DAG root is already updated.
HasTailCall = true;
// Since there's no actual continuation from this block, nothing can
// rely on us setting vregs for them.
PendingExports.clear();
} else {
DAG.setRoot(Result.second);
}
if (LandingPad) {
// Insert a label at the end of the invoke call to mark the try range. This
// can be used to detect deletion of the invoke via the MachineModuleInfo.
MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
// Inform MachineModuleInfo of range.
MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
}
return Result;
}
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
Type *RetTy = FTy->getReturnType();
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Args.reserve(CS.arg_size());
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
const Value *V = *i;
// Skip empty types
if (V->getType()->isEmptyTy())
continue;
SDValue ArgNode = getValue(V);
Entry.Node = ArgNode; Entry.Ty = V->getType();
// Skip the first return-type Attribute to get to params.
Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
Args.push_back(Entry);
// If we have an explicit sret argument that is an Instruction (i.e., it
// might point to function-local memory), we can't meaningfully tail-call.
if (Entry.isSRet && isa<Instruction>(V))
isTailCall = false;
}
// Check if target-independent constraints permit a tail call here.
// Target-dependent constraints are checked within TLI->LowerCallTo.
if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
isTailCall = false;
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
.setCallee(RetTy, FTy, Callee, std::move(Args), CS)
.setTailCall(isTailCall);
std::pair<SDValue,SDValue> Result = lowerInvokable(CLI, LandingPad);
if (Result.first.getNode())
setValue(CS.getInstruction(), Result.first);
}
/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
/// value is equal or not-equal to zero.
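/// For example (a sketch), this returns true when every user looks like
///   %c = icmp eq i32 %v, 0
/// and false as soon as %v feeds any other kind of instruction.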
static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
for (const User *U : V->users()) {
if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
if (IC->isEquality())
if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
if (C->isNullValue())
continue;
// Unknown instruction.
return false;
}
return true;
}
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
Type *LoadTy,
SelectionDAGBuilder &Builder) {
// Check to see if this load can be trivially constant folded, e.g. if the
// input is from a string literal.
if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
// Cast pointer to the type we really want to load.
LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
PointerType::getUnqual(LoadTy));
if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
const_cast<Constant *>(LoadInput), *Builder.DL))
return Builder.getValue(LoadCst);
}
// Otherwise, we have to emit the load. If the pointer is to unfoldable but
// still constant memory, the input chain can be the entry node.
SDValue Root;
bool ConstantMemory = false;
// Do not serialize (non-volatile) loads of constant memory with anything.
if (Builder.AA->pointsToConstantMemory(PtrVal)) {
Root = Builder.DAG.getEntryNode();
ConstantMemory = true;
} else {
// Do not serialize non-volatile loads against each other.
Root = Builder.DAG.getRoot();
}
SDValue Ptr = Builder.getValue(PtrVal);
SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
Ptr, MachinePointerInfo(PtrVal),
false /*volatile*/,
false /*nontemporal*/,
false /*isinvariant*/, 1 /* align=1 */);
if (!ConstantMemory)
Builder.PendingLoads.push_back(LoadVal.getValue(1));
return LoadVal;
}
/// processIntegerCallValue - Record the value for an instruction that
/// produces an integer result, converting the type where necessary.
void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
SDValue Value,
bool IsSigned) {
EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType(), true);
if (IsSigned)
Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
else
Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
setValue(&I, Value);
}
/// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
/// If so, return true and lower it, otherwise return false and it will be
/// lowered like a normal call.
bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
// Verify that the prototype makes sense. int memcmp(void*,void*,size_t)
if (I.getNumArgOperands() != 3)
return false;
const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
!I.getArgOperand(2)->getType()->isIntegerTy() ||
!I.getType()->isIntegerTy())
return false;
const Value *Size = I.getArgOperand(2);
const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
if (CSize && CSize->getZExtValue() == 0) {
EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType(), true);
setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
return true;
}
const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
std::pair<SDValue, SDValue> Res =
TSI.EmitTargetCodeForMemcmp(DAG, getCurSDLoc(), DAG.getRoot(),
getValue(LHS), getValue(RHS), getValue(Size),
MachinePointerInfo(LHS),
MachinePointerInfo(RHS));
if (Res.first.getNode()) {
processIntegerCallValue(I, Res.first, true);
PendingLoads.push_back(Res.second);
return true;
}
// memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
// memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
if (CSize && IsOnlyUsedInZeroEqualityComparison(&I)) {
bool ActuallyDoIt = true;
MVT LoadVT;
Type *LoadTy;
switch (CSize->getZExtValue()) {
default:
LoadVT = MVT::Other;
LoadTy = nullptr;
ActuallyDoIt = false;
break;
case 2:
LoadVT = MVT::i16;
LoadTy = Type::getInt16Ty(CSize->getContext());
break;
case 4:
LoadVT = MVT::i32;
LoadTy = Type::getInt32Ty(CSize->getContext());
break;
case 8:
LoadVT = MVT::i64;
LoadTy = Type::getInt64Ty(CSize->getContext());
break;
/*
case 16:
LoadVT = MVT::v4i32;
LoadTy = Type::getInt32Ty(CSize->getContext());
LoadTy = VectorType::get(LoadTy, 4);
break;
*/
}
// This turns into unaligned loads, so only do it if the target natively
// supports the MVT we'll be loading, or if the compare is small enough
// (<= 4 bytes) that expansion produces only a few byte loads. For larger
// sizes, require a legal MVT for which the target supports unaligned
// loads; expanding into byte loads would bloat the code.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (ActuallyDoIt && CSize->getZExtValue() > 4) {
unsigned DstAS = LHS->getType()->getPointerAddressSpace();
unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
// TODO: Handle 5 byte compare as 4-byte + 1 byte.
// TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
// TODO: Check alignment of src and dest ptrs.
if (!TLI.isTypeLegal(LoadVT) ||
!TLI.allowsMisalignedMemoryAccesses(LoadVT, SrcAS) ||
!TLI.allowsMisalignedMemoryAccesses(LoadVT, DstAS))
ActuallyDoIt = false;
}
if (ActuallyDoIt) {
SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
SDValue Res = DAG.getSetCC(getCurSDLoc(), MVT::i1, LHSVal, RHSVal,
ISD::SETNE);
processIntegerCallValue(I, Res, false);
return true;
}
}
return false;
}
/// visitMemChrCall -- See if we can lower a memchr call into an optimized
/// form. If so, return true and lower it, otherwise return false and it
/// will be lowered like a normal call.
bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
// Verify that the prototype makes sense. void *memchr(void *, int, size_t)
if (I.getNumArgOperands() != 3)
return false;
const Value *Src = I.getArgOperand(0);
const Value *Char = I.getArgOperand(1);
const Value *Length = I.getArgOperand(2);
if (!Src->getType()->isPointerTy() ||
!Char->getType()->isIntegerTy() ||
!Length->getType()->isIntegerTy() ||
!I.getType()->isPointerTy())
return false;
const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
std::pair<SDValue, SDValue> Res =
TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
getValue(Src), getValue(Char), getValue(Length),
MachinePointerInfo(Src));
if (Res.first.getNode()) {
setValue(&I, Res.first);
PendingLoads.push_back(Res.second);
return true;
}
return false;
}
/// visitStrCpyCall -- See if we can lower a strcpy or stpcpy call into an
/// optimized form. If so, return true and lower it, otherwise return false
/// and it will be lowered like a normal call.
bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
// Verify that the prototype makes sense. char *strcpy(char *, char *)
if (I.getNumArgOperands() != 2)
return false;
const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
if (!Arg0->getType()->isPointerTy() ||
!Arg1->getType()->isPointerTy() ||
!I.getType()->isPointerTy())
return false;
const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
std::pair<SDValue, SDValue> Res =
TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
getValue(Arg0), getValue(Arg1),
MachinePointerInfo(Arg0),
MachinePointerInfo(Arg1), isStpcpy);
if (Res.first.getNode()) {
setValue(&I, Res.first);
DAG.setRoot(Res.second);
return true;
}
return false;
}
/// visitStrCmpCall - See if we can lower a call to strcmp in an optimized form.
/// If so, return true and lower it, otherwise return false and it will be
/// lowered like a normal call.
bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
// Verify that the prototype makes sense. int strcmp(void*,void*)
if (I.getNumArgOperands() != 2)
return false;
const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
if (!Arg0->getType()->isPointerTy() ||
!Arg1->getType()->isPointerTy() ||
!I.getType()->isIntegerTy())
return false;
const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
std::pair<SDValue, SDValue> Res =
TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
getValue(Arg0), getValue(Arg1),
MachinePointerInfo(Arg0),
MachinePointerInfo(Arg1));
if (Res.first.getNode()) {
processIntegerCallValue(I, Res.first, true);
PendingLoads.push_back(Res.second);
return true;
}
return false;
}
/// visitStrLenCall -- See if we can lower a strlen call into an optimized
/// form. If so, return true and lower it, otherwise return false and it
/// will be lowered like a normal call.
bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
// Verify that the prototype makes sense. size_t strlen(char *)
if (I.getNumArgOperands() != 1)
return false;
const Value *Arg0 = I.getArgOperand(0);
if (!Arg0->getType()->isPointerTy() || !I.getType()->isIntegerTy())
return false;
const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
std::pair<SDValue, SDValue> Res =
TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
getValue(Arg0), MachinePointerInfo(Arg0));
if (Res.first.getNode()) {
processIntegerCallValue(I, Res.first, false);
PendingLoads.push_back(Res.second);
return true;
}
return false;
}
/// visitStrNLenCall -- See if we can lower a strnlen call into an optimized
/// form. If so, return true and lower it, otherwise return false and it
/// will be lowered like a normal call.
bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
// Verify that the prototype makes sense. size_t strnlen(char *, size_t)
if (I.getNumArgOperands() != 2)
return false;
const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
if (!Arg0->getType()->isPointerTy() ||
!Arg1->getType()->isIntegerTy() ||
!I.getType()->isIntegerTy())
return false;
const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
std::pair<SDValue, SDValue> Res =
TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
getValue(Arg0), getValue(Arg1),
MachinePointerInfo(Arg0));
if (Res.first.getNode()) {
processIntegerCallValue(I, Res.first, false);
PendingLoads.push_back(Res.second);
return true;
}
return false;
}
/// visitUnaryFloatCall - If a call instruction is a unary floating-point
/// operation (as expected), translate it to an SDNode with the specified opcode
/// and return true.
bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
unsigned Opcode) {
// Sanity check that it really is a unary floating-point call.
if (I.getNumArgOperands() != 1 ||
!I.getArgOperand(0)->getType()->isFloatingPointTy() ||
I.getType() != I.getArgOperand(0)->getType() ||
!I.onlyReadsMemory())
return false;
SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
return true;
}
/// visitBinaryFloatCall - If a call instruction is a binary floating-point
/// operation (as expected), translate it to an SDNode with the specified opcode
/// and return true.
bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
unsigned Opcode) {
// Sanity check that it really is a binary floating-point call.
if (I.getNumArgOperands() != 2 ||
!I.getArgOperand(0)->getType()->isFloatingPointTy() ||
I.getType() != I.getArgOperand(0)->getType() ||
I.getType() != I.getArgOperand(1)->getType() ||
!I.onlyReadsMemory())
return false;
SDValue Tmp0 = getValue(I.getArgOperand(0));
SDValue Tmp1 = getValue(I.getArgOperand(1));
EVT VT = Tmp0.getValueType();
setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
return true;
}
void SelectionDAGBuilder::visitCall(const CallInst &I) {
// Handle inline assembly differently.
if (isa<InlineAsm>(I.getCalledValue())) {
visitInlineAsm(&I);
return;
}
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
ComputeUsesVAFloatArgument(I, &MMI);
const char *RenameFn = nullptr;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
if (unsigned IID = II->getIntrinsicID(F)) {
RenameFn = visitIntrinsicCall(I, IID);
if (!RenameFn)
return;
}
}
if (Intrinsic::ID IID = F->getIntrinsicID()) {
RenameFn = visitIntrinsicCall(I, IID);
if (!RenameFn)
return;
}
}
// Check for well-known libc/libm calls. If the function is internal, it
// can't be a library call.
LibFunc::Func Func;
if (!F->hasLocalLinkage() && F->hasName() &&
LibInfo->getLibFunc(F->getName(), Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
switch (Func) {
default: break;
case LibFunc::copysign:
case LibFunc::copysignf:
case LibFunc::copysignl:
if (I.getNumArgOperands() == 2 && // Basic sanity checks.
I.getArgOperand(0)->getType()->isFloatingPointTy() &&
I.getType() == I.getArgOperand(0)->getType() &&
I.getType() == I.getArgOperand(1)->getType() &&
I.onlyReadsMemory()) {
SDValue LHS = getValue(I.getArgOperand(0));
SDValue RHS = getValue(I.getArgOperand(1));
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
LHS.getValueType(), LHS, RHS));
return;
}
break;
case LibFunc::fabs:
case LibFunc::fabsf:
case LibFunc::fabsl:
if (visitUnaryFloatCall(I, ISD::FABS))
return;
break;
case LibFunc::fmin:
case LibFunc::fminf:
case LibFunc::fminl:
if (visitBinaryFloatCall(I, ISD::FMINNUM))
return;
break;
case LibFunc::fmax:
case LibFunc::fmaxf:
case LibFunc::fmaxl:
if (visitBinaryFloatCall(I, ISD::FMAXNUM))
return;
break;
case LibFunc::sin:
case LibFunc::sinf:
case LibFunc::sinl:
if (visitUnaryFloatCall(I, ISD::FSIN))
return;
break;
case LibFunc::cos:
case LibFunc::cosf:
case LibFunc::cosl:
if (visitUnaryFloatCall(I, ISD::FCOS))
return;
break;
case LibFunc::sqrt:
case LibFunc::sqrtf:
case LibFunc::sqrtl:
case LibFunc::sqrt_finite:
case LibFunc::sqrtf_finite:
case LibFunc::sqrtl_finite:
if (visitUnaryFloatCall(I, ISD::FSQRT))
return;
break;
case LibFunc::floor:
case LibFunc::floorf:
case LibFunc::floorl:
if (visitUnaryFloatCall(I, ISD::FFLOOR))
return;
break;
case LibFunc::nearbyint:
case LibFunc::nearbyintf:
case LibFunc::nearbyintl:
if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
return;
break;
case LibFunc::ceil:
case LibFunc::ceilf:
case LibFunc::ceill:
if (visitUnaryFloatCall(I, ISD::FCEIL))
return;
break;
case LibFunc::rint:
case LibFunc::rintf:
case LibFunc::rintl:
if (visitUnaryFloatCall(I, ISD::FRINT))
return;
break;
case LibFunc::round:
case LibFunc::roundf:
case LibFunc::roundl:
if (visitUnaryFloatCall(I, ISD::FROUND))
return;
break;
case LibFunc::trunc:
case LibFunc::truncf:
case LibFunc::truncl:
if (visitUnaryFloatCall(I, ISD::FTRUNC))
return;
break;
case LibFunc::log2:
case LibFunc::log2f:
case LibFunc::log2l:
if (visitUnaryFloatCall(I, ISD::FLOG2))
return;
break;
case LibFunc::exp2:
case LibFunc::exp2f:
case LibFunc::exp2l:
if (visitUnaryFloatCall(I, ISD::FEXP2))
return;
break;
case LibFunc::memcmp:
if (visitMemCmpCall(I))
return;
break;
case LibFunc::memchr:
if (visitMemChrCall(I))
return;
break;
case LibFunc::strcpy:
if (visitStrCpyCall(I, false))
return;
break;
case LibFunc::stpcpy:
if (visitStrCpyCall(I, true))
return;
break;
case LibFunc::strcmp:
if (visitStrCmpCall(I))
return;
break;
case LibFunc::strlen:
if (visitStrLenCall(I))
return;
break;
case LibFunc::strnlen:
if (visitStrNLenCall(I))
return;
break;
}
}
}
SDValue Callee;
if (!RenameFn)
Callee = getValue(I.getCalledValue());
else
Callee = DAG.getExternalSymbol(
RenameFn,
DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
// Check if we can potentially perform a tail call. More detailed checking
// is done within LowerCallTo, after more information about the call is
// known.
LowerCallTo(&I, Callee, I.isTailCall());
}
namespace {
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
/// CallOperand - If this is the result output operand or a clobber
/// this is null, otherwise it is the incoming operand to the CallInst.
/// This gets modified as the asm is processed.
SDValue CallOperand;
/// AssignedRegs - If this is a register or register class operand, this
/// contains the set of registers corresponding to the operand.
RegsForValue AssignedRegs;
explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
: TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) {
}
/// getCallOperandValEVT - Return the EVT of the Value* that this operand
/// corresponds to. If there is no Value* for this operand, it returns
/// MVT::Other.
EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
const DataLayout &DL) const {
if (!CallOperandVal) return MVT::Other;
if (isa<BasicBlock>(CallOperandVal))
return TLI.getPointerTy(DL);
llvm::Type *OpTy = CallOperandVal->getType();
// FIXME: code duplicated from TargetLowering::ParseConstraints().
// If this is an indirect operand, the operand is a pointer to the
// accessed type.
if (isIndirect) {
llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}
// Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
if (StructType *STy = dyn_cast<StructType>(OpTy))
if (STy->getNumElements() == 1)
OpTy = STy->getElementType(0);
// If OpTy is not a single value, it may be a struct/union that we
// can tile with integers.
if (!OpTy->isSingleValueType() && OpTy->isSized()) {
unsigned BitSize = DL.getTypeSizeInBits(OpTy);
switch (BitSize) {
default: break;
case 1:
case 8:
case 16:
case 32:
case 64:
case 128:
OpTy = IntegerType::get(Context, BitSize);
break;
}
}
return TLI.getValueType(DL, OpTy, true);
}
};
typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
} // end anonymous namespace
/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand. We prefer to assign virtual registers, to allow the
/// register allocator to handle the assignment process. However, if the asm
/// uses features that we can't model on machineinstrs, we have SDISel do the
/// allocation. This produces generally horrible, but correct, code.
///
/// OpInfo describes the operand.
///
static void GetRegistersForValue(SelectionDAG &DAG,
const TargetLowering &TLI,
SDLoc DL,
SDISelAsmOperandInfo &OpInfo) {
LLVMContext &Context = *DAG.getContext();
MachineFunction &MF = DAG.getMachineFunction();
SmallVector<unsigned, 4> Regs;
// If this is a constraint for a single physreg, or a constraint for a
// register class, find it.
std::pair<unsigned, const TargetRegisterClass *> PhysReg =
TLI.getRegForInlineAsmConstraint(MF.getSubtarget().getRegisterInfo(),
OpInfo.ConstraintCode,
OpInfo.ConstraintVT);
unsigned NumRegs = 1;
if (OpInfo.ConstraintVT != MVT::Other) {
// If this is an FP input in an integer register (or vice versa), insert a
// bitcast of the input value. More generally, handle any case where the
// input value disagrees with the register class we plan to stick it in.
if (OpInfo.Type == InlineAsm::isInput &&
PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
// Try to convert to the first EVT that the reg class contains. If the
// types are identical size, use a bitcast to convert (e.g. two differing
// vector types).
MVT RegVT = *PhysReg.second->vt_begin();
if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
} else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
// If the input is a FP value and we want it in FP registers, do a
// bitcast to the corresponding integer type. This turns an f64 value
// into i64, which can be passed with two i32 values on a 32-bit
// machine.
RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
}
}
NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
}
MVT RegVT;
EVT ValueVT = OpInfo.ConstraintVT;
// If this is a constraint for a specific physical register, like {r17},
// assign it now.
if (unsigned AssignedReg = PhysReg.first) {
const TargetRegisterClass *RC = PhysReg.second;
if (OpInfo.ConstraintVT == MVT::Other)
ValueVT = *RC->vt_begin();
// Get the actual register value type. This is important, because the user
// may have asked for (e.g.) the AX register in i32 type. We need to
// remember that AX is actually i16 to get the right extension.
RegVT = *RC->vt_begin();
// This is an explicit reference to a physical register.
Regs.push_back(AssignedReg);
// If this is an expanded reference, add the rest of the regs to Regs.
if (NumRegs != 1) {
TargetRegisterClass::iterator I = RC->begin();
for (; *I != AssignedReg; ++I)
assert(I != RC->end() && "Didn't find reg!");
// Already added the first reg.
--NumRegs; ++I;
for (; NumRegs; --NumRegs, ++I) {
assert(I != RC->end() && "Ran out of registers to allocate!");
Regs.push_back(*I);
}
}
OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
return;
}
// Otherwise, if this was a reference to an LLVM register class, create vregs
// for this reference.
if (const TargetRegisterClass *RC = PhysReg.second) {
RegVT = *RC->vt_begin();
if (OpInfo.ConstraintVT == MVT::Other)
ValueVT = RegVT;
// Create the appropriate number of virtual registers.
MachineRegisterInfo &RegInfo = MF.getRegInfo();
for (; NumRegs; --NumRegs)
Regs.push_back(RegInfo.createVirtualRegister(RC));
OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
return;
}
// Otherwise, we couldn't allocate enough registers for this.
}
/// visitInlineAsm - Handle a call to an InlineAsm object.
///
void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
/// ConstraintOperands - Information about all of the constraints.
SDISelAsmOperandInfoVector ConstraintOperands;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
bool hasMemory = false;
unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
unsigned ResNo = 0; // ResNo - The result number of the next output.
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
MVT OpVT = MVT::Other;
// Compute the value type for each operand.
switch (OpInfo.Type) {
case InlineAsm::isOutput:
// Indirect outputs just consume an argument.
if (OpInfo.isIndirect) {
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
}
// The return value of the call is this value. As such, there is no
// corresponding argument.
assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
}
++ResNo;
break;
case InlineAsm::isInput:
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
case InlineAsm::isClobber:
// Nothing to do.
break;
}
// If this is an input or an indirect output, process the call argument.
// BasicBlocks are labels, currently appearing only in inline asm.
if (OpInfo.CallOperandVal) {
if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
} else {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI,
DAG.getDataLayout()).getSimpleVT();
}
OpInfo.ConstraintVT = OpVT;
// Indirect operand accesses access memory.
if (OpInfo.isIndirect)
hasMemory = true;
else {
for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
TargetLowering::ConstraintType
CType = TLI.getConstraintType(OpInfo.Codes[j]);
if (CType == TargetLowering::C_Memory) {
hasMemory = true;
break;
}
}
}
}
SDValue Chain, Flag;
// We won't need to flush pending loads if this asm doesn't touch
// memory and is nonvolatile.
if (hasMemory || IA->hasSideEffects())
Chain = getRoot();
else
Chain = DAG.getRoot();
// Second pass over the constraints: compute which constraint option to use
// and assign registers to constraints that want a specific physreg.
for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
// If this is an output operand with a matching input operand, look up the
// matching input. If their types mismatch, e.g. one is an integer, the
// other is floating point, or their sizes are different, flag it as an
// error.
if (OpInfo.hasMatchingInput()) {
SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
std::pair<unsigned, const TargetRegisterClass *> MatchRC =
TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
OpInfo.ConstraintVT);
std::pair<unsigned, const TargetRegisterClass *> InputRC =
TLI.getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
Input.ConstraintVT);
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
(MatchRC.second != InputRC.second)) {
report_fatal_error("Unsupported asm: input constraint"
" with a matching output constraint of"
" incompatible type!");
}
Input.ConstraintVT = OpInfo.ConstraintVT;
}
}
// Compute the constraint code and ConstraintType to use.
TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
OpInfo.Type == InlineAsm::isClobber)
continue;
// If this is a memory input, and if the operand is not indirect, do what
// we need to provide an address for the memory input.
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
!OpInfo.isIndirect) {
assert((OpInfo.isMultipleAlternative ||
(OpInfo.Type == InlineAsm::isInput)) &&
"Can only indirectify direct input operands!");
// Memory operands really want the address of the value. If we don't have
// an indirect input, put it in the constant pool if we can; otherwise spill
// it to a stack slot.
// TODO: This isn't quite right. We need to handle these according to
// the addressing mode that the constraint wants. Also, this may take
// an additional register for the computation and we don't want that
// either.
// If the operand is a float, integer, or vector constant, spill to a
// constant pool entry to get its address.
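// For example (illustrative), a direct "m" input of constant i32 4 is given
// a constant pool entry and its address becomes the operand; a non-constant
// value instead gets the stack-slot store emitted below.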
const Value *OpVal = OpInfo.CallOperandVal;
if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
OpInfo.CallOperand = DAG.getConstantPool(
cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
} else {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
Type *Ty = OpVal->getType();
auto &DL = DAG.getDataLayout();
uint64_t TySize = DL.getTypeAllocSize(Ty);
unsigned Align = DL.getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
SDValue StackSlot =
DAG.getFrameIndex(SSFI, TLI.getPointerTy(DAG.getDataLayout()));
Chain = DAG.getStore(Chain, getCurSDLoc(),
OpInfo.CallOperand, StackSlot,
MachinePointerInfo::getFixedStack(SSFI),
false, false, 0);
OpInfo.CallOperand = StackSlot;
}
// There is no longer a Value* corresponding to this operand.
OpInfo.CallOperandVal = nullptr;
// It is now an indirect operand.
OpInfo.isIndirect = true;
}
// If this constraint is for a specific register, allocate it before
// anything else.
if (OpInfo.ConstraintType == TargetLowering::C_Register)
GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
}
// Third pass - Loop over all of the operands, assigning virtual or physregs
// to register class operands.
for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
// C_Register operands have already been allocated, Other/Memory don't need
// to be.
if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
}
// AsmNodeOperands - The operands for the ISD::INLINEASM node.
std::vector<SDValue> AsmNodeOperands;
AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
// If we have a !srcloc metadata node associated with it, we want to attach
// this to the ultimately generated inline asm machineinstr. To do this, we
// pass in the third operand as this (potentially null) inline asm MDNode.
const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
// Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
// bits as operand 3.
unsigned ExtraInfo = 0;
if (IA->hasSideEffects())
ExtraInfo |= InlineAsm::Extra_HasSideEffects;
if (IA->isAlignStack())
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
// Set the asm dialect.
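// getDialect() returns 0 for the AT&T dialect and 1 for Intel (see the
// AsmDialect enum), so the multiply sets the Extra_AsmDialect bit only for
// Intel-dialect asm.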
ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
// Determine if this InlineAsm MayLoad or MayStore based on the constraints.
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
// Compute the constraint code and ConstraintType to use.
TLI.ComputeConstraintToUse(OpInfo, SDValue());
// Ideally, we would only check against memory constraints. However, the
// meaning of an 'other' constraint can be target-specific and we can't easily
// reason about it. Therefore, be conservative and set MayLoad/MayStore
// for 'other' constraints as well.
if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
OpInfo.ConstraintType == TargetLowering::C_Other) {
if (OpInfo.Type == InlineAsm::isInput)
ExtraInfo |= InlineAsm::Extra_MayLoad;
else if (OpInfo.Type == InlineAsm::isOutput)
ExtraInfo |= InlineAsm::Extra_MayStore;
else if (OpInfo.Type == InlineAsm::isClobber)
ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
}
}
AsmNodeOperands.push_back(DAG.getTargetConstant(
ExtraInfo, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
// Loop over all of the inputs, copying the operand values into the
// appropriate registers and processing the output regs.
RegsForValue RetValRegs;
// IndirectStoresToEmit - The set of stores to emit after the inline asm node.
std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
switch (OpInfo.Type) {
case InlineAsm::isOutput: {
if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
OpInfo.ConstraintType != TargetLowering::C_Register) {
// Memory output, or 'other' output (e.g. 'X' constraint).
assert(OpInfo.isIndirect && "Memory output must be indirect operand");
unsigned ConstraintID =
TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
assert(ConstraintID != InlineAsm::Constraint_Unknown &&
"Failed to convert memory constraint code to constraint id.");
// Add information to the INLINEASM node to know about this output.
unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
MVT::i32));
AsmNodeOperands.push_back(OpInfo.CallOperand);
break;
}
// Otherwise, this is a register or register class output.
// Copy the output from the appropriate register. Find a register that
// we can use.
if (OpInfo.AssignedRegs.Regs.empty()) {
LLVMContext &Ctx = *DAG.getContext();
Ctx.emitError(CS.getInstruction(),
"couldn't allocate output register for constraint '" +
Twine(OpInfo.ConstraintCode) + "'");
return;
}
// If this is an indirect operand, store through the pointer after the
// asm.
if (OpInfo.isIndirect) {
IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
OpInfo.CallOperandVal));
} else {
// This is the result value of the call.
assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
// Concatenate this output onto the outputs list.
RetValRegs.append(OpInfo.AssignedRegs);
}
// Add information to the INLINEASM node to know that this register is
// set.
OpInfo.AssignedRegs
.AddInlineAsmOperands(OpInfo.isEarlyClobber
? InlineAsm::Kind_RegDefEarlyClobber
: InlineAsm::Kind_RegDef,
false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
break;
}
case InlineAsm::isInput: {
SDValue InOperandVal = OpInfo.CallOperand;
if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
// If this is required to match an output register we have already set,
// just use its register.
unsigned OperandNo = OpInfo.getMatchedOperand();
// Scan until we find the definition of this operand that we already emitted.
// When we find it, create a RegsForValue operand.
unsigned CurOp = InlineAsm::Op_FirstOperand;
for (; OperandNo; --OperandNo) {
// Advance to the next operand.
unsigned OpFlag =
cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
assert((InlineAsm::isRegDefKind(OpFlag) ||
InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
}
unsigned OpFlag =
cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
if (InlineAsm::isRegDefKind(OpFlag) ||
InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
// Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
if (OpInfo.isIndirect) {
// This happens on gcc/testsuite/gcc.dg/pr8788-1.c
LLVMContext &Ctx = *DAG.getContext();
Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
" don't know how to handle tied "
"indirect register inputs");
return;
}
RegsForValue MatchedRegs;
MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
MatchedRegs.RegVTs.push_back(RegVT);
MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
i != e; ++i) {
if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC));
else {
LLVMContext &Ctx = *DAG.getContext();
Ctx.emitError(CS.getInstruction(),
"inline asm error: This value"
" type register class is not natively supported!");
return;
}
}
SDLoc dl = getCurSDLoc();
// Use the produced MatchedRegs object to copy the input value into the
// matched registers.
MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl,
Chain, &Flag, CS.getInstruction());
MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
true, OpInfo.getMatchedOperand(), dl,
DAG, AsmNodeOperands);
break;
}
assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
"Unexpected number of operands");
// Add information to the INLINEASM node to know about this input.
// See InlineAsm.h isUseOperandTiedToDef.
OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
OpInfo.getMatchedOperand());
AsmNodeOperands.push_back(DAG.getTargetConstant(
OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
break;
}
// Treat indirect 'X' constraint as memory.
if (OpInfo.ConstraintType == TargetLowering::C_Other &&
OpInfo.isIndirect)
OpInfo.ConstraintType = TargetLowering::C_Memory;
if (OpInfo.ConstraintType == TargetLowering::C_Other) {
std::vector<SDValue> Ops;
TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
Ops, DAG);
if (Ops.empty()) {
LLVMContext &Ctx = *DAG.getContext();
Ctx.emitError(CS.getInstruction(),
"invalid operand for inline asm constraint '" +
Twine(OpInfo.ConstraintCode) + "'");
return;
}
// Add information to the INLINEASM node to know about this input.
unsigned ResOpType =
InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
AsmNodeOperands.push_back(DAG.getTargetConstant(
ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
break;
}
if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
assert(InOperandVal.getValueType() ==
TLI.getPointerTy(DAG.getDataLayout()) &&
"Memory operands expect pointer values");
unsigned ConstraintID =
TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
assert(ConstraintID != InlineAsm::Constraint_Unknown &&
"Failed to convert memory constraint code to constraint id.");
// Add information to the INLINEASM node to know about this input.
unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
getCurSDLoc(),
MVT::i32));
AsmNodeOperands.push_back(InOperandVal);
break;
}
assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
OpInfo.ConstraintType == TargetLowering::C_Register) &&
"Unknown constraint type!");
// TODO: Support this.
if (OpInfo.isIndirect) {
LLVMContext &Ctx = *DAG.getContext();
Ctx.emitError(CS.getInstruction(),
"Don't know how to handle indirect register inputs yet "
"for constraint '" +
Twine(OpInfo.ConstraintCode) + "'");
return;
}
// Copy the input into the appropriate registers.
if (OpInfo.AssignedRegs.Regs.empty()) {
LLVMContext &Ctx = *DAG.getContext();
Ctx.emitError(CS.getInstruction(),
"couldn't allocate input reg for constraint '" +
Twine(OpInfo.ConstraintCode) + "'");
return;
}
SDLoc dl = getCurSDLoc();
OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
Chain, &Flag, CS.getInstruction());
OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
dl, DAG, AsmNodeOperands);
break;
}
case InlineAsm::isClobber: {
// Add the clobbered value to the operand list, so that the register
// allocator is aware that the physreg got clobbered.
if (!OpInfo.AssignedRegs.Regs.empty())
OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
false, 0, getCurSDLoc(), DAG,
AsmNodeOperands);
break;
}
}
}
// Finish up input operands. Set the input chain and add the flag last.
AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
Flag = Chain.getValue(1);
// If this asm returns a register value, copy the result from that register
// and set it as the value of the call.
if (!RetValRegs.Regs.empty()) {
SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
Chain, &Flag, CS.getInstruction());
// FIXME: Why don't we do this for inline asms with MRVs?
if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
EVT ResultType = TLI.getValueType(DAG.getDataLayout(), CS.getType());
// If any of the results of the inline asm is a vector, it may have the
// wrong width/num elts. This can happen for register classes that can
// contain multiple different value types. The preg or vreg allocated may
// not have the same VT as was expected. Convert it to the right type
// with bit_convert.
if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
ResultType, Val);
} else if (ResultType != Val.getValueType() &&
ResultType.isInteger() && Val.getValueType().isInteger()) {
// If a result value was tied to an input value, the computed result may
// have a wider width than the expected result. Extract the relevant
// portion.
Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
}
assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
}
setValue(CS.getInstruction(), Val);
// Don't need to use this as a chain in this case.
if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
return;
}
std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
// Process indirect outputs, first output all of the flagged copies out of
// physregs.
for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
const Value *Ptr = IndirectStoresToEmit[i].second;
SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
Chain, &Flag, IA);
StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
}
// Emit the non-flagged stores from the physregs.
SmallVector<SDValue, 8> OutChains;
for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
SDValue Val = DAG.getStore(Chain, getCurSDLoc(),
StoresToEmit[i].first,
getValue(StoresToEmit[i].second),
MachinePointerInfo(StoresToEmit[i].second),
false, false, 0);
OutChains.push_back(Val);
}
if (!OutChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
DAG.setRoot(Chain);
}
void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
MVT::Other, getRoot(),
getValue(I.getArgOperand(0)),
DAG.getSrcValue(I.getArgOperand(0))));
}
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const DataLayout &DL = DAG.getDataLayout();
SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)),
DL.getABITypeAlignment(I.getType()));
setValue(&I, V);
DAG.setRoot(V.getValue(1));
}
void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
MVT::Other, getRoot(),
getValue(I.getArgOperand(0)),
DAG.getSrcValue(I.getArgOperand(0))));
}
void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
MVT::Other, getRoot(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
DAG.getSrcValue(I.getArgOperand(0)),
DAG.getSrcValue(I.getArgOperand(1))));
}
/// \brief Lower an argument list according to the target calling convention.
///
/// \return A tuple of <return-value, token-chain>
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
std::pair<SDValue, SDValue>
SelectionDAGBuilder::lowerCallOperands(ImmutableCallSite CS, unsigned ArgIdx,
unsigned NumArgs, SDValue Callee,
Type *ReturnTy,
MachineBasicBlock *LandingPad,
bool IsPatchPoint) {
TargetLowering::ArgListTy Args;
Args.reserve(NumArgs);
// Populate the argument list.
// Attributes for args start at offset 1, after the return attribute.
for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
ArgI != ArgE; ++ArgI) {
const Value *V = CS->getOperand(ArgI);
assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
TargetLowering::ArgListEntry Entry;
Entry.Node = getValue(V);
Entry.Ty = V->getType();
Entry.setAttributes(&CS, AttrI);
Args.push_back(Entry);
}
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
.setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args), NumArgs)
.setDiscardResult(CS->use_empty()).setIsPatchPoint(IsPatchPoint);
return lowerInvokable(CLI, LandingPad);
}
/// \brief Add a stack map intrinsic call's live variable operands to a stackmap
/// or patchpoint target node's operand list.
///
/// Constants are converted to TargetConstants purely as an optimization to
/// avoid constant materialization and register allocation.
///
/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
/// generate address computation nodes, and so ExpandISelPseudo can convert the
/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
/// address materialization and register allocation, but may also be required
/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
/// alloca in the entry block, then the runtime may assume that the alloca's
/// StackMap location can be read immediately after compilation and that the
/// location is valid at any point during execution (this is similar to the
/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
/// only available in a register, then the runtime would need to trap when
/// execution reaches the StackMap in order to read the alloca's location.
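/// For example (illustrative), a constant live value is pushed as the pair
/// <StackMaps::ConstantOp, value> and an alloca's frame index as a
/// TargetFrameIndex, so neither needs a register at the patch point.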
static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
SDLoc DL, SmallVectorImpl<SDValue> &Ops,
SelectionDAGBuilder &Builder) {
for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
SDValue OpVal = Builder.getValue(CS.getArgument(i));
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
Ops.push_back(
Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
Ops.push_back(
Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
} else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
Ops.push_back(Builder.DAG.getTargetFrameIndex(
FI->getIndex(), TLI.getPointerTy(Builder.DAG.getDataLayout())));
} else
Ops.push_back(OpVal);
}
}
/// \brief Lower llvm.experimental.stackmap directly to its target opcode.
void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
// void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
// [live variables...])
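// For example (illustrative IR):
//   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 7, i32 4,
//                                                         i64 %live)
// records where %live is kept at patch-site 7 and pads with 4 bytes of nops.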
assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
SDValue Chain, InFlag, Callee, NullPtr;
SmallVector<SDValue, 32> Ops;
SDLoc DL = getCurSDLoc();
Callee = getValue(CI.getCalledValue());
NullPtr = DAG.getIntPtrConstant(0, DL, true);
// The stackmap intrinsic only records the live variables (the arguments
// passed to it) and emits NOPs (if requested). Unlike the patchpoint
// intrinsic, this won't be lowered to a function call. This means we don't
// have to worry about calling conventions and target specific lowering code.
// Instead we perform the call lowering right here.
//
// chain, flag = CALLSEQ_START(chain, 0)
// chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
// chain, flag = CALLSEQ_END(chain, 0, 0, flag)
//
Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
InFlag = Chain.getValue(1);
// Add the <id> and <numBytes> constants.
SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
Ops.push_back(DAG.getTargetConstant(
cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(DAG.getTargetConstant(
cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
MVT::i32));
// Push live variables for the stack map.
addStackMapLiveVars(&CI, 2, DL, Ops, *this);
// We are not pushing any register mask info here on the operands list,
// because the stackmap doesn't clobber anything.
// Push the chain and the glue flag.
Ops.push_back(Chain);
Ops.push_back(InFlag);
// Create the STACKMAP node.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
Chain = SDValue(SM, 0);
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
// Stackmaps don't generate values, so nothing goes into the NodeMap.
// Set the root to the target-lowered call chain.
DAG.setRoot(Chain);
// Inform the Frame Information that we have a stackmap in this function.
FuncInfo.MF->getFrameInfo()->setHasStackMap();
}
/// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
MachineBasicBlock *LandingPad) {
// void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
// i32 <numBytes>,
// i8* <target>,
// i32 <numArgs>,
// [Args...],
// [live variables...])
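// For example (illustrative IR):
//   %r = call i64 (i64, i32, i8*, i32, ...)
//              @llvm.experimental.patchpoint.i64(i64 3, i32 15, i8* %f,
//                                                i32 2, i64 %a, i64 %b)
// calls %f(%a, %b) through a patchable 15-byte region with patch-site id 3.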
CallingConv::ID CC = CS.getCallingConv();
bool IsAnyRegCC = CC == CallingConv::AnyReg;
bool HasDef = !CS->getType()->isVoidTy();
SDLoc dl = getCurSDLoc();
SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
// Handle immediate and symbolic callees.
if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
/*isTarget=*/true);
else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
SDLoc(SymbolicCallee),
SymbolicCallee->getValueType(0));
// Get the real number of arguments participating in the call <numArgs>
SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
// Intrinsics include all meta-operands up to but not including CC.
unsigned NumMetaOpers = PatchPointOpers::CCPos;
assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
"Not enough arguments provided to the patchpoint intrinsic");
// For AnyRegCC the arguments are lowered later on manually.
unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
Type *ReturnTy =
IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
std::pair<SDValue, SDValue> Result =
lowerCallOperands(CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy,
LandingPad, true);
SDNode *CallEnd = Result.second.getNode();
if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
CallEnd = CallEnd->getOperand(0).getNode();
// Get a call instruction from the call sequence chain.
// Tail calls are not allowed.
assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
"Expected a callseq node.");
SDNode *Call = CallEnd->getOperand(0).getNode();
bool HasGlue = Call->getGluedNode();
// Replace the target specific call node with the patchable intrinsic.
SmallVector<SDValue, 8> Ops;
// Add the <id> and <numBytes> constants.
SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
Ops.push_back(DAG.getTargetConstant(
cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(DAG.getTargetConstant(
cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
MVT::i32));
// Add the callee.
Ops.push_back(Callee);
// Adjust <numArgs> to account for any arguments that have been passed on the
// stack instead.
// Call Node: Chain, Target, {Args}, RegMask, [Glue]
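// E.g. a glued call (Chain, Target, A, B, RegMask, Glue) has six operands,
// yielding 6 - 4 = 2 register arguments (A and B).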
unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
// Add the calling convention
Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
// Add the arguments we omitted previously. The register allocator should
// place these in any free register.
if (IsAnyRegCC)
for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
Ops.push_back(getValue(CS.getArgument(i)));
// Push the arguments from the call instruction up to the register mask.
SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
Ops.append(Call->op_begin() + 2, e);
// Push live variables for the stack map.
addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
// Push the register mask info.
if (HasGlue)
Ops.push_back(*(Call->op_end()-2));
else
Ops.push_back(*(Call->op_end()-1));
// Push the chain (this is originally the first operand of the call, but
// now becomes the last or second-to-last operand).
Ops.push_back(*(Call->op_begin()));
// Push the glue flag (last operand).
if (HasGlue)
Ops.push_back(*(Call->op_end()-1));
SDVTList NodeTys;
if (IsAnyRegCC && HasDef) {
// Create the return types based on the intrinsic definition
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 3> ValueVTs;
ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
assert(ValueVTs.size() == 1 && "Expected only one return value type.");
// There is always a chain and a glue type at the end
ValueVTs.push_back(MVT::Other);
ValueVTs.push_back(MVT::Glue);
NodeTys = DAG.getVTList(ValueVTs);
} else
NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
// Replace the target specific call node with a PATCHPOINT node.
MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
dl, NodeTys, Ops);
// Update the NodeMap.
if (HasDef) {
if (IsAnyRegCC)
setValue(CS.getInstruction(), SDValue(MN, 0));
else
setValue(CS.getInstruction(), Result.first);
}
// Fixup the consumers of the intrinsic. The chain and glue may be used in the
// call sequence. Furthermore the location of the chain and glue can change
// when the AnyReg calling convention is used and the intrinsic returns a
// value.
if (IsAnyRegCC && HasDef) {
SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
} else
DAG.ReplaceAllUsesWith(Call, MN);
DAG.DeleteNode(Call);
// Inform the Frame Information that we have a patchpoint in this function.
FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
}
/// Returns an AttributeSet representing the attributes applied to the return
/// value of the given call.
static AttributeSet getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
SmallVector<Attribute::AttrKind, 2> Attrs;
if (CLI.RetSExt)
Attrs.push_back(Attribute::SExt);
if (CLI.RetZExt)
Attrs.push_back(Attribute::ZExt);
if (CLI.IsInReg)
Attrs.push_back(Attribute::InReg);
return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
Attrs);
}
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just calls LowerCall.
/// FIXME: When all targets are migrated to using LowerCall, this hook should
/// be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// Handle the incoming return values from the call.
CLI.Ins.clear();
Type *OrigRetTy = CLI.RetTy;
SmallVector<EVT, 4> RetTys;
SmallVector<uint64_t, 4> Offsets;
auto &DL = CLI.DAG.getDataLayout();
ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
bool CanLowerReturn =
this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
CLI.IsVarArg, Outs, CLI.RetTy->getContext());
SDValue DemoteStackSlot;
int DemoteStackIdx = -100;
if (!CanLowerReturn) {
// FIXME: equivalent assert?
// assert(!CS.hasInAllocaArgument() &&
// "sret demotion is incompatible with inalloca");
uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
MachineFunction &MF = CLI.DAG.getMachineFunction();
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy(DL));
ArgListEntry Entry;
Entry.Node = DemoteStackSlot;
Entry.Ty = StackSlotPtrType;
Entry.isSExt = false;
Entry.isZExt = false;
Entry.isInReg = false;
Entry.isSRet = true;
Entry.isNest = false;
Entry.isByVal = false;
Entry.isReturned = false;
Entry.Alignment = Align;
CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
// sret demotion isn't compatible with tail-calls, since the sret argument
// points into the caller's stack frame.
CLI.IsTailCall = false;
} else {
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT;
MyFlags.ArgVT = VT;
MyFlags.Used = CLI.IsReturnValueUsed;
if (CLI.RetSExt)
MyFlags.Flags.setSExt();
if (CLI.RetZExt)
MyFlags.Flags.setZExt();
if (CLI.IsInReg)
MyFlags.Flags.setInReg();
CLI.Ins.push_back(MyFlags);
}
}
}
// Handle all of the outgoing arguments.
CLI.Outs.clear();
CLI.OutVals.clear();
ArgListTy &Args = CLI.getArgs();
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
Type *FinalType = Args[i].Ty;
if (Args[i].isByVal)
FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
FinalType, CLI.CallConv, CLI.IsVarArg);
for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
++Value) {
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
if (Args[i].isZExt)
Flags.setZExt();
if (Args[i].isSExt)
Flags.setSExt();
if (Args[i].isInReg)
Flags.setInReg();
if (Args[i].isSRet)
Flags.setSRet();
if (Args[i].isByVal)
Flags.setByVal();
if (Args[i].isInAlloca) {
Flags.setInAlloca();
// Set the byval flag for CCAssignFn callbacks that don't know about
// inalloca. This way we can know how many bytes we should've allocated
// and how many bytes a callee cleanup function will pop. If we port
// inalloca to more targets, we'll have to add custom inalloca handling
// in the various CC lowering callbacks.
Flags.setByVal();
}
if (Args[i].isByVal || Args[i].isInAlloca) {
PointerType *Ty = cast<PointerType>(Args[i].Ty);
Type *ElementTy = Ty->getElementType();
Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
// For ByVal, the alignment should come from the frontend. The backend will
// guess if this info is not there, but there are cases it cannot get right.
unsigned FrameAlign;
if (Args[i].Alignment)
FrameAlign = Args[i].Alignment;
else
FrameAlign = getByValTypeAlignment(ElementTy, DL);
Flags.setByValAlign(FrameAlign);
}
if (Args[i].isNest)
Flags.setNest();
if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
Flags.setOrigAlign(OriginalAlignment);
MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
SmallVector<SDValue, 4> Parts(NumParts);
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
if (Args[i].isSExt)
ExtendKind = ISD::SIGN_EXTEND;
else if (Args[i].isZExt)
ExtendKind = ISD::ZERO_EXTEND;
// Conservatively only handle 'returned' on non-vectors for now
if (Args[i].isReturned && !Op.getValueType().isVector()) {
assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
"unexpected use of 'returned'");
// Before passing 'returned' to the target lowering code, ensure that
// either the register MVT and the actual EVT are the same size or that
// the return value and argument are extended in the same way; in these
// cases it's safe to pass the argument register value unchanged as the
// return register value (although it's at the target's option whether
// to do so)
// TODO: allow code generation to take advantage of partially preserved
// registers rather than clobbering the entire register when the
// parameter extension method is not compatible with the return
// extension method
if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
(ExtendKind != ISD::ANY_EXTEND &&
CLI.RetSExt == Args[i].isSExt && CLI.RetZExt == Args[i].isZExt))
Flags.setReturned();
}
getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
for (unsigned j = 0; j != NumParts; ++j) {
// If it isn't the first piece, the alignment must be 1.
ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
i < CLI.NumFixedArgs,
i, j*Parts[j].getValueType().getStoreSize());
if (NumParts > 1 && j == 0)
MyFlags.Flags.setSplit();
else if (j != 0)
MyFlags.Flags.setOrigAlign(1);
CLI.Outs.push_back(MyFlags);
CLI.OutVals.push_back(Parts[j]);
}
if (NeedsRegBlock && Value == NumValues - 1)
CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
}
}
SmallVector<SDValue, 4> InVals;
CLI.Chain = LowerCall(CLI, InVals);
// Verify that the target's LowerCall behaved as expected.
assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
"LowerCall didn't return a valid chain!");
assert((!CLI.IsTailCall || InVals.empty()) &&
"LowerCall emitted a return value for a tail call!");
assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
"LowerCall didn't emit the correct number of values!");
// For a tail call, the return value is merely live-out and there aren't
// any nodes in the DAG representing it. Return a special value to
// indicate that a tail call has been emitted and no more Instructions
// should be processed in the current block.
if (CLI.IsTailCall) {
CLI.DAG.setRoot(CLI.Chain);
return std::make_pair(SDValue(), SDValue());
}
DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
assert(InVals[i].getNode() &&
"LowerCall emitted a null value!");
assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
"LowerCall emitted a value with the wrong type!");
});
SmallVector<SDValue, 4> ReturnValues;
if (!CanLowerReturn) {
// The instruction result is the result of loading from the
// hidden sret parameter.
SmallVector<EVT, 1> PVTs;
Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
EVT PtrVT = PVTs[0];
unsigned NumValues = RetTys.size();
ReturnValues.resize(NumValues);
SmallVector<SDValue, 4> Chains(NumValues);
for (unsigned i = 0; i < NumValues; ++i) {
SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
CLI.DAG.getConstant(Offsets[i], CLI.DL,
PtrVT));
SDValue L = CLI.DAG.getLoad(
RetTys[i], CLI.DL, CLI.Chain, Add,
MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]), false,
false, false, 1);
ReturnValues[i] = L;
Chains[i] = L.getValue(1);
}
CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
} else {
// Collect the legal value parts into potentially illegal values
// that correspond to the original function's return values.
ISD::NodeType AssertOp = ISD::DELETED_NODE;
if (CLI.RetSExt)
AssertOp = ISD::AssertSext;
else if (CLI.RetZExt)
AssertOp = ISD::AssertZext;
unsigned CurReg = 0;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
NumRegs, RegisterVT, VT, nullptr,
AssertOp));
CurReg += NumRegs;
}
// For a function returning void, there is no return value. We can't create
// such a node, so we just return a null return value in that case; nothing
// will actually look at the value.
if (ReturnValues.empty())
return std::make_pair(SDValue(), CLI.Chain);
}
SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
CLI.DAG.getVTList(RetTys), ReturnValues);
return std::make_pair(Res, CLI.Chain);
}
void TargetLowering::LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
SDValue Res = LowerOperation(SDValue(N, 0), DAG);
if (Res.getNode())
Results.push_back(Res);
}
SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
llvm_unreachable("LowerOperation not implemented for this target!");
}
void
SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
SDValue Op = getNonRegisterValue(V);
assert((Op.getOpcode() != ISD::CopyFromReg ||
cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
"Copy from a reg to the same reg!");
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
V->getType());
SDValue Chain = DAG.getEntryNode();
ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
FuncInfo.PreferredExtendType.end())
? ISD::ANY_EXTEND
: FuncInfo.PreferredExtendType[V];
RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
PendingExports.push_back(Chain);
}
#include "llvm/CodeGen/SelectionDAGISel.h"
/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
// With FastISel active, we may be splitting blocks, so force creation
// of virtual registers for all non-dead arguments.
if (FastISel)
return A->use_empty();
const BasicBlock *Entry = A->getParent()->begin();
for (const User *U : A->users())
if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
return false; // Use not in entry block.
return true;
}
void SelectionDAGISel::LowerArguments(const Function &F) {
SelectionDAG &DAG = SDB->DAG;
SDLoc dl = SDB->getCurSDLoc();
const DataLayout &DL = DAG.getDataLayout();
SmallVector<ISD::InputArg, 16> Ins;
if (!FuncInfo->CanLowerReturn) {
// Put in an sret pointer parameter before all the other parameters.
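// For example (illustrative), a function returning a struct too large for
// the target's return registers is rewritten to take a hidden pointer to
// the result buffer as its first argument.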
SmallVector<EVT, 1> ValueVTs;
ComputeValueVTs(*TLI, DAG.getDataLayout(),
PointerType::getUnqual(F.getReturnType()), ValueVTs);
// NOTE: Assuming that a pointer will never break down to more than one VT
// or more than one register.
ISD::ArgFlagsTy Flags;
Flags.setSRet();
MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
ISD::InputArg::NoArgIndex, 0);
Ins.push_back(RetArg);
}
// Set up the incoming argument description vector.
unsigned Idx = 1;
for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; ++I, ++Idx) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*TLI, DAG.getDataLayout(), I->getType(), ValueVTs);
bool isArgValueUsed = !I->use_empty();
unsigned PartBase = 0;
Type *FinalType = I->getType();
if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
FinalType = cast<PointerType>(FinalType)->getElementType();
bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
FinalType, F.getCallingConv(), F.isVarArg());
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
Flags.setZExt();
if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
Flags.setSExt();
if (F.getAttributes().hasAttribute(Idx, Attribute::InReg))
Flags.setInReg();
if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
Flags.setSRet();
if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
Flags.setByVal();
if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) {
Flags.setInAlloca();
// Set the byval flag for CCAssignFn callbacks that don't know about
// inalloca. This way we can know how many bytes we should've allocated
// and how many bytes a callee cleanup function will pop. If we port
// inalloca to more targets, we'll have to add custom inalloca handling
// in the various CC lowering callbacks.
Flags.setByVal();
}
if (Flags.isByVal() || Flags.isInAlloca()) {
PointerType *Ty = cast<PointerType>(I->getType());
Type *ElementTy = Ty->getElementType();
Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
// For ByVal, the alignment should be passed from the frontend. The backend
// will guess if this info is not there, but there are cases it cannot get right.
unsigned FrameAlign;
if (F.getParamAlignment(Idx))
FrameAlign = F.getParamAlignment(Idx);
else
FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
Flags.setByValAlign(FrameAlign);
}
if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
Flags.setNest();
if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
Flags.setOrigAlign(OriginalAlignment);
MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
Idx-1, PartBase+i*RegisterVT.getStoreSize());
if (NumRegs > 1 && i == 0)
MyFlags.Flags.setSplit();
// If it isn't the first piece, the alignment must be 1.
else if (i > 0)
MyFlags.Flags.setOrigAlign(1);
Ins.push_back(MyFlags);
}
if (NeedsRegBlock && Value == NumValues - 1)
Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
PartBase += VT.getStoreSize();
}
}
// Call the target to set up the argument values.
SmallVector<SDValue, 8> InVals;
SDValue NewRoot = TLI->LowerFormalArguments(
DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
// Verify that the target's LowerFormalArguments behaved as expected.
assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
"LowerFormalArguments didn't return a valid chain!");
assert(InVals.size() == Ins.size() &&
"LowerFormalArguments didn't emit the correct number of values!");
DEBUG({
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
assert(InVals[i].getNode() &&
"LowerFormalArguments emitted a null value!");
assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
"LowerFormalArguments emitted a value with the wrong type!");
}
});
// Update the DAG with the new chain value resulting from argument lowering.
DAG.setRoot(NewRoot);
// Set up the argument values.
unsigned i = 0;
Idx = 1;
if (!FuncInfo->CanLowerReturn) {
// Create a virtual register for the sret pointer, and put in a copy
// from the sret argument into it.
SmallVector<EVT, 1> ValueVTs;
ComputeValueVTs(*TLI, DAG.getDataLayout(),
PointerType::getUnqual(F.getReturnType()), ValueVTs);
MVT VT = ValueVTs[0].getSimpleVT();
MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
ISD::NodeType AssertOp = ISD::DELETED_NODE;
SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
RegVT, VT, nullptr, AssertOp);
MachineFunction& MF = SDB->DAG.getMachineFunction();
MachineRegisterInfo& RegInfo = MF.getRegInfo();
unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
FuncInfo->DemoteRegister = SRetReg;
NewRoot =
SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
DAG.setRoot(NewRoot);
// i indexes lowered arguments. Bump it past the hidden sret argument.
// Idx indexes LLVM arguments. Don't touch it.
++i;
}
for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
++I, ++Idx) {
SmallVector<SDValue, 4> ArgValues;
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*TLI, DAG.getDataLayout(), I->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
// If this argument is unused then remember its value. It is used to generate
// debugging information.
if (I->use_empty() && NumValues) {
SDB->setUnusedArgValue(I, InVals[i]);
// Also remember any frame index for use in FastISel.
if (FrameIndexSDNode *FI =
dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
}
for (unsigned Val = 0; Val != NumValues; ++Val) {
EVT VT = ValueVTs[Val];
MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
if (!I->use_empty()) {
ISD::NodeType AssertOp = ISD::DELETED_NODE;
if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
AssertOp = ISD::AssertSext;
else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
AssertOp = ISD::AssertZext;
ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
NumParts, PartVT, VT,
nullptr, AssertOp));
}
i += NumParts;
}
// We don't need to do anything else for unused arguments.
if (ArgValues.empty())
continue;
// Note down frame index.
if (FrameIndexSDNode *FI =
dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
SDB->getCurSDLoc());
SDB->setValue(I, Res);
if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
if (LoadSDNode *LNode =
dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
if (FrameIndexSDNode *FI =
dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
}
// If this argument is live outside of the entry block, insert a copy from
// wherever we got it to the vreg that other BBs will reference it as.
if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
// If we can, though, try to skip creating an unnecessary vreg.
// FIXME: This isn't very clean... it would be nice to make this more
// general. It's also subtly incompatible with the hacks FastISel
// uses with vregs.
unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
FuncInfo->ValueMap[I] = Reg;
continue;
}
}
if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) {
FuncInfo->InitializeRegForValue(I);
SDB->CopyToExportRegsIfNeeded(I);
}
}
assert(i == InVals.size() && "Argument register count mismatch!");
// Finally, if the target has anything special to do, allow it to do so.
EmitFunctionEntryCode();
}
/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBBs for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
///
void
SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
const TerminatorInst *TI = LLVMBB->getTerminator();
SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
// Check PHI nodes in successors that expect a value to be available from this
// block.
for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
const BasicBlock *SuccBB = TI->getSuccessor(succ);
if (!isa<PHINode>(SuccBB->begin())) continue;
MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
// If this terminator has multiple identical successors (common for
// switches), only handle each succ once.
if (!SuccsHandled.insert(SuccMBB).second)
continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
// At this point we know that there is a 1-1 correspondence between LLVM PHI
// nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
for (BasicBlock::const_iterator I = SuccBB->begin();
const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
// Ignore dead PHIs.
if (PN->use_empty()) continue;
// Skip empty types
if (PN->getType()->isEmptyTy())
continue;
unsigned Reg;
const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
unsigned &RegOut = ConstantsOut[C];
if (RegOut == 0) {
RegOut = FuncInfo.CreateRegs(C->getType());
CopyValueToVirtualRegister(C, RegOut);
}
Reg = RegOut;
} else {
DenseMap<const Value *, unsigned>::iterator I =
FuncInfo.ValueMap.find(PHIOp);
if (I != FuncInfo.ValueMap.end())
Reg = I->second;
else {
assert(isa<AllocaInst>(PHIOp) &&
FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
"Didn't codegen value into a register!??");
Reg = FuncInfo.CreateRegs(PHIOp->getType());
CopyValueToVirtualRegister(PHIOp, Reg);
}
}
// Remember that this register needs to be added to the machine PHI node as
// the input for this MBB.
SmallVector<EVT, 4> ValueVTs;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
ComputeValueVTs(TLI, DAG.getDataLayout(), PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
for (unsigned i = 0, e = NumRegisters; i != e; ++i)
FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
Reg += NumRegisters;
}
}
}
ConstantsOut.clear();
}
/// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
/// if SuccMBB is null.
MachineBasicBlock *
SelectionDAGBuilder::StackProtectorDescriptor::
AddSuccessorMBB(const BasicBlock *BB,
MachineBasicBlock *ParentMBB,
bool IsLikely,
MachineBasicBlock *SuccMBB) {
// If SuccBB has not been created yet, create it.
if (!SuccMBB) {
MachineFunction *MF = ParentMBB->getParent();
MachineFunction::iterator BBI = ParentMBB;
SuccMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(++BBI, SuccMBB);
}
// Add it as a successor of ParentMBB.
ParentMBB->addSuccessor(
SuccMBB, BranchProbabilityInfo::getBranchWeightStackProtector(IsLikely));
return SuccMBB;
}
MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
MachineFunction::iterator I = MBB;
if (++I == FuncInfo.MF->end())
return nullptr;
return I;
}
/// During lowering new call nodes can be created (such as memset, etc.).
/// Those will become new roots of the current DAG, but complications arise
/// when they are tail calls. In such cases, the call lowering will update
/// the root, but the builder still needs to know that a tail call has been
/// lowered in order to avoid generating an additional return.
void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
// A null node indicates that the call was lowered as a tail call.
if (MaybeTC.getNode() != nullptr)
DAG.setRoot(MaybeTC);
else
HasTailCall = true;
}
bool SelectionDAGBuilder::isDense(const CaseClusterVector &Clusters,
unsigned *TotalCases, unsigned First,
unsigned Last) {
assert(Last >= First);
assert(TotalCases[Last] >= TotalCases[First]);
APInt LowCase = Clusters[First].Low->getValue();
APInt HighCase = Clusters[Last].High->getValue();
assert(LowCase.getBitWidth() == HighCase.getBitWidth());
// FIXME: A range of consecutive cases has 100% density, but only requires one
// comparison to lower. We should discriminate against such consecutive ranges
// in jump tables.
uint64_t Diff = (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100);
uint64_t Range = Diff + 1;
uint64_t NumCases =
TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
assert(NumCases < UINT64_MAX / 100);
assert(Range >= NumCases);
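// Worked example (illustrative, assuming MinJumpTableDensity is 40): cases
// {0, 1, 2, 9} give NumCases = 4 and Range = 10, so 4 * 100 >= 10 * 40
// holds and the range counts as dense.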
return NumCases * 100 >= Range * MinJumpTableDensity;
}
static inline bool areJTsAllowed(const TargetLowering &TLI) {
return TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
}
bool SelectionDAGBuilder::buildJumpTable(CaseClusterVector &Clusters,
unsigned First, unsigned Last,
const SwitchInst *SI,
MachineBasicBlock *DefaultMBB,
CaseCluster &JTCluster) {
assert(First <= Last);
uint32_t Weight = 0;
unsigned NumCmps = 0;
std::vector<MachineBasicBlock*> Table;
DenseMap<MachineBasicBlock*, uint32_t> JTWeights;
for (unsigned I = First; I <= Last; ++I) {
assert(Clusters[I].Kind == CC_Range);
Weight += Clusters[I].Weight;
assert(Weight >= Clusters[I].Weight && "Weight overflow!");
APInt Low = Clusters[I].Low->getValue();
APInt High = Clusters[I].High->getValue();
NumCmps += (Low == High) ? 1 : 2;
if (I != First) {
// Fill the gap between this and the previous cluster.
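// E.g. between clusters {1-2} and {5}, entries for 3 and 4 are filled with
// DefaultMBB (illustrative values).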
APInt PreviousHigh = Clusters[I - 1].High->getValue();
assert(PreviousHigh.slt(Low));
uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
for (uint64_t J = 0; J < Gap; J++)
Table.push_back(DefaultMBB);
}
uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
for (uint64_t J = 0; J < ClusterSize; ++J)
Table.push_back(Clusters[I].MBB);
JTWeights[Clusters[I].MBB] += Clusters[I].Weight;
}
unsigned NumDests = JTWeights.size();
if (isSuitableForBitTests(NumDests, NumCmps,
Clusters[First].Low->getValue(),
Clusters[Last].High->getValue())) {
// Clusters[First..Last] should be lowered as bit tests instead.
return false;
}
// Create the MBB that will load from and jump through the table.
// Note: We create it here, but it's not inserted into the function yet.
MachineFunction *CurMF = FuncInfo.MF;
MachineBasicBlock *JumpTableMBB =
CurMF->CreateMachineBasicBlock(SI->getParent());
// Add successors. Note: use table order for determinism.
SmallPtrSet<MachineBasicBlock *, 8> Done;
for (MachineBasicBlock *Succ : Table) {
if (Done.count(Succ))
continue;
addSuccessorWithWeight(JumpTableMBB, Succ, JTWeights[Succ]);
Done.insert(Succ);
}
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
->createJumpTableIndex(Table);
// Set up the jump table info.
JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
JumpTableHeader JTH(Clusters[First].Low->getValue(),
Clusters[Last].High->getValue(), SI->getCondition(),
nullptr, false);
JTCases.emplace_back(std::move(JTH), std::move(JT));
JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
JTCases.size() - 1, Weight);
return true;
}
void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
const SwitchInst *SI,
MachineBasicBlock *DefaultMBB) {
#ifndef NDEBUG
// Clusters must be non-empty, sorted, and only contain Range clusters.
assert(!Clusters.empty());
for (CaseCluster &C : Clusters)
assert(C.Kind == CC_Range);
for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!areJTsAllowed(TLI))
return;
const int64_t N = Clusters.size();
const unsigned MinJumpTableSize = TLI.getMinimumJumpTableEntries();
// TotalCases[i]: Total number of cases in Clusters[0..i].
SmallVector<unsigned, 8> TotalCases(N);
for (unsigned i = 0; i < N; ++i) {
APInt Hi = Clusters[i].High->getValue();
APInt Lo = Clusters[i].Low->getValue();
TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
if (i != 0)
TotalCases[i] += TotalCases[i - 1];
}
if (N >= MinJumpTableSize && isDense(Clusters, &TotalCases[0], 0, N - 1)) {
// Cheap case: the whole range may be suitable for a jump table.
CaseCluster JTCluster;
if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
Clusters[0] = JTCluster;
Clusters.resize(1);
return;
}
}
// The algorithm below is not suitable for -O0.
if (TM.getOptLevel() == CodeGenOpt::None)
return;
// Split Clusters into minimum number of dense partitions. The algorithm uses
// the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
// for the Case Statement'" (1994), but builds the MinPartitions array in
// reverse order to make it easier to reconstruct the partitions in ascending
// order. In the choice between two optimal partitionings, it picks the one
// which yields more jump tables.
// MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
SmallVector<unsigned, 8> MinPartitions(N);
// LastElement[i] is the last element of the partition starting at i.
SmallVector<unsigned, 8> LastElement(N);
// NumTables[i]: number of >= MinJumpTableSize partitions from Clusters[i..N-1].
SmallVector<unsigned, 8> NumTables(N);
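// Illustrative example: for clusters A B C where only B..C forms a dense
// range, the loop below computes MinPartitions = {2, 1, 1} and
// LastElement = {0, 2, 2}: A is a partition on its own and B..C is another.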
// Base case: There is only one way to partition Clusters[N-1].
MinPartitions[N - 1] = 1;
LastElement[N - 1] = N - 1;
assert(MinJumpTableSize > 1);
NumTables[N - 1] = 0;
// Note: loop indexes are signed to avoid underflow.
for (int64_t i = N - 2; i >= 0; i--) {
// Find optimal partitioning of Clusters[i..N-1].
// Baseline: Put Clusters[i] into a partition on its own.
MinPartitions[i] = MinPartitions[i + 1] + 1;
LastElement[i] = i;
NumTables[i] = NumTables[i + 1];
// Search for a solution that results in fewer partitions.
for (int64_t j = N - 1; j > i; j--) {
// Try building a partition from Clusters[i..j].
if (isDense(Clusters, &TotalCases[0], i, j)) {
unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
bool IsTable = j - i + 1 >= MinJumpTableSize;
unsigned Tables = IsTable + (j == N - 1 ? 0 : NumTables[j + 1]);
// If this j leads to fewer partitions, or same number of partitions
// with more lookup tables, it is a better partitioning.
if (NumPartitions < MinPartitions[i] ||
(NumPartitions == MinPartitions[i] && Tables > NumTables[i])) {
MinPartitions[i] = NumPartitions;
LastElement[i] = j;
NumTables[i] = Tables;
}
}
}
}
// Iterate over the partitions, replacing some with jump tables in-place.
unsigned DstIndex = 0;
for (unsigned First = 0, Last; First < N; First = Last + 1) {
Last = LastElement[First];
assert(Last >= First);
assert(DstIndex <= First);
unsigned NumClusters = Last - First + 1;
CaseCluster JTCluster;
if (NumClusters >= MinJumpTableSize &&
buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
Clusters[DstIndex++] = JTCluster;
} else {
for (unsigned I = First; I <= Last; ++I)
std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
}
}
Clusters.resize(DstIndex);
}
bool SelectionDAGBuilder::rangeFitsInWord(const APInt &Low, const APInt &High) {
// FIXME: Using the pointer type doesn't seem ideal.
uint64_t BW = DAG.getDataLayout().getPointerSizeInBits();
uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
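  // For example, with a 64-bit word, Low = 10 and High = 73 give Range = 64
  // and fit; High = 74 gives Range = 65 and does not.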
return Range <= BW;
}
bool SelectionDAGBuilder::isSuitableForBitTests(unsigned NumDests,
unsigned NumCmps,
const APInt &Low,
const APInt &High) {
// FIXME: I don't think NumCmps is the correct metric: a single case and a
// range of cases both require only one branch to lower. Just looking at the
// number of clusters and destinations should be enough to decide whether to
// build bit tests.
// To lower a range with bit tests, the range must fit the bitwidth of a
// machine word.
if (!rangeFitsInWord(Low, High))
return false;
// Decide whether it's profitable to lower this range with bit tests. Each
// destination requires a bit test and branch, and there is an overall range
// check branch. For a small number of clusters, separate comparisons might be
// cheaper, and for many destinations, splitting the range might be better.
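  // For example, cases {0, 8, 16} targeting one destination (NumDests == 1,
  // NumCmps == 3) are lowered with a bit test, while two singleton cases with
  // distinct destinations are cheaper as plain compare-and-branch.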
return (NumDests == 1 && NumCmps >= 3) ||
(NumDests == 2 && NumCmps >= 5) ||
(NumDests == 3 && NumCmps >= 6);
}
bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
unsigned First, unsigned Last,
const SwitchInst *SI,
CaseCluster &BTCluster) {
assert(First <= Last);
if (First == Last)
return false;
BitVector Dests(FuncInfo.MF->getNumBlockIDs());
unsigned NumCmps = 0;
for (int64_t I = First; I <= Last; ++I) {
assert(Clusters[I].Kind == CC_Range);
Dests.set(Clusters[I].MBB->getNumber());
NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
}
unsigned NumDests = Dests.count();
APInt Low = Clusters[First].Low->getValue();
APInt High = Clusters[Last].High->getValue();
assert(Low.slt(High));
if (!isSuitableForBitTests(NumDests, NumCmps, Low, High))
return false;
APInt LowBound;
APInt CmpRange;
const int BitWidth = DAG.getTargetLoweringInfo()
.getPointerTy(DAG.getDataLayout())
.getSizeInBits();
assert(rangeFitsInWord(Low, High) && "Case range must fit in bit mask!");
if (Low.isNonNegative() && High.slt(BitWidth)) {
    // Optimize the case where all the case values fit in a word
    // without having to subtract minValue; in that case the
    // subtraction can be omitted entirely.
LowBound = APInt::getNullValue(Low.getBitWidth());
CmpRange = High;
} else {
LowBound = Low;
CmpRange = High - Low;
}
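  // For example, cases {1, 3, 5} on a 64-bit target keep LowBound = 0 and use
  // CmpRange = 5, while cases {100, 103} use LowBound = 100 and CmpRange = 3.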
CaseBitsVector CBV;
uint32_t TotalWeight = 0;
for (unsigned i = First; i <= Last; ++i) {
// Find the CaseBits for this destination.
unsigned j;
for (j = 0; j < CBV.size(); ++j)
if (CBV[j].BB == Clusters[i].MBB)
break;
if (j == CBV.size())
CBV.push_back(CaseBits(0, Clusters[i].MBB, 0, 0));
CaseBits *CB = &CBV[j];
// Update Mask, Bits and ExtraWeight.
uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
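    // The expression below sets bits Lo through Hi. For example, Lo = 2 and
    // Hi = 4 yield (-1ULL >> 61) << 2 == 0b11100.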
CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
CB->Bits += Hi - Lo + 1;
CB->ExtraWeight += Clusters[i].Weight;
TotalWeight += Clusters[i].Weight;
assert(TotalWeight >= Clusters[i].Weight && "Weight overflow!");
}
BitTestInfo BTI;
std::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
// Sort by weight first, number of bits second.
if (a.ExtraWeight != b.ExtraWeight)
return a.ExtraWeight > b.ExtraWeight;
return a.Bits > b.Bits;
});
for (auto &CB : CBV) {
MachineBasicBlock *BitTestBB =
FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraWeight));
}
BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
SI->getCondition(), -1U, MVT::Other, false, nullptr,
nullptr, std::move(BTI));
BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
BitTestCases.size() - 1, TotalWeight);
return true;
}
void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
const SwitchInst *SI) {
// Partition Clusters into as few subsets as possible, where each subset has a
// range that fits in a machine word and has <= 3 unique destinations.
#ifndef NDEBUG
// Clusters must be sorted and contain Range or JumpTable clusters.
assert(!Clusters.empty());
assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
for (const CaseCluster &C : Clusters)
assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
for (unsigned i = 1; i < Clusters.size(); ++i)
assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif
// The algorithm below is not suitable for -O0.
if (TM.getOptLevel() == CodeGenOpt::None)
return;
// If target does not have legal shift left, do not emit bit tests at all.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT PTy = TLI.getPointerTy(DAG.getDataLayout());
if (!TLI.isOperationLegal(ISD::SHL, PTy))
return;
int BitWidth = PTy.getSizeInBits();
const int64_t N = Clusters.size();
// MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
SmallVector<unsigned, 8> MinPartitions(N);
// LastElement[i] is the last element of the partition starting at i.
SmallVector<unsigned, 8> LastElement(N);
// FIXME: This might not be the best algorithm for finding bit test clusters.
// Base case: There is only one way to partition Clusters[N-1].
MinPartitions[N - 1] = 1;
LastElement[N - 1] = N - 1;
// Note: loop indexes are signed to avoid underflow.
for (int64_t i = N - 2; i >= 0; --i) {
// Find optimal partitioning of Clusters[i..N-1].
// Baseline: Put Clusters[i] into a partition on its own.
MinPartitions[i] = MinPartitions[i + 1] + 1;
LastElement[i] = i;
// Search for a solution that results in fewer partitions.
// Note: the search is limited by BitWidth, reducing time complexity.
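    // For a 64-bit pointer type this means at most 63 candidate values of j
    // are examined for each i.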
for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
// Try building a partition from Clusters[i..j].
// Check the range.
if (!rangeFitsInWord(Clusters[i].Low->getValue(),
Clusters[j].High->getValue()))
continue;
// Check nbr of destinations and cluster types.
// FIXME: This works, but doesn't seem very efficient.
bool RangesOnly = true;
BitVector Dests(FuncInfo.MF->getNumBlockIDs());
for (int64_t k = i; k <= j; k++) {
if (Clusters[k].Kind != CC_Range) {
RangesOnly = false;
break;
}
Dests.set(Clusters[k].MBB->getNumber());
}
if (!RangesOnly || Dests.count() > 3)
break;
// Check if it's a better partition.
unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
if (NumPartitions < MinPartitions[i]) {
// Found a better partition.
MinPartitions[i] = NumPartitions;
LastElement[i] = j;
}
}
}
// Iterate over the partitions, replacing with bit-test clusters in-place.
unsigned DstIndex = 0;
for (unsigned First = 0, Last; First < N; First = Last + 1) {
Last = LastElement[First];
assert(First <= Last);
assert(DstIndex <= First);
CaseCluster BitTestCluster;
if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
Clusters[DstIndex++] = BitTestCluster;
} else {
size_t NumClusters = Last - First + 1;
std::memmove(&Clusters[DstIndex], &Clusters[First],
sizeof(Clusters[0]) * NumClusters);
DstIndex += NumClusters;
}
}
Clusters.resize(DstIndex);
}
void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
MachineBasicBlock *SwitchMBB,
MachineBasicBlock *DefaultMBB) {
MachineFunction *CurMF = FuncInfo.MF;
MachineBasicBlock *NextMBB = nullptr;
MachineFunction::iterator BBI = W.MBB;
if (++BBI != FuncInfo.MF->end())
NextMBB = BBI;
unsigned Size = W.LastCluster - W.FirstCluster + 1;
BranchProbabilityInfo *BPI = FuncInfo.BPI;
if (Size == 2 && W.MBB == SwitchMBB) {
    // If any two of the cases have the same destination, and if one value
// is the same as the other, but has one bit unset that the other has set,
// use bit manipulation to do two compares at once. For example:
// "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
// TODO: This could be extended to merge any 2 cases in switches with 3
// cases.
// TODO: Handle cases where W.CaseBB != SwitchBB.
CaseCluster &Small = *W.FirstCluster;
CaseCluster &Big = *W.LastCluster;
if (Small.Low == Small.High && Big.Low == Big.High &&
Small.MBB == Big.MBB) {
const APInt &SmallValue = Small.Low->getValue();
const APInt &BigValue = Big.Low->getValue();
// Check that there is only one bit different.
APInt CommonBit = BigValue ^ SmallValue;
if (CommonBit.isPowerOf2()) {
SDValue CondLHS = getValue(Cond);
EVT VT = CondLHS.getValueType();
SDLoc DL = getCurSDLoc();
SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
DAG.getConstant(CommonBit, DL, VT));
SDValue Cond = DAG.getSetCC(
DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
ISD::SETEQ);
// Update successor info.
// Both Small and Big will jump to Small.BB, so we sum up the weights.
addSuccessorWithWeight(SwitchMBB, Small.MBB, Small.Weight + Big.Weight);
addSuccessorWithWeight(
SwitchMBB, DefaultMBB,
// The default destination is the first successor in IR.
BPI ? BPI->getEdgeWeight(SwitchMBB->getBasicBlock(), (unsigned)0)
: 0);
// Insert the true branch.
SDValue BrCond =
DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
DAG.getBasicBlock(Small.MBB));
// Insert the false branch.
BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
DAG.getBasicBlock(DefaultMBB));
DAG.setRoot(BrCond);
return;
}
}
}
if (TM.getOptLevel() != CodeGenOpt::None) {
// Order cases by weight so the most likely case will be checked first.
std::sort(W.FirstCluster, W.LastCluster + 1,
[](const CaseCluster &a, const CaseCluster &b) {
return a.Weight > b.Weight;
});
// Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of weights.
for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
--I;
if (I->Weight > W.LastCluster->Weight)
break;
if (I->Kind == CC_Range && I->MBB == NextMBB) {
std::swap(*I, *W.LastCluster);
break;
}
}
}
// Compute total weight.
uint32_t UnhandledWeights = 0;
for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) {
UnhandledWeights += I->Weight;
assert(UnhandledWeights >= I->Weight && "Weight overflow!");
}
MachineBasicBlock *CurMBB = W.MBB;
for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
MachineBasicBlock *Fallthrough;
if (I == W.LastCluster) {
// For the last cluster, fall through to the default destination.
Fallthrough = DefaultMBB;
} else {
Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
CurMF->insert(BBI, Fallthrough);
// Put Cond in a virtual register to make it available from the new blocks.
ExportFromCurrentBlock(Cond);
}
switch (I->Kind) {
case CC_JumpTable: {
// FIXME: Optimize away range check based on pivot comparisons.
JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
JumpTable *JT = &JTCases[I->JTCasesIndex].second;
// The jump block hasn't been inserted yet; insert it here.
MachineBasicBlock *JumpMBB = JT->MBB;
CurMF->insert(BBI, JumpMBB);
addSuccessorWithWeight(CurMBB, Fallthrough);
addSuccessorWithWeight(CurMBB, JumpMBB);
      // The jump table header will be inserted in our current block; it will
      // do the range check and fall through to our fallthrough block.
JTH->HeaderBB = CurMBB;
JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
// If we're in the right place, emit the jump table header right now.
if (CurMBB == SwitchMBB) {
visitJumpTableHeader(*JT, *JTH, SwitchMBB);
JTH->Emitted = true;
}
break;
}
case CC_BitTests: {
// FIXME: Optimize away range check based on pivot comparisons.
BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
// The bit test blocks haven't been inserted yet; insert them here.
for (BitTestCase &BTC : BTB->Cases)
CurMF->insert(BBI, BTC.ThisBB);
// Fill in fields of the BitTestBlock.
BTB->Parent = CurMBB;
BTB->Default = Fallthrough;
      // If we're in the right place, emit the bit test header right now.
      if (CurMBB == SwitchMBB) {
visitBitTestHeader(*BTB, SwitchMBB);
BTB->Emitted = true;
}
break;
}
case CC_Range: {
const Value *RHS, *LHS, *MHS;
ISD::CondCode CC;
if (I->Low == I->High) {
// Check Cond == I->Low.
CC = ISD::SETEQ;
LHS = Cond;
        RHS = I->Low;
MHS = nullptr;
} else {
// Check I->Low <= Cond <= I->High.
CC = ISD::SETLE;
LHS = I->Low;
MHS = Cond;
RHS = I->High;
}
// The false weight is the sum of all unhandled cases.
UnhandledWeights -= I->Weight;
CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB, I->Weight,
UnhandledWeights);
if (CurMBB == SwitchMBB)
visitSwitchCase(CB, SwitchMBB);
else
SwitchCases.push_back(CB);
break;
}
}
CurMBB = Fallthrough;
}
}
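/// Determine the rank of \p CC among the clusters in [First, Last]: the
/// number of clusters that would be tried before it when ordering by weight
/// (descending), with ties broken by case value.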
unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
CaseClusterIt First,
CaseClusterIt Last) {
return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
if (X.Weight != CC.Weight)
return X.Weight > CC.Weight;
// Ties are broken by comparing the case value.
return X.Low->getValue().slt(CC.Low->getValue());
});
}
void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
const SwitchWorkListItem &W,
Value *Cond,
MachineBasicBlock *SwitchMBB) {
assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
"Clusters not sorted?");
assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
// Balance the tree based on branch weights to create a near-optimal (in terms
// of search time given key frequency) binary search tree. See e.g. Kurt
// Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
CaseClusterIt LastLeft = W.FirstCluster;
CaseClusterIt FirstRight = W.LastCluster;
uint32_t LeftWeight = LastLeft->Weight;
uint32_t RightWeight = FirstRight->Weight;
// Move LastLeft and FirstRight towards each other from opposite directions to
// find a partitioning of the clusters which balances the weight on both
// sides. If LeftWeight and RightWeight are equal, alternate which side is
// taken to ensure 0-weight nodes are distributed evenly.
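  // For example, with weights {1, 1, 4}, LastLeft advances once and the split
  // becomes {1, 1} (weight 2) on the left versus {4} (weight 4) on the right.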
unsigned I = 0;
while (LastLeft + 1 < FirstRight) {
if (LeftWeight < RightWeight || (LeftWeight == RightWeight && (I & 1)))
LeftWeight += (++LastLeft)->Weight;
else
RightWeight += (--FirstRight)->Weight;
I++;
}
for (;;) {
// Our binary search tree differs from a typical BST in that ours can have up
// to three values in each leaf. The pivot selection above doesn't take that
// into account, which means the tree might require more nodes and be less
// efficient. We compensate for this here.
unsigned NumLeft = LastLeft - W.FirstCluster + 1;
unsigned NumRight = W.LastCluster - FirstRight + 1;
if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
// If one side has less than 3 clusters, and the other has more than 3,
// consider taking a cluster from the other side.
if (NumLeft < NumRight) {
// Consider moving the first cluster on the right to the left side.
CaseCluster &CC = *FirstRight;
unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
if (LeftSideRank <= RightSideRank) {
// Moving the cluster to the left does not demote it.
++LastLeft;
++FirstRight;
continue;
}
} else {
assert(NumRight < NumLeft);
// Consider moving the last element on the left to the right side.
CaseCluster &CC = *LastLeft;
unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
--LastLeft;
--FirstRight;
continue;
}
}
}
break;
}
assert(LastLeft + 1 == FirstRight);
assert(LastLeft >= W.FirstCluster);
assert(FirstRight <= W.LastCluster);
// Use the first element on the right as pivot since we will make less-than
// comparisons against it.
CaseClusterIt PivotCluster = FirstRight;
assert(PivotCluster > W.FirstCluster);
assert(PivotCluster <= W.LastCluster);
CaseClusterIt FirstLeft = W.FirstCluster;
CaseClusterIt LastRight = W.LastCluster;
const ConstantInt *Pivot = PivotCluster->Low;
// New blocks will be inserted immediately after the current one.
MachineFunction::iterator BBI = W.MBB;
++BBI;
// We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
// we can branch to its destination directly if it's squeezed exactly in
// between the known lower bound and Pivot - 1.
MachineBasicBlock *LeftMBB;
if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
FirstLeft->Low == W.GE &&
(FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
LeftMBB = FirstLeft->MBB;
} else {
LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
FuncInfo.MF->insert(BBI, LeftMBB);
WorkList.push_back({LeftMBB, FirstLeft, LastLeft, W.GE, Pivot});
// Put Cond in a virtual register to make it available from the new blocks.
ExportFromCurrentBlock(Cond);
}
// Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
// single cluster, RHS.Low == Pivot, and we can branch to its destination
// directly if RHS.High equals the current upper bound.
MachineBasicBlock *RightMBB;
if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
RightMBB = FirstRight->MBB;
} else {
RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
FuncInfo.MF->insert(BBI, RightMBB);
WorkList.push_back({RightMBB, FirstRight, LastRight, Pivot, W.LT});
// Put Cond in a virtual register to make it available from the new blocks.
ExportFromCurrentBlock(Cond);
}
// Create the CaseBlock record that will be used to lower the branch.
CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
LeftWeight, RightWeight);
if (W.MBB == SwitchMBB)
visitSwitchCase(CB, SwitchMBB);
else
SwitchCases.push_back(CB);
}
void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
// Extract cases from the switch.
BranchProbabilityInfo *BPI = FuncInfo.BPI;
CaseClusterVector Clusters;
Clusters.reserve(SI.getNumCases());
for (auto I : SI.cases()) {
MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
const ConstantInt *CaseVal = I.getCaseValue();
uint32_t Weight =
BPI ? BPI->getEdgeWeight(SI.getParent(), I.getSuccessorIndex()) : 0;
Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Weight));
}
MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
// Cluster adjacent cases with the same destination. We do this at all
// optimization levels because it's cheap to do and will make codegen faster
// if there are many clusters.
sortAndRangeify(Clusters);
if (TM.getOptLevel() != CodeGenOpt::None) {
// Replace an unreachable default with the most popular destination.
// FIXME: Exploit unreachable default more aggressively.
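    // For example, if cases 0 and 1 both branch to %bb1 and case 2 branches
    // to %bb2, %bb1 becomes the new default and its two cases are removed.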
bool UnreachableDefault =
isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg());
if (UnreachableDefault && !Clusters.empty()) {
DenseMap<const BasicBlock *, unsigned> Popularity;
unsigned MaxPop = 0;
const BasicBlock *MaxBB = nullptr;
for (auto I : SI.cases()) {
const BasicBlock *BB = I.getCaseSuccessor();
if (++Popularity[BB] > MaxPop) {
MaxPop = Popularity[BB];
MaxBB = BB;
}
}
// Set new default.
assert(MaxPop > 0 && MaxBB);
DefaultMBB = FuncInfo.MBBMap[MaxBB];
// Remove cases that were pointing to the destination that is now the
// default.
CaseClusterVector New;
New.reserve(Clusters.size());
for (CaseCluster &CC : Clusters) {
if (CC.MBB != DefaultMBB)
New.push_back(CC);
}
Clusters = std::move(New);
}
}
// If there is only the default destination, jump there directly.
MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
if (Clusters.empty()) {
SwitchMBB->addSuccessor(DefaultMBB);
if (DefaultMBB != NextBlock(SwitchMBB)) {
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
}
return;
}
findJumpTables(Clusters, &SI, DefaultMBB);
findBitTestClusters(Clusters, &SI);
DEBUG({
dbgs() << "Case clusters: ";
for (const CaseCluster &C : Clusters) {
if (C.Kind == CC_JumpTable) dbgs() << "JT:";
if (C.Kind == CC_BitTests) dbgs() << "BT:";
C.Low->getValue().print(dbgs(), true);
if (C.Low != C.High) {
dbgs() << '-';
C.High->getValue().print(dbgs(), true);
}
dbgs() << ' ';
}
dbgs() << '\n';
});
assert(!Clusters.empty());
SwitchWorkList WorkList;
CaseClusterIt First = Clusters.begin();
CaseClusterIt Last = Clusters.end() - 1;
WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr});
while (!WorkList.empty()) {
SwitchWorkListItem W = WorkList.back();
WorkList.pop_back();
unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None) {
// For optimized builds, lower large range as a balanced binary tree.
splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
continue;
}
lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
}
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/StatepointLowering.h | //===-- StatepointLowering.h - SDAGBuilder's statepoint code -*- C++ -*---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file includes support code used by SelectionDAGBuilder when lowering a
// statepoint sequence in SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_STATEPOINTLOWERING_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_STATEPOINTLOWERING_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include <vector>
namespace llvm {
class SelectionDAGBuilder;
/// This class tracks both per-statepoint and per-selectiondag information.
/// For each statepoint it tracks the locations of its gc values (incoming and
/// relocated) and the list of gc_relocate calls scheduled for visiting (this
/// is used for a debug mode consistency check only). The spill slot tracking
/// works in concert with information in FunctionLoweringInfo.
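///
/// A sketch of the expected call sequence during lowering (illustrative
/// names; the real driver lives in SelectionDAGBuilder):
///   State.startNewStatepoint(Builder);
///   SDValue Loc = State.getLocation(IncomingVal);
///   if (!Loc.getNode())
///     State.setLocation(IncomingVal, State.allocateStackSlot(VT, Builder));
///   State.scheduleRelocCall(RelocCall);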
class StatepointLoweringState {
public:
StatepointLoweringState() : NextSlotToAllocate(0) {}
/// Reset all state tracking for a newly encountered safepoint. Also
/// performs some consistency checking.
void startNewStatepoint(SelectionDAGBuilder &Builder);
/// Clear the memory usage of this object. This is called from
/// SelectionDAGBuilder::clear. We require this is never called in the
/// midst of processing a statepoint sequence.
void clear();
/// Returns the spill location of a value incoming to the current
/// statepoint. Will return SDValue() if this value hasn't been
/// spilled. Otherwise, the value has already been spilled and no
/// further action is required by the caller.
SDValue getLocation(SDValue val) {
if (!Locations.count(val))
return SDValue();
return Locations[val];
}
void setLocation(SDValue val, SDValue Location) {
assert(!Locations.count(val) &&
"Trying to allocate already allocated location");
Locations[val] = Location;
}
/// Record the fact that we expect to encounter a given gc_relocate
/// before the next statepoint. If we don't see it, we'll report
/// an assertion.
void scheduleRelocCall(const CallInst &RelocCall) {
PendingGCRelocateCalls.push_back(&RelocCall);
}
/// Remove this gc_relocate from the list we're expecting to see
/// before the next statepoint. If we weren't expecting to see
/// it, we'll report an assertion.
void relocCallVisited(const CallInst &RelocCall) {
SmallVectorImpl<const CallInst *>::iterator itr =
std::find(PendingGCRelocateCalls.begin(), PendingGCRelocateCalls.end(),
&RelocCall);
assert(itr != PendingGCRelocateCalls.end() &&
"Visited unexpected gcrelocate call");
PendingGCRelocateCalls.erase(itr);
}
// TODO: Should add consistency tracking to ensure we encounter
// expected gc_result calls too.
  /// Get a stack slot we can use to store a value of type ValueType. This
  /// will hopefully be a recycled slot from another statepoint.
SDValue allocateStackSlot(EVT ValueType, SelectionDAGBuilder &Builder);
void reserveStackSlot(int Offset) {
assert(Offset >= 0 && Offset < (int)AllocatedStackSlots.size() &&
"out of bounds");
assert(!AllocatedStackSlots[Offset] && "already reserved!");
assert(NextSlotToAllocate <= (unsigned)Offset && "consistency!");
AllocatedStackSlots[Offset] = true;
}
bool isStackSlotAllocated(int Offset) {
assert(Offset >= 0 && Offset < (int)AllocatedStackSlots.size() &&
"out of bounds");
return AllocatedStackSlots[Offset];
}
private:
/// Maps pre-relocation value (gc pointer directly incoming into statepoint)
  /// to its location (currently only stack slots).
DenseMap<SDValue, SDValue> Locations;
/// A boolean indicator for each slot listed in the FunctionInfo as to
/// whether it has been used in the current statepoint. Since we try to
/// preserve stack slots across safepoints, there can be gaps in which
/// slots have been allocated.
SmallVector<bool, 50> AllocatedStackSlots;
/// Points just beyond the last slot known to have been allocated
unsigned NextSlotToAllocate;
/// Keep track of pending gcrelocate calls for consistency check
SmallVector<const CallInst *, 10> PendingGCRelocateCalls;
};
} // end namespace llvm
#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_STATEPOINTLOWERING_H
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/LLVMBuild.txt | ;===- ./lib/CodeGen/SelectionDAG/LLVMBuild.txt -----------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[component_0]
type = Library
name = SelectionDAG
parent = CodeGen
required_libraries = Analysis CodeGen Core Support Target TransformUtils
; MC - HLSL Change
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp | //===-- TargetSelectionDAGInfo.cpp - SelectionDAG Info --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetSelectionDAGInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | //===-- LegalizeTypes.cpp - Common code for DAG type legalizer ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::LegalizeTypes method. It transforms
// an arbitrary well-formed SelectionDAG to only consist of legal types. This
// is common code shared among the LegalizeTypes*.cpp files.
//
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "legalize-types"
static cl::opt<bool>
EnableExpensiveChecks("enable-legalize-types-checking", cl::Hidden);
/// PerformExpensiveChecks - Do extensive, expensive, sanity checking.
void DAGTypeLegalizer::PerformExpensiveChecks() {
// If a node is not processed, then none of its values should be mapped by any
// of PromotedIntegers, ExpandedIntegers, ..., ReplacedValues.
// If a node is processed, then each value with an illegal type must be mapped
// by exactly one of PromotedIntegers, ExpandedIntegers, ..., ReplacedValues.
// Values with a legal type may be mapped by ReplacedValues, but not by any of
// the other maps.
// Note that these invariants may not hold momentarily when processing a node:
// the node being processed may be put in a map before being marked Processed.
// Note that it is possible to have nodes marked NewNode in the DAG. This can
// occur in two ways. Firstly, a node may be created during legalization but
// never passed to the legalization core. This is usually due to the implicit
// folding that occurs when using the DAG.getNode operators. Secondly, a new
// node may be passed to the legalization core, but when analyzed may morph
// into a different node, leaving the original node as a NewNode in the DAG.
// A node may morph if one of its operands changes during analysis. Whether
// it actually morphs or not depends on whether, after updating its operands,
// it is equivalent to an existing node: if so, it morphs into that existing
// node (CSE). An operand can change during analysis if the operand is a new
// node that morphs, or it is a processed value that was mapped to some other
// value (as recorded in ReplacedValues) in which case the operand is turned
// into that other value. If a node morphs then the node it morphed into will
// be used instead of it for legalization, however the original node continues
// to live on in the DAG.
// The conclusion is that though there may be nodes marked NewNode in the DAG,
// all uses of such nodes are also marked NewNode: the result is a fungus of
// NewNodes growing on top of the useful nodes, and perhaps using them, but
// not used by them.
// If a value is mapped by ReplacedValues, then it must have no uses, except
// by nodes marked NewNode (see above).
// The final node obtained by mapping by ReplacedValues is not marked NewNode.
// Note that ReplacedValues should be applied iteratively.
// Note that the ReplacedValues map may also map deleted nodes (by iterating
// over the DAG we never dereference deleted nodes). This means that it may
// also map nodes marked NewNode if the deallocated memory was reallocated as
// another node, and that new node was not seen by the LegalizeTypes machinery
// (for example because it was created but not used). In general, we cannot
// distinguish between new nodes and deleted nodes.
SmallVector<SDNode*, 16> NewNodes;
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
E = DAG.allnodes_end(); I != E; ++I) {
// Remember nodes marked NewNode - they are subject to extra checking below.
if (I->getNodeId() == NewNode)
NewNodes.push_back(I);
for (unsigned i = 0, e = I->getNumValues(); i != e; ++i) {
SDValue Res(I, i);
bool Failed = false;
unsigned Mapped = 0;
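      // Mapped is a bitmask with one bit per map; the checks below use it to
      // verify that Res appears in the expected number of maps (Mapped being
      // zero or a power of two means at most one map).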
if (ReplacedValues.find(Res) != ReplacedValues.end()) {
Mapped |= 1;
// Check that remapped values are only used by nodes marked NewNode.
for (SDNode::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI)
if (UI.getUse().getResNo() == i)
assert(UI->getNodeId() == NewNode &&
"Remapped value has non-trivial use!");
// Check that the final result of applying ReplacedValues is not
// marked NewNode.
SDValue NewVal = ReplacedValues[Res];
DenseMap<SDValue, SDValue>::iterator I = ReplacedValues.find(NewVal);
while (I != ReplacedValues.end()) {
NewVal = I->second;
I = ReplacedValues.find(NewVal);
}
assert(NewVal.getNode()->getNodeId() != NewNode &&
"ReplacedValues maps to a new node!");
}
if (PromotedIntegers.find(Res) != PromotedIntegers.end())
Mapped |= 2;
if (SoftenedFloats.find(Res) != SoftenedFloats.end())
Mapped |= 4;
if (ScalarizedVectors.find(Res) != ScalarizedVectors.end())
Mapped |= 8;
if (ExpandedIntegers.find(Res) != ExpandedIntegers.end())
Mapped |= 16;
if (ExpandedFloats.find(Res) != ExpandedFloats.end())
Mapped |= 32;
if (SplitVectors.find(Res) != SplitVectors.end())
Mapped |= 64;
if (WidenedVectors.find(Res) != WidenedVectors.end())
Mapped |= 128;
if (I->getNodeId() != Processed) {
// Since we allow ReplacedValues to map deleted nodes, it may map nodes
// marked NewNode too, since a deleted node may have been reallocated as
// another node that has not been seen by the LegalizeTypes machinery.
if ((I->getNodeId() == NewNode && Mapped > 1) ||
(I->getNodeId() != NewNode && Mapped != 0)) {
dbgs() << "Unprocessed value in a map!";
Failed = true;
}
} else if (isTypeLegal(Res.getValueType()) || IgnoreNodeResults(I)) {
if (Mapped > 1) {
dbgs() << "Value with legal type was transformed!";
Failed = true;
}
} else {
if (Mapped == 0) {
dbgs() << "Processed value not in any map!";
Failed = true;
} else if (Mapped & (Mapped - 1)) {
dbgs() << "Value in multiple maps!";
Failed = true;
}
}
if (Failed) {
if (Mapped & 1)
dbgs() << " ReplacedValues";
if (Mapped & 2)
dbgs() << " PromotedIntegers";
if (Mapped & 4)
dbgs() << " SoftenedFloats";
if (Mapped & 8)
dbgs() << " ScalarizedVectors";
if (Mapped & 16)
dbgs() << " ExpandedIntegers";
if (Mapped & 32)
dbgs() << " ExpandedFloats";
if (Mapped & 64)
dbgs() << " SplitVectors";
if (Mapped & 128)
dbgs() << " WidenedVectors";
dbgs() << "\n";
llvm_unreachable(nullptr);
}
}
}
  // Check that NewNodes are only used by other NewNodes.
for (unsigned i = 0, e = NewNodes.size(); i != e; ++i) {
SDNode *N = NewNodes[i];
for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
UI != UE; ++UI)
assert(UI->getNodeId() == NewNode && "NewNode used by non-NewNode!");
}
}
/// run - This is the main entry point for the type legalizer. This does a
/// top-down traversal of the dag, legalizing types as it goes. Returns "true"
/// if it made any changes.
bool DAGTypeLegalizer::run() {
bool Changed = false;
// Create a dummy node (which is not added to allnodes), that adds a reference
// to the root node, preventing it from being deleted, and tracking any
// changes of the root.
HandleSDNode Dummy(DAG.getRoot());
Dummy.setNodeId(Unanalyzed);
// The root of the dag may dangle to deleted nodes until the type legalizer is
// done. Set it to null to avoid confusion.
DAG.setRoot(SDValue());
// Walk all nodes in the graph, assigning them a NodeId of 'ReadyToProcess'
// (and remembering them) if they are leaves and assigning 'Unanalyzed' if
// non-leaves.
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
E = DAG.allnodes_end(); I != E; ++I) {
if (I->getNumOperands() == 0) {
I->setNodeId(ReadyToProcess);
Worklist.push_back(I);
} else {
I->setNodeId(Unanalyzed);
}
}
// Now that we have a set of nodes to process, handle them all.
while (!Worklist.empty()) {
#ifndef XDEBUG
if (EnableExpensiveChecks)
#endif
PerformExpensiveChecks();
SDNode *N = Worklist.back();
Worklist.pop_back();
assert(N->getNodeId() == ReadyToProcess &&
"Node should be ready if on worklist!");
if (IgnoreNodeResults(N))
goto ScanOperands;
// Scan the values produced by the node, checking to see if any result
// types are illegal.
for (unsigned i = 0, NumResults = N->getNumValues(); i < NumResults; ++i) {
EVT ResultVT = N->getValueType(i);
switch (getTypeAction(ResultVT)) {
case TargetLowering::TypeLegal:
break;
// The following calls must take care of *all* of the node's results,
// not just the illegal result they were passed (this includes results
// with a legal type). Results can be remapped using ReplaceValueWith,
// or their promoted/expanded/etc values registered in PromotedIntegers,
// ExpandedIntegers etc.
case TargetLowering::TypePromoteInteger:
PromoteIntegerResult(N, i);
Changed = true;
goto NodeDone;
case TargetLowering::TypeExpandInteger:
ExpandIntegerResult(N, i);
Changed = true;
goto NodeDone;
case TargetLowering::TypeSoftenFloat:
SoftenFloatResult(N, i);
Changed = true;
goto NodeDone;
case TargetLowering::TypeExpandFloat:
ExpandFloatResult(N, i);
Changed = true;
goto NodeDone;
case TargetLowering::TypeScalarizeVector:
ScalarizeVectorResult(N, i);
Changed = true;
goto NodeDone;
case TargetLowering::TypeSplitVector:
SplitVectorResult(N, i);
Changed = true;
goto NodeDone;
case TargetLowering::TypeWidenVector:
WidenVectorResult(N, i);
Changed = true;
goto NodeDone;
case TargetLowering::TypePromoteFloat:
PromoteFloatResult(N, i);
Changed = true;
goto NodeDone;
}
}
ScanOperands:
// Scan the operand list for the node, handling any nodes with operands that
// are illegal.
{
unsigned NumOperands = N->getNumOperands();
bool NeedsReanalyzing = false;
unsigned i;
for (i = 0; i != NumOperands; ++i) {
if (IgnoreNodeResults(N->getOperand(i).getNode()))
continue;
EVT OpVT = N->getOperand(i).getValueType();
switch (getTypeAction(OpVT)) {
case TargetLowering::TypeLegal:
continue;
// The following calls must either replace all of the node's results
// using ReplaceValueWith, and return "false"; or update the node's
// operands in place, and return "true".
case TargetLowering::TypePromoteInteger:
NeedsReanalyzing = PromoteIntegerOperand(N, i);
Changed = true;
break;
case TargetLowering::TypeExpandInteger:
NeedsReanalyzing = ExpandIntegerOperand(N, i);
Changed = true;
break;
case TargetLowering::TypeSoftenFloat:
NeedsReanalyzing = SoftenFloatOperand(N, i);
Changed = true;
break;
case TargetLowering::TypeExpandFloat:
NeedsReanalyzing = ExpandFloatOperand(N, i);
Changed = true;
break;
case TargetLowering::TypeScalarizeVector:
NeedsReanalyzing = ScalarizeVectorOperand(N, i);
Changed = true;
break;
case TargetLowering::TypeSplitVector:
NeedsReanalyzing = SplitVectorOperand(N, i);
Changed = true;
break;
case TargetLowering::TypeWidenVector:
NeedsReanalyzing = WidenVectorOperand(N, i);
Changed = true;
break;
case TargetLowering::TypePromoteFloat:
NeedsReanalyzing = PromoteFloatOperand(N, i);
Changed = true;
break;
}
break;
}
// The sub-method updated N in place. Check to see if any operands are new,
// and if so, mark them. If the node needs revisiting, don't add all users
// to the worklist etc.
if (NeedsReanalyzing) {
assert(N->getNodeId() == ReadyToProcess && "Node ID recalculated?");
N->setNodeId(NewNode);
// Recompute the NodeId and correct processed operands, adding the node to
// the worklist if ready.
SDNode *M = AnalyzeNewNode(N);
if (M == N)
// The node didn't morph - nothing special to do, it will be revisited.
continue;
// The node morphed - this is equivalent to legalizing by replacing every
// value of N with the corresponding value of M. So do that now.
assert(N->getNumValues() == M->getNumValues() &&
"Node morphing changed the number of results!");
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
// Replacing the value takes care of remapping the new value.
ReplaceValueWith(SDValue(N, i), SDValue(M, i));
assert(N->getNodeId() == NewNode && "Unexpected node state!");
// The node continues to live on as part of the NewNode fungus that
// grows on top of the useful nodes. Nothing more needs to be done
// with it - move on to the next node.
continue;
}
if (i == NumOperands) {
DEBUG(dbgs() << "Legally typed node: "; N->dump(&DAG); dbgs() << "\n");
}
}
NodeDone:
// If we reach here, the node was processed, potentially creating new nodes.
// Mark it as processed and add its users to the worklist as appropriate.
assert(N->getNodeId() == ReadyToProcess && "Node ID recalculated?");
N->setNodeId(Processed);
for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
UI != E; ++UI) {
SDNode *User = *UI;
int NodeId = User->getNodeId();
// This node has two options: it can either be a new node or its Node ID
// may be a count of the number of operands it has that are not ready.
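      // For example, a user with three operands gets NodeId 2 when its first
      // operand is processed and reaches ReadyToProcess (0) with the third.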
if (NodeId > 0) {
User->setNodeId(NodeId-1);
// If this was the last use it was waiting on, add it to the ready list.
if (NodeId-1 == ReadyToProcess)
Worklist.push_back(User);
continue;
}
// If this is an unreachable new node, then ignore it. If it ever becomes
// reachable by being used by a newly created node then it will be handled
// by AnalyzeNewNode.
if (NodeId == NewNode)
continue;
// Otherwise, this node is new: this is the first operand of it that
// became ready. Its new NodeId is the number of operands it has minus 1
// (as this node is now processed).
assert(NodeId == Unanalyzed && "Unknown node ID!");
User->setNodeId(User->getNumOperands() - 1);
// If the node only has a single operand, it is now ready.
if (User->getNumOperands() == 1)
Worklist.push_back(User);
}
}
#ifndef XDEBUG
if (EnableExpensiveChecks)
#endif
PerformExpensiveChecks();
// If the root changed (e.g. it was a dead load) update the root.
DAG.setRoot(Dummy.getValue());
// Remove dead nodes. This is important to do for cleanliness but also before
// the checking loop below. Implicit folding by the DAG.getNode operators and
// node morphing can cause unreachable nodes to be around with their flags set
// to new.
DAG.RemoveDeadNodes();
// In a debug build, scan all the nodes to make sure we found them all. This
// ensures that there are no cycles and that everything got processed.
#ifndef NDEBUG
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
E = DAG.allnodes_end(); I != E; ++I) {
bool Failed = false;
// Check that all result types are legal.
if (!IgnoreNodeResults(I))
for (unsigned i = 0, NumVals = I->getNumValues(); i < NumVals; ++i)
if (!isTypeLegal(I->getValueType(i))) {
dbgs() << "Result type " << i << " illegal!\n";
Failed = true;
}
// Check that all operand types are legal.
for (unsigned i = 0, NumOps = I->getNumOperands(); i < NumOps; ++i)
if (!IgnoreNodeResults(I->getOperand(i).getNode()) &&
!isTypeLegal(I->getOperand(i).getValueType())) {
dbgs() << "Operand type " << i << " illegal!\n";
Failed = true;
}
if (I->getNodeId() != Processed) {
if (I->getNodeId() == NewNode)
dbgs() << "New node not analyzed?\n";
else if (I->getNodeId() == Unanalyzed)
dbgs() << "Unanalyzed node not noticed?\n";
else if (I->getNodeId() > 0)
dbgs() << "Operand not processed?\n";
else if (I->getNodeId() == ReadyToProcess)
dbgs() << "Not added to worklist?\n";
Failed = true;
}
if (Failed) {
I->dump(&DAG); dbgs() << "\n";
llvm_unreachable(nullptr);
}
}
#endif
return Changed;
}
/// AnalyzeNewNode - The specified node is the root of a subtree of potentially
/// new nodes. Correct any processed operands (this may change the node) and
/// calculate the NodeId. If the node itself changes to a processed node, it
/// is not remapped - the caller needs to take care of this.
/// Returns the potentially changed node.
SDNode *DAGTypeLegalizer::AnalyzeNewNode(SDNode *N) {
// If this was an existing node that is already done, we're done.
if (N->getNodeId() != NewNode && N->getNodeId() != Unanalyzed)
return N;
// Remove any stale map entries.
ExpungeNode(N);
// Okay, we know that this node is new. Recursively walk all of its operands
// to see if they are new also. The depth of this walk is bounded by the size
// of the new tree that was constructed (usually 2-3 nodes), so we don't worry
  // about revisiting nodes.
//
// As we walk the operands, keep track of the number of nodes that are
// processed. If non-zero, this will become the new nodeid of this node.
// Operands may morph when they are analyzed. If so, the node will be
// updated after all operands have been analyzed. Since this is rare,
// the code tries to minimize overhead in the non-morphing case.
SmallVector<SDValue, 8> NewOps;
unsigned NumProcessed = 0;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDValue OrigOp = N->getOperand(i);
SDValue Op = OrigOp;
AnalyzeNewValue(Op); // Op may morph.
if (Op.getNode()->getNodeId() == Processed)
++NumProcessed;
if (!NewOps.empty()) {
// Some previous operand changed. Add this one to the list.
NewOps.push_back(Op);
} else if (Op != OrigOp) {
// This is the first operand to change - add all operands so far.
NewOps.append(N->op_begin(), N->op_begin() + i);
NewOps.push_back(Op);
}
}
// Some operands changed - update the node.
if (!NewOps.empty()) {
SDNode *M = DAG.UpdateNodeOperands(N, NewOps);
if (M != N) {
// The node morphed into a different node. Normally for this to happen
// the original node would have to be marked NewNode. However this can
// in theory momentarily not be the case while ReplaceValueWith is doing
// its stuff. Mark the original node NewNode to help sanity checking.
N->setNodeId(NewNode);
if (M->getNodeId() != NewNode && M->getNodeId() != Unanalyzed)
// It morphed into a previously analyzed node - nothing more to do.
return M;
// It morphed into a different new node. Do the equivalent of passing
// it to AnalyzeNewNode: expunge it and calculate the NodeId. No need
// to remap the operands, since they are the same as the operands we
// remapped above.
N = M;
ExpungeNode(N);
}
}
// Calculate the NodeId.
N->setNodeId(N->getNumOperands() - NumProcessed);
if (N->getNodeId() == ReadyToProcess)
Worklist.push_back(N);
return N;
}
/// AnalyzeNewValue - Call AnalyzeNewNode, updating the node in Val if needed.
/// If the node changes to a processed node, then remap it.
void DAGTypeLegalizer::AnalyzeNewValue(SDValue &Val) {
Val.setNode(AnalyzeNewNode(Val.getNode()));
if (Val.getNode()->getNodeId() == Processed)
// We were passed a processed node, or it morphed into one - remap it.
RemapValue(Val);
}
/// ExpungeNode - If N has a bogus mapping in ReplacedValues, eliminate it.
/// This can occur when a node is deleted then reallocated as a new node -
/// the mapping in ReplacedValues applies to the deleted node, not the new
/// one.
/// The only map that can have a deleted node as a source is ReplacedValues.
/// Other maps can have deleted nodes as targets, but since their looked-up
/// values are always immediately remapped using RemapValue, resulting in a
/// not-deleted node, this is harmless as long as ReplacedValues/RemapValue
/// always performs correct mappings. In order to keep the mapping correct,
/// ExpungeNode should be called on any new nodes *before* adding them as
/// either source or target to ReplacedValues (which typically means calling
/// Expunge when a new node is first seen, since it may no longer be marked
/// NewNode by the time it is added to ReplacedValues).
void DAGTypeLegalizer::ExpungeNode(SDNode *N) {
if (N->getNodeId() != NewNode)
return;
// If N is not remapped by ReplacedValues then there is nothing to do.
unsigned i, e;
for (i = 0, e = N->getNumValues(); i != e; ++i)
if (ReplacedValues.find(SDValue(N, i)) != ReplacedValues.end())
break;
if (i == e)
return;
// Remove N from all maps - this is expensive but rare.
for (DenseMap<SDValue, SDValue>::iterator I = PromotedIntegers.begin(),
E = PromotedIntegers.end(); I != E; ++I) {
assert(I->first.getNode() != N);
RemapValue(I->second);
}
for (DenseMap<SDValue, SDValue>::iterator I = SoftenedFloats.begin(),
E = SoftenedFloats.end(); I != E; ++I) {
assert(I->first.getNode() != N);
RemapValue(I->second);
}
for (DenseMap<SDValue, SDValue>::iterator I = ScalarizedVectors.begin(),
E = ScalarizedVectors.end(); I != E; ++I) {
assert(I->first.getNode() != N);
RemapValue(I->second);
}
for (DenseMap<SDValue, SDValue>::iterator I = WidenedVectors.begin(),
E = WidenedVectors.end(); I != E; ++I) {
assert(I->first.getNode() != N);
RemapValue(I->second);
}
for (DenseMap<SDValue, std::pair<SDValue, SDValue> >::iterator
I = ExpandedIntegers.begin(), E = ExpandedIntegers.end(); I != E; ++I){
assert(I->first.getNode() != N);
RemapValue(I->second.first);
RemapValue(I->second.second);
}
for (DenseMap<SDValue, std::pair<SDValue, SDValue> >::iterator
I = ExpandedFloats.begin(), E = ExpandedFloats.end(); I != E; ++I) {
assert(I->first.getNode() != N);
RemapValue(I->second.first);
RemapValue(I->second.second);
}
for (DenseMap<SDValue, std::pair<SDValue, SDValue> >::iterator
I = SplitVectors.begin(), E = SplitVectors.end(); I != E; ++I) {
assert(I->first.getNode() != N);
RemapValue(I->second.first);
RemapValue(I->second.second);
}
for (DenseMap<SDValue, SDValue>::iterator I = ReplacedValues.begin(),
E = ReplacedValues.end(); I != E; ++I)
RemapValue(I->second);
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
ReplacedValues.erase(SDValue(N, i));
}
/// RemapValue - If the specified value was already legalized to another value,
/// replace it by that value.
void DAGTypeLegalizer::RemapValue(SDValue &N) {
DenseMap<SDValue, SDValue>::iterator I = ReplacedValues.find(N);
if (I != ReplacedValues.end()) {
// Use path compression to speed up future lookups if values get multiply
// replaced with other values.
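    // For example, if the map holds A -> B and B -> C, looking up A rewrites
    // the entry to A -> C so later lookups finish in one step.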
RemapValue(I->second);
N = I->second;
// Note that it is possible to have N.getNode()->getNodeId() == NewNode at
// this point because it is possible for a node to be put in the map before
// being processed.
}
}
namespace {
/// NodeUpdateListener - This class is a DAGUpdateListener that listens for
/// updates to nodes and recomputes their ready state.
class NodeUpdateListener : public SelectionDAG::DAGUpdateListener {
DAGTypeLegalizer &DTL;
SmallSetVector<SDNode*, 16> &NodesToAnalyze;
public:
explicit NodeUpdateListener(DAGTypeLegalizer &dtl,
SmallSetVector<SDNode*, 16> &nta)
: SelectionDAG::DAGUpdateListener(dtl.getDAG()),
DTL(dtl), NodesToAnalyze(nta) {}
void NodeDeleted(SDNode *N, SDNode *E) override {
assert(N->getNodeId() != DAGTypeLegalizer::ReadyToProcess &&
N->getNodeId() != DAGTypeLegalizer::Processed &&
"Invalid node ID for RAUW deletion!");
// It is possible, though rare, for the deleted node N to occur as a
// target in a map, so note the replacement N -> E in ReplacedValues.
assert(E && "Node not replaced?");
DTL.NoteDeletion(N, E);
// In theory the deleted node could also have been scheduled for analysis.
// So remove it from the set of nodes which will be analyzed.
NodesToAnalyze.remove(N);
// In general nothing needs to be done for E, since it didn't change but
// only gained new uses. However N -> E was just added to ReplacedValues,
// and the result of a ReplacedValues mapping is not allowed to be marked
// NewNode. So if E is marked NewNode, then it needs to be analyzed.
if (E->getNodeId() == DAGTypeLegalizer::NewNode)
NodesToAnalyze.insert(E);
}
void NodeUpdated(SDNode *N) override {
// Node updates can mean pretty much anything. It is possible that an
// operand was set to something already processed (f.e.) in which case
// this node could become ready. Recompute its flags.
assert(N->getNodeId() != DAGTypeLegalizer::ReadyToProcess &&
N->getNodeId() != DAGTypeLegalizer::Processed &&
"Invalid node ID for RAUW deletion!");
N->setNodeId(DAGTypeLegalizer::NewNode);
NodesToAnalyze.insert(N);
}
};
}
/// ReplaceValueWith - The specified value was legalized to the specified other
/// value. Update the DAG and NodeIds replacing any uses of From to use To
/// instead.
void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
assert(From.getNode() != To.getNode() && "Potential legalization loop!");
// If expansion produced new nodes, make sure they are properly marked.
ExpungeNode(From.getNode());
AnalyzeNewValue(To); // Expunges To.
// Anything that used the old node should now use the new one. Note that this
// can potentially cause recursive merging.
SmallSetVector<SDNode*, 16> NodesToAnalyze;
NodeUpdateListener NUL(*this, NodesToAnalyze);
do {
DAG.ReplaceAllUsesOfValueWith(From, To);
// The old node may still be present in a map like ExpandedIntegers or
// PromotedIntegers. Inform maps about the replacement.
ReplacedValues[From] = To;
// Process the list of nodes that need to be reanalyzed.
while (!NodesToAnalyze.empty()) {
SDNode *N = NodesToAnalyze.back();
NodesToAnalyze.pop_back();
if (N->getNodeId() != DAGTypeLegalizer::NewNode)
// The node was analyzed while reanalyzing an earlier node - it is safe
// to skip. Note that this is not a morphing node - otherwise it would
// still be marked NewNode.
continue;
// Analyze the node's operands and recalculate the node ID.
SDNode *M = AnalyzeNewNode(N);
if (M != N) {
// The node morphed into a different node. Make everyone use the new
// node instead.
assert(M->getNodeId() != NewNode && "Analysis resulted in NewNode!");
assert(N->getNumValues() == M->getNumValues() &&
"Node morphing changed the number of results!");
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
SDValue OldVal(N, i);
SDValue NewVal(M, i);
if (M->getNodeId() == Processed)
RemapValue(NewVal);
DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal);
// OldVal may be a target of the ReplacedValues map which was marked
// NewNode to force reanalysis because it was updated. Ensure that
// anything that ReplacedValues mapped to OldVal will now be mapped
// all the way to NewVal.
ReplacedValues[OldVal] = NewVal;
}
// The original node continues to exist in the DAG, marked NewNode.
}
}
    // When recursively updating nodes with new nodes, it is possible to have
// new uses of From due to CSE. If this happens, replace the new uses of
// From with To.
} while (!From.use_empty());
}
void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) {
assert(Result.getValueType() ==
TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
"Invalid type for promoted integer");
AnalyzeNewValue(Result);
SDValue &OpEntry = PromotedIntegers[Op];
assert(!OpEntry.getNode() && "Node is already promoted!");
OpEntry = Result;
}
void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) {
assert(Result.getValueType() ==
TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
"Invalid type for softened float");
AnalyzeNewValue(Result);
SDValue &OpEntry = SoftenedFloats[Op];
assert(!OpEntry.getNode() && "Node is already converted to integer!");
OpEntry = Result;
}
void DAGTypeLegalizer::SetPromotedFloat(SDValue Op, SDValue Result) {
assert(Result.getValueType() ==
TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
"Invalid type for promoted float");
AnalyzeNewValue(Result);
SDValue &OpEntry = PromotedFloats[Op];
assert(!OpEntry.getNode() && "Node is already promoted!");
OpEntry = Result;
}
void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) {
// Note that in some cases vector operation operands may be greater than
// the vector element type. For example BUILD_VECTOR of type <1 x i1> with
// a constant i8 operand.
assert(Result.getValueType().getSizeInBits() >=
Op.getValueType().getVectorElementType().getSizeInBits() &&
"Invalid type for scalarized vector");
AnalyzeNewValue(Result);
SDValue &OpEntry = ScalarizedVectors[Op];
assert(!OpEntry.getNode() && "Node is already scalarized!");
OpEntry = Result;
}
void DAGTypeLegalizer::GetExpandedInteger(SDValue Op, SDValue &Lo,
SDValue &Hi) {
std::pair<SDValue, SDValue> &Entry = ExpandedIntegers[Op];
RemapValue(Entry.first);
RemapValue(Entry.second);
assert(Entry.first.getNode() && "Operand isn't expanded");
Lo = Entry.first;
Hi = Entry.second;
}
void DAGTypeLegalizer::SetExpandedInteger(SDValue Op, SDValue Lo,
SDValue Hi) {
assert(Lo.getValueType() ==
TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
Hi.getValueType() == Lo.getValueType() &&
"Invalid type for expanded integer");
// Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
AnalyzeNewValue(Lo);
AnalyzeNewValue(Hi);
// Remember that this is the result of the node.
std::pair<SDValue, SDValue> &Entry = ExpandedIntegers[Op];
assert(!Entry.first.getNode() && "Node already expanded");
Entry.first = Lo;
Entry.second = Hi;
}
void DAGTypeLegalizer::GetExpandedFloat(SDValue Op, SDValue &Lo,
SDValue &Hi) {
std::pair<SDValue, SDValue> &Entry = ExpandedFloats[Op];
RemapValue(Entry.first);
RemapValue(Entry.second);
assert(Entry.first.getNode() && "Operand isn't expanded");
Lo = Entry.first;
Hi = Entry.second;
}
void DAGTypeLegalizer::SetExpandedFloat(SDValue Op, SDValue Lo,
SDValue Hi) {
assert(Lo.getValueType() ==
TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
Hi.getValueType() == Lo.getValueType() &&
"Invalid type for expanded float");
// Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
AnalyzeNewValue(Lo);
AnalyzeNewValue(Hi);
// Remember that this is the result of the node.
std::pair<SDValue, SDValue> &Entry = ExpandedFloats[Op];
assert(!Entry.first.getNode() && "Node already expanded");
Entry.first = Lo;
Entry.second = Hi;
}
void DAGTypeLegalizer::GetSplitVector(SDValue Op, SDValue &Lo,
SDValue &Hi) {
std::pair<SDValue, SDValue> &Entry = SplitVectors[Op];
RemapValue(Entry.first);
RemapValue(Entry.second);
assert(Entry.first.getNode() && "Operand isn't split");
Lo = Entry.first;
Hi = Entry.second;
}
void DAGTypeLegalizer::SetSplitVector(SDValue Op, SDValue Lo,
SDValue Hi) {
assert(Lo.getValueType().getVectorElementType() ==
Op.getValueType().getVectorElementType() &&
2*Lo.getValueType().getVectorNumElements() ==
Op.getValueType().getVectorNumElements() &&
Hi.getValueType() == Lo.getValueType() &&
"Invalid type for split vector");
// Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
AnalyzeNewValue(Lo);
AnalyzeNewValue(Hi);
// Remember that this is the result of the node.
std::pair<SDValue, SDValue> &Entry = SplitVectors[Op];
assert(!Entry.first.getNode() && "Node already split");
Entry.first = Lo;
Entry.second = Hi;
}
void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {
assert(Result.getValueType() ==
TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
"Invalid type for widened vector");
AnalyzeNewValue(Result);
SDValue &OpEntry = WidenedVectors[Op];
assert(!OpEntry.getNode() && "Node already widened!");
OpEntry = Result;
}
//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//
/// BitConvertToInteger - Convert to an integer of the same size.
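/// For example (illustrative), an f32 value becomes an i32 with the same bit
/// pattern, and a v2f32 becomes an i64.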
SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) {
unsigned BitWidth = Op.getValueType().getSizeInBits();
return DAG.getNode(ISD::BITCAST, SDLoc(Op),
EVT::getIntegerVT(*DAG.getContext(), BitWidth), Op);
}
/// BitConvertVectorToIntegerVector - Convert to a vector of integers of the
/// same size.
SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
assert(Op.getValueType().isVector() && "Only applies to vectors!");
unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits();
EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
unsigned NumElts = Op.getValueType().getVectorNumElements();
return DAG.getNode(ISD::BITCAST, SDLoc(Op),
EVT::getVectorVT(*DAG.getContext(), EltNVT, NumElts), Op);
}
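/// CreateStackStoreLoad - Convert Op to DestVT by spilling it to a stack
/// temporary and reloading it with the new type, i.e. by reinterpreting the
/// bits through memory. The slot is aligned for both the source and
/// destination types.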
SDValue DAGTypeLegalizer::CreateStackStoreLoad(SDValue Op,
EVT DestVT) {
SDLoc dl(Op);
// Create the stack frame object. Make sure it is aligned for both
// the source and destination types.
SDValue StackPtr = DAG.CreateStackTemporary(Op.getValueType(), DestVT);
// Emit a store to the stack slot.
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op, StackPtr,
MachinePointerInfo(), false, false, 0);
// Result is a load from the stack slot.
return DAG.getLoad(DestVT, dl, Store, StackPtr, MachinePointerInfo(),
false, false, false, 0);
}
/// CustomLowerNode - Replace the node's results with custom code provided
/// by the target and return "true", or do nothing and return "false".
/// If the last parameter (LegalizeResult) is FALSE, we are dealing with a
/// node that has legal result types but an illegal operand, and VT is the
/// type of that illegal operand.
/// If LegalizeResult is TRUE, we are dealing with a node that has illegal
/// result types, and VT is the type of the illegal result.
bool DAGTypeLegalizer::CustomLowerNode(SDNode *N, EVT VT, bool LegalizeResult) {
// See if the target wants to custom lower this node.
if (TLI.getOperationAction(N->getOpcode(), VT) != TargetLowering::Custom)
return false;
SmallVector<SDValue, 8> Results;
if (LegalizeResult)
TLI.ReplaceNodeResults(N, Results, DAG);
else
TLI.LowerOperationWrapper(N, Results, DAG);
if (Results.empty())
// The target didn't want to custom lower it after all.
return false;
// When called from DAGTypeLegalizer::ExpandIntegerResult, we might need to
// provide the same kind of custom splitting behavior.
if (Results.size() == N->getNumValues() + 1 && LegalizeResult) {
// We've legalized a return type by splitting it. If there is a chain,
// replace that too.
SetExpandedInteger(SDValue(N, 0), Results[0], Results[1]);
if (N->getNumValues() > 1)
ReplaceValueWith(SDValue(N, 1), Results[2]);
return true;
}
// Make everything that once used N's values now use those in Results instead.
assert(Results.size() == N->getNumValues() &&
"Custom lowering returned the wrong number of results!");
for (unsigned i = 0, e = Results.size(); i != e; ++i) {
ReplaceValueWith(SDValue(N, i), Results[i]);
}
return true;
}
/// CustomWidenLowerNode - Widen the node's results with custom code provided
/// by the target and return "true", or do nothing and return "false".
bool DAGTypeLegalizer::CustomWidenLowerNode(SDNode *N, EVT VT) {
// See if the target wants to custom lower this node.
if (TLI.getOperationAction(N->getOpcode(), VT) != TargetLowering::Custom)
return false;
SmallVector<SDValue, 8> Results;
TLI.ReplaceNodeResults(N, Results, DAG);
if (Results.empty())
// The target didn't want to custom widen lower its result after all.
return false;
// Update the widening map.
assert(Results.size() == N->getNumValues() &&
"Custom lowering returned the wrong number of results!");
for (unsigned i = 0, e = Results.size(); i != e; ++i)
SetWidenedVector(SDValue(N, i), Results[i]);
return true;
}
SDValue DAGTypeLegalizer::DisintegrateMERGE_VALUES(SDNode *N, unsigned ResNo) {
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
if (i != ResNo)
ReplaceValueWith(SDValue(N, i), SDValue(N->getOperand(i)));
return SDValue(N->getOperand(ResNo));
}
/// GetPairElements - Use ISD::EXTRACT_ELEMENT nodes to extract the low and
/// high parts of the given value.
void DAGTypeLegalizer::GetPairElements(SDValue Pair,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(Pair);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), Pair.getValueType());
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NVT, Pair,
DAG.getIntPtrConstant(0, dl));
Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NVT, Pair,
DAG.getIntPtrConstant(1, dl));
}
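/// GetVectorElementPointer - Compute the address of the given element of a
/// vector spilled to memory: VecPtr + Index * sizeof(EltVT). For example
/// (illustrative), element 3 of a vector of i32 based at VecPtr is addressed
/// at VecPtr + 12.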
SDValue DAGTypeLegalizer::GetVectorElementPointer(SDValue VecPtr, EVT EltVT,
SDValue Index) {
SDLoc dl(Index);
// Make sure the index type is big enough to compute in.
Index = DAG.getZExtOrTrunc(Index, dl, TLI.getPointerTy(DAG.getDataLayout()));
// Calculate the element offset and add it to the pointer.
unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
Index = DAG.getNode(ISD::MUL, dl, Index.getValueType(), Index,
DAG.getConstant(EltSize, dl, Index.getValueType()));
return DAG.getNode(ISD::ADD, dl, Index.getValueType(), Index, VecPtr);
}
/// JoinIntegers - Build an integer with low bits Lo and high bits Hi.
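/// For example (illustrative, assuming two i32 halves):
///   JoinIntegers(Lo, Hi)
///     -> (i64 or (zero_extend Lo), (shl (any_extend Hi), 32))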
SDValue DAGTypeLegalizer::JoinIntegers(SDValue Lo, SDValue Hi) {
// Arbitrarily use dlHi for result SDLoc
SDLoc dlHi(Hi);
SDLoc dlLo(Lo);
EVT LVT = Lo.getValueType();
EVT HVT = Hi.getValueType();
EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
LVT.getSizeInBits() + HVT.getSizeInBits());
Lo = DAG.getNode(ISD::ZERO_EXTEND, dlLo, NVT, Lo);
Hi = DAG.getNode(ISD::ANY_EXTEND, dlHi, NVT, Hi);
Hi = DAG.getNode(ISD::SHL, dlHi, NVT, Hi,
DAG.getConstant(LVT.getSizeInBits(), dlHi,
TLI.getPointerTy(DAG.getDataLayout())));
return DAG.getNode(ISD::OR, dlHi, NVT, Lo, Hi);
}
/// LibCallify - Convert the node into a libcall with the same prototype.
SDValue DAGTypeLegalizer::LibCallify(RTLIB::Libcall LC, SDNode *N,
bool isSigned) {
unsigned NumOps = N->getNumOperands();
SDLoc dl(N);
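// Fast paths for the common small-arity cases avoid building the operand
// vector below.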
if (NumOps == 0) {
return TLI.makeLibCall(DAG, LC, N->getValueType(0), nullptr, 0, isSigned,
dl).first;
} else if (NumOps == 1) {
SDValue Op = N->getOperand(0);
return TLI.makeLibCall(DAG, LC, N->getValueType(0), &Op, 1, isSigned,
dl).first;
} else if (NumOps == 2) {
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
return TLI.makeLibCall(DAG, LC, N->getValueType(0), Ops, 2, isSigned,
dl).first;
}
SmallVector<SDValue, 8> Ops(NumOps);
for (unsigned i = 0; i < NumOps; ++i)
Ops[i] = N->getOperand(i);
return TLI.makeLibCall(DAG, LC, N->getValueType(0),
&Ops[0], NumOps, isSigned, dl).first;
}
// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
// ExpandLibCall except that the first operand is the in-chain.
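// The returned pair is (call result, output chain); threading the chain
// through keeps the call ordered with surrounding memory operations. (Used,
// for example, for chain-carrying operations such as atomics; illustrative.)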
std::pair<SDValue, SDValue>
DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
SDNode *Node,
bool isSigned) {
SDValue InChain = Node->getOperand(0);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(SDLoc(Node)).setChain(InChain)
.setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
.setSExtResult(isSigned).setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo;
}
/// PromoteTargetBoolean - Promote the given target boolean to a target boolean
/// of the given type. A target boolean is an integer value, not necessarily of
/// type i1, the bits of which conform to getBooleanContents.
///
/// ValVT is the type of values that produced the boolean.
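///
/// For example (illustrative), on a target whose getBooleanContents is
/// ZeroOrNegativeOneBooleanContent the boolean is sign-extended, so an i1
/// "true" becomes an all-ones value of the wider type.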
SDValue DAGTypeLegalizer::PromoteTargetBoolean(SDValue Bool, EVT ValVT) {
SDLoc dl(Bool);
EVT BoolVT = getSetCCResultType(ValVT);
ISD::NodeType ExtendCode =
TargetLowering::getExtendForContent(TLI.getBooleanContents(ValVT));
return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
}
/// SplitInteger - Return the lower LoVT bits of Op in Lo and the upper HiVT
/// bits in Hi.
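/// For example (illustrative), splitting an i64 Op with LoVT = HiVT = i32
/// gives Lo = (trunc i32 Op) and Hi = (trunc i32 (srl Op, 32)).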
void DAGTypeLegalizer::SplitInteger(SDValue Op,
EVT LoVT, EVT HiVT,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(Op);
assert(LoVT.getSizeInBits() + HiVT.getSizeInBits() ==
Op.getValueType().getSizeInBits() && "Invalid integer splitting!");
Lo = DAG.getNode(ISD::TRUNCATE, dl, LoVT, Op);
Hi = DAG.getNode(ISD::SRL, dl, Op.getValueType(), Op,
DAG.getConstant(LoVT.getSizeInBits(), dl,
TLI.getPointerTy(DAG.getDataLayout())));
Hi = DAG.getNode(ISD::TRUNCATE, dl, HiVT, Hi);
}
/// SplitInteger - Return the lower and upper halves of Op's bits in a value
/// type half the size of Op's.
void DAGTypeLegalizer::SplitInteger(SDValue Op,
SDValue &Lo, SDValue &Hi) {
EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(),
Op.getValueType().getSizeInBits()/2);
SplitInteger(Op, HalfVT, HalfVT, Lo, Hi);
}
//===----------------------------------------------------------------------===//
// Entry Point
//===----------------------------------------------------------------------===//
/// LegalizeTypes - This transforms the SelectionDAG into a SelectionDAG that
/// only uses types natively supported by the target. Returns "true" if it made
/// any changes.
///
/// Note that this is an involved process that may invalidate pointers into
/// the graph.
bool SelectionDAG::LegalizeTypes() {
return DAGTypeLegalizer(*this).run();
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | //===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "dagcombine"
STATISTIC(NodesCombined , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed , "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int , "Number of fp load/store pairs transformed to int");
STATISTIC(SlicedLoads, "Number of load sliced");
namespace {
static cl::opt<bool>
CombinerAA("combiner-alias-analysis", cl::Hidden,
cl::desc("Enable DAG combiner alias-analysis heuristics"));
static cl::opt<bool>
CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
cl::desc("Enable DAG combiner's use of IR alias analysis"));
static cl::opt<bool>
UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
cl::desc("Enable DAG combiner's use of TBAA"));
#ifndef NDEBUG
static cl::opt<std::string>
CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
cl::desc("Only use DAG-combiner alias analysis in this"
" function"));
#endif
/// Hidden option to stress test load slicing, i.e., when this option
/// is enabled, load slicing bypasses most of its profitability guards.
static cl::opt<bool>
StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
cl::desc("Bypass the profitability model of load "
"slicing"),
cl::init(false));
static cl::opt<bool>
MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
cl::desc("DAG combiner may split indexing from loads"));
//------------------------------ DAGCombiner ---------------------------------//
class DAGCombiner {
SelectionDAG &DAG;
const TargetLowering &TLI;
CombineLevel Level;
CodeGenOpt::Level OptLevel;
bool LegalOperations;
bool LegalTypes;
bool ForCodeSize;
/// \brief Worklist of all of the nodes that need to be simplified.
///
/// This must behave as a stack -- new nodes to process are pushed onto the
/// back and when processing we pop off of the back.
///
/// The worklist will not contain duplicates but may contain null entries
/// due to nodes being deleted from the underlying DAG.
SmallVector<SDNode *, 64> Worklist;
/// \brief Mapping from an SDNode to its position on the worklist.
///
/// This is used to find and remove nodes from the worklist (by nulling
/// them) when they are deleted from the underlying DAG. It relies on
/// stable indices of nodes within the worklist.
DenseMap<SDNode *, unsigned> WorklistMap;
/// \brief Set of nodes which have been combined (at least once).
///
/// This is used to allow us to reliably add any operands of a DAG node
/// which have not yet been combined to the worklist.
SmallPtrSet<SDNode *, 64> CombinedNodes;
// AA - Used for DAG load/store alias analysis.
AliasAnalysis &AA;
/// When an instruction is simplified, add all users of the instruction to
/// the work lists because they might get more simplified now.
void AddUsersToWorklist(SDNode *N) {
for (SDNode *Node : N->uses())
AddToWorklist(Node);
}
/// Call the node-specific routine that folds each particular type of node.
SDValue visit(SDNode *N);
public:
/// Add to the worklist, making sure its instance is at the back (next to be
/// processed).
void AddToWorklist(SDNode *N) {
// Skip handle nodes as they can't usefully be combined and confuse the
// zero-use deletion strategy.
if (N->getOpcode() == ISD::HANDLENODE)
return;
if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
Worklist.push_back(N);
}
/// Remove all instances of N from the worklist.
void removeFromWorklist(SDNode *N) {
CombinedNodes.erase(N);
auto It = WorklistMap.find(N);
if (It == WorklistMap.end())
return; // Not in the worklist.
// Null out the entry rather than erasing it to avoid a linear operation.
Worklist[It->second] = nullptr;
WorklistMap.erase(It);
}
void deleteAndRecombine(SDNode *N);
bool recursivelyDeleteUnusedNodes(SDNode *N);
SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
bool AddTo = true);
SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
return CombineTo(N, &Res, 1, AddTo);
}
SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
bool AddTo = true) {
SDValue To[] = { Res0, Res1 };
return CombineTo(N, To, 2, AddTo);
}
void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
private:
/// Check the specified integer node value to see if it can be simplified or
/// if things it uses can be simplified by bit propagation.
/// If so, return true.
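/// For example (illustrative), for Op = (and x, 255) only the low 8 bits of
/// x are demanded, so work feeding x that only affects its high bits can be
/// dropped.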
bool SimplifyDemandedBits(SDValue Op) {
unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
APInt Demanded = APInt::getAllOnesValue(BitWidth);
return SimplifyDemandedBits(Op, Demanded);
}
bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);
bool CombineToPreIndexedLoadStore(SDNode *N);
bool CombineToPostIndexedLoadStore(SDNode *N);
SDValue SplitIndexingFromLoad(LoadSDNode *LD);
bool SliceUpLoad(SDNode *N);
/// \brief Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
/// load.
///
/// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
/// \param InVecVT type of the input vector to EVE with bitcasts resolved.
/// \param EltNo index of the vector element to load.
/// \param OriginalLoad load that EVE came from to be replaced.
/// \returns EVE on success SDValue() on failure.
SDValue ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad);
void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
SDValue PromoteIntBinOp(SDValue Op);
SDValue PromoteIntShiftOp(SDValue Op);
SDValue PromoteExtend(SDValue Op);
bool PromoteLoad(SDValue Op);
void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
SDValue Trunc, SDValue ExtLoad, SDLoc DL,
ISD::NodeType ExtType);
/// Call the node-specific routine that knows how to fold each
/// particular type of node. If that doesn't do anything, try the
/// target-specific DAG combines.
SDValue combine(SDNode *N);
// Visitation implementation - Implement dag node combining for different
// node types. The semantics are as follows:
// Return Value:
// SDValue.getNode() == 0 - No change was made
// SDValue.getNode() == N - N was replaced, is dead and has been handled.
// otherwise - N should be replaced by the returned Operand.
//
SDValue visitTokenFactor(SDNode *N);
SDValue visitMERGE_VALUES(SDNode *N);
SDValue visitADD(SDNode *N);
SDValue visitSUB(SDNode *N);
SDValue visitADDC(SDNode *N);
SDValue visitSUBC(SDNode *N);
SDValue visitADDE(SDNode *N);
SDValue visitSUBE(SDNode *N);
SDValue visitMUL(SDNode *N);
SDValue visitSDIV(SDNode *N);
SDValue visitUDIV(SDNode *N);
SDValue visitSREM(SDNode *N);
SDValue visitUREM(SDNode *N);
SDValue visitMULHU(SDNode *N);
SDValue visitMULHS(SDNode *N);
SDValue visitSMUL_LOHI(SDNode *N);
SDValue visitUMUL_LOHI(SDNode *N);
SDValue visitSMULO(SDNode *N);
SDValue visitUMULO(SDNode *N);
SDValue visitSDIVREM(SDNode *N);
SDValue visitUDIVREM(SDNode *N);
SDValue visitAND(SDNode *N);
SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *LocReference);
SDValue visitOR(SDNode *N);
SDValue visitORLike(SDValue N0, SDValue N1, SDNode *LocReference);
SDValue visitXOR(SDNode *N);
SDValue SimplifyVBinOp(SDNode *N);
SDValue visitSHL(SDNode *N);
SDValue visitSRA(SDNode *N);
SDValue visitSRL(SDNode *N);
SDValue visitRotate(SDNode *N);
SDValue visitBSWAP(SDNode *N);
SDValue visitCTLZ(SDNode *N);
SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTTZ(SDNode *N);
SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTPOP(SDNode *N);
SDValue visitSELECT(SDNode *N);
SDValue visitVSELECT(SDNode *N);
SDValue visitSELECT_CC(SDNode *N);
SDValue visitSETCC(SDNode *N);
SDValue visitSIGN_EXTEND(SDNode *N);
SDValue visitZERO_EXTEND(SDNode *N);
SDValue visitANY_EXTEND(SDNode *N);
SDValue visitSIGN_EXTEND_INREG(SDNode *N);
SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N);
SDValue visitTRUNCATE(SDNode *N);
SDValue visitBITCAST(SDNode *N);
SDValue visitBUILD_PAIR(SDNode *N);
SDValue visitFADD(SDNode *N);
SDValue visitFSUB(SDNode *N);
SDValue visitFMUL(SDNode *N);
SDValue visitFMA(SDNode *N);
SDValue visitFDIV(SDNode *N);
SDValue visitFREM(SDNode *N);
SDValue visitFSQRT(SDNode *N);
SDValue visitFCOPYSIGN(SDNode *N);
SDValue visitSINT_TO_FP(SDNode *N);
SDValue visitUINT_TO_FP(SDNode *N);
SDValue visitFP_TO_SINT(SDNode *N);
SDValue visitFP_TO_UINT(SDNode *N);
SDValue visitFP_ROUND(SDNode *N);
SDValue visitFP_ROUND_INREG(SDNode *N);
SDValue visitFP_EXTEND(SDNode *N);
SDValue visitFNEG(SDNode *N);
SDValue visitFABS(SDNode *N);
SDValue visitFCEIL(SDNode *N);
SDValue visitFTRUNC(SDNode *N);
SDValue visitFFLOOR(SDNode *N);
SDValue visitFMINNUM(SDNode *N);
SDValue visitFMAXNUM(SDNode *N);
SDValue visitBRCOND(SDNode *N);
SDValue visitBR_CC(SDNode *N);
SDValue visitLOAD(SDNode *N);
SDValue visitSTORE(SDNode *N);
SDValue visitINSERT_VECTOR_ELT(SDNode *N);
SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
SDValue visitBUILD_VECTOR(SDNode *N);
SDValue visitCONCAT_VECTORS(SDNode *N);
SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
SDValue visitVECTOR_SHUFFLE(SDNode *N);
SDValue visitSCALAR_TO_VECTOR(SDNode *N);
SDValue visitINSERT_SUBVECTOR(SDNode *N);
SDValue visitMLOAD(SDNode *N);
SDValue visitMSTORE(SDNode *N);
SDValue visitMGATHER(SDNode *N);
SDValue visitMSCATTER(SDNode *N);
SDValue visitFP_TO_FP16(SDNode *N);
SDValue visitFADDForFMACombine(SDNode *N);
SDValue visitFSUBForFMACombine(SDNode *N);
SDValue XformToShuffleWithZero(SDNode *N);
SDValue ReassociateOps(unsigned Opc, SDLoc DL, SDValue LHS, SDValue RHS);
SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);
bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
SDValue SimplifySelect(SDLoc DL, SDValue N0, SDValue N1, SDValue N2);
SDValue SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1, SDValue N2,
SDValue N3, ISD::CondCode CC,
bool NotExtCompare = false);
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
SDLoc DL, bool foldBooleans = true);
bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
SDValue &CC) const;
bool isOneUseSetCC(SDValue N) const;
SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp);
SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
SDValue CombineExtLoad(SDNode *N);
SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
SDValue BuildSDIV(SDNode *N);
SDValue BuildSDIVPow2(SDNode *N);
SDValue BuildUDIV(SDNode *N);
SDValue BuildReciprocalEstimate(SDValue Op);
SDValue BuildRsqrtEstimate(SDValue Op);
SDValue BuildRsqrtNROneConst(SDValue Op, SDValue Est, unsigned Iterations);
SDValue BuildRsqrtNRTwoConst(SDValue Op, SDValue Est, unsigned Iterations);
SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
bool DemandHighBits = true);
SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
SDValue InnerPos, SDValue InnerNeg,
unsigned PosOpcode, unsigned NegOpcode,
SDLoc DL);
SDNode *MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL);
SDValue ReduceLoadWidth(SDNode *N);
SDValue ReduceLoadOpStoreWidth(SDNode *N);
SDValue TransformFPLoadStorePair(SDNode *N);
SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
SDValue GetDemandedBits(SDValue V, const APInt &Mask);
/// Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void GatherAllAliases(SDNode *N, SDValue OriginalChain,
SmallVectorImpl<SDValue> &Aliases);
/// Return true if there is any possibility that the two addresses overlap.
bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const;
/// Walk up chain skipping non-aliasing memory nodes, looking for a better
/// chain (aliasing node).
SDValue FindBetterChain(SDNode *N, SDValue Chain);
/// Holds a pointer to an LSBaseSDNode as well as information on where it
/// is located in a sequence of memory operations connected by a chain.
struct MemOpLink {
MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq):
MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { }
// Ptr to the mem node.
LSBaseSDNode *MemNode;
// Offset from the base ptr.
int64_t OffsetFromBase;
// The sequence number of this mem node; the lowest mem operand in the
// DAG starts at zero.
unsigned SequenceNum;
};
/// This is a helper function for MergeStoresOfConstantsOrVecElts. Returns a
/// constant build_vector of the stored constant values in Stores.
SDValue getMergedConstantVectorStore(SelectionDAG &DAG,
SDLoc SL,
ArrayRef<MemOpLink> Stores,
EVT Ty) const;
/// This is a helper function for MergeConsecutiveStores. When the source
/// elements of the consecutive stores are all constants or all extracted
/// vector elements, try to merge them into one larger store.
/// \return True if a merged store was created.
bool MergeStoresOfConstantsOrVecElts(SmallVectorImpl<MemOpLink> &StoreNodes,
EVT MemVT, unsigned NumElem,
bool IsConstantSrc, bool UseVector);
/// This is a helper function for MergeConsecutiveStores.
/// Stores that may be merged are placed in StoreNodes.
/// Loads that may alias with those stores are placed in AliasLoadNodes.
void getStoreMergeAndAliasCandidates(
StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes);
/// Merge consecutive store operations into a wide store.
/// This optimization uses wide integers or vectors when possible.
/// \return True if some memory operations were changed.
bool MergeConsecutiveStores(StoreSDNode *N);
/// \brief Try to transform a truncation where C is a constant:
/// (trunc (and X, C)) -> (and (trunc X), (trunc C))
///
/// \p N needs to be a truncation and its first operand an AND. Other
/// requirements are checked by the function (e.g. that trunc is
/// single-use) and if missed an empty SDValue is returned.
SDValue distributeTruncateThroughAnd(SDNode *N);
public:
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
auto *F = DAG.getMachineFunction().getFunction();
ForCodeSize = F->hasFnAttribute(Attribute::OptimizeForSize) ||
F->hasFnAttribute(Attribute::MinSize);
}
/// Runs the dag combiner on all nodes in the worklist.
void Run(CombineLevel AtLevel);
SelectionDAG &getDAG() const { return DAG; }
/// Returns a type large enough to hold any valid shift amount - before type
/// legalization these can be huge.
EVT getShiftAmountTy(EVT LHSTy) {
assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
if (LHSTy.isVector())
return LHSTy;
auto &DL = DAG.getDataLayout();
return LegalTypes ? TLI.getScalarShiftAmountTy(DL, LHSTy)
: TLI.getPointerTy(DL);
}
/// This method returns true if we are running before type legalization or
/// if the specified VT is legal.
bool isTypeLegal(const EVT &VT) {
if (!LegalTypes) return true;
return TLI.isTypeLegal(VT);
}
/// Convenience wrapper around TargetLowering::getSetCCResultType
EVT getSetCCResultType(EVT VT) const {
return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
}
};
}
namespace {
/// This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
class WorklistRemover : public SelectionDAG::DAGUpdateListener {
DAGCombiner &DC;
public:
explicit WorklistRemover(DAGCombiner &dc)
: SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
void NodeDeleted(SDNode *N, SDNode *E) override {
DC.removeFromWorklist(N);
}
};
}
//===----------------------------------------------------------------------===//
// TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//
void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
((DAGCombiner*)DC)->AddToWorklist(N);
}
void TargetLowering::DAGCombinerInfo::RemoveFromWorklist(SDNode *N) {
((DAGCombiner*)DC)->removeFromWorklist(N);
}
SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) {
return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
}
SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res, bool AddTo) {
return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
}
SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
}
void TargetLowering::DAGCombinerInfo::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
}
//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//
void DAGCombiner::deleteAndRecombine(SDNode *N) {
removeFromWorklist(N);
// If the operands of this node are only used by the node, they will now be
// dead. Make sure to re-visit them and recursively delete dead nodes.
for (const SDValue &Op : N->ops())
// For an operand generating multiple values, one of the values may
// become dead allowing further simplification (e.g. split index
// arithmetic from an indexed load).
if (Op->hasOneUse() || Op->getNumValues() > 1)
AddToWorklist(Op.getNode());
DAG.DeleteNode(N);
}
/// Return 1 if we can compute the negated form of the specified expression for
/// the same cost as the expression itself, or 2 if we can compute the negated
/// form more cheaply than the expression itself.
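/// For example (illustrative, under UnsafeFPMath), (fsub A, B) is negatible
/// for free because -(A-B) can be emitted as (fsub B, A).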
static char isNegatibleForFree(SDValue Op, bool LegalOperations,
const TargetLowering &TLI,
const TargetOptions *Options,
unsigned Depth = 0) {
// fneg is removable even if it has multiple uses.
if (Op.getOpcode() == ISD::FNEG) return 2;
// Don't allow anything with multiple uses.
if (!Op.hasOneUse()) return 0;
// Don't recurse exponentially.
if (Depth > 6) return 0;
switch (Op.getOpcode()) {
default: return 0;
case ISD::ConstantFP:
// Don't invert constant FP values after legalize. The negated constant
// isn't necessarily legal.
return LegalOperations ? 0 : 1;
case ISD::FADD:
// FIXME: determine better conditions for this xform.
if (!Options->UnsafeFPMath) return 0;
// After operation legalization, it might not be legal to create new FSUBs.
if (LegalOperations &&
!TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType()))
return 0;
// fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
Options, Depth + 1))
return V;
// fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
Depth + 1);
case ISD::FSUB:
// We can't turn -(A-B) into B-A when we honor signed zeros.
if (!Options->UnsafeFPMath) return 0;
// fold (fneg (fsub A, B)) -> (fsub B, A)
return 1;
case ISD::FMUL:
case ISD::FDIV:
if (Options->HonorSignDependentRoundingFPMath()) return 0;
// fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
Options, Depth + 1))
return V;
return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
Depth + 1);
case ISD::FP_EXTEND:
case ISD::FP_ROUND:
case ISD::FSIN:
return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options,
Depth + 1);
}
}
/// If isNegatibleForFree returns true, return the newly negated expression.
static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
bool LegalOperations, unsigned Depth = 0) {
const TargetOptions &Options = DAG.getTarget().Options;
// fneg is removable even if it has multiple uses.
if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);
// Don't allow anything with multiple uses.
assert(Op.hasOneUse() && "Unknown reuse!");
assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown code");
case ISD::ConstantFP: {
APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
V.changeSign();
return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType());
}
case ISD::FADD:
// FIXME: determine better conditions for this xform.
assert(Options.UnsafeFPMath);
// fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
DAG.getTargetLoweringInfo(), &Options, Depth+1))
return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, Depth+1),
Op.getOperand(1));
// fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(1), DAG,
LegalOperations, Depth+1),
Op.getOperand(0));
case ISD::FSUB:
// We can't turn -(A-B) into B-A when we honor signed zeros.
assert(Options.UnsafeFPMath);
// fold (fneg (fsub 0, B)) -> B
if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
if (N0CFP->isZero())
return Op.getOperand(1);
// fold (fneg (fsub A, B)) -> (fsub B, A)
return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(0));
case ISD::FMUL:
case ISD::FDIV:
assert(!Options.HonorSignDependentRoundingFPMath());
// fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
DAG.getTargetLoweringInfo(), &Options, Depth+1))
return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, Depth+1),
Op.getOperand(1));
// fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
Op.getOperand(0),
GetNegatedExpression(Op.getOperand(1), DAG,
LegalOperations, Depth+1));
case ISD::FP_EXTEND:
case ISD::FSIN:
return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, Depth+1));
case ISD::FP_ROUND:
return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, Depth+1),
Op.getOperand(1));
}
}
// Return true if this node is a setcc, or is a select_cc
// that selects between the target values used for true and false, making it
// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
// the appropriate nodes based on the type of node we are checking. This
// simplifies life a bit for the callers.
bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
SDValue &CC) const {
if (N.getOpcode() == ISD::SETCC) {
LHS = N.getOperand(0);
RHS = N.getOperand(1);
CC = N.getOperand(2);
return true;
}
if (N.getOpcode() != ISD::SELECT_CC ||
!TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
!TLI.isConstFalseVal(N.getOperand(3).getNode()))
return false;
if (TLI.getBooleanContents(N.getValueType()) ==
TargetLowering::UndefinedBooleanContent)
return false;
LHS = N.getOperand(0);
RHS = N.getOperand(1);
CC = N.getOperand(4);
return true;
}
/// Return true if this is a SetCC-equivalent operation with only one use.
/// If this is true, it allows the users to invert the operation for free when
/// it is profitable to do so.
bool DAGCombiner::isOneUseSetCC(SDValue N) const {
SDValue N0, N1, N2;
if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
return true;
return false;
}
/// Returns true if N is a BUILD_VECTOR node whose
/// elements are all the same constant or undefined.
static bool isConstantSplatVector(SDNode *N, APInt& SplatValue) {
BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(N);
if (!C)
return false;
APInt SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
EVT EltVT = N->getValueType(0).getVectorElementType();
return (C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
HasAnyUndefs) &&
EltVT.getSizeInBits() >= SplatBitSize);
}
// \brief Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
static SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) {
if (isa<ConstantSDNode>(N))
return N.getNode();
if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
return N.getNode();
return nullptr;
}
// \brief Returns the SDNode if it is a constant float BuildVector
// or constant float.
static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) {
if (isa<ConstantFPSDNode>(N))
return N.getNode();
if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
return N.getNode();
return nullptr;
}
// \brief Returns the SDNode if it is a constant splat BuildVector or constant
// int.
static ConstantSDNode *isConstOrConstSplat(SDValue N) {
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
return CN;
if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
BitVector UndefElements;
ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
// BuildVectors can truncate their operands. Ignore that case here.
// FIXME: We blindly ignore splats which include undef which is overly
// pessimistic.
if (CN && UndefElements.none() &&
CN->getValueType(0) == N.getValueType().getScalarType())
return CN;
}
return nullptr;
}
// \brief Returns the SDNode if it is a constant splat BuildVector or constant
// float.
static ConstantFPSDNode *isConstOrConstSplatFP(SDValue N) {
if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
return CN;
if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
BitVector UndefElements;
ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
if (CN && UndefElements.none())
return CN;
}
return nullptr;
}
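/// ReassociateOps - Reassociate expressions of the given commutative opcode
/// to move constants together so they can be folded, e.g. (illustrative):
///   (add (add x, c1), c2) -> (add x, (add c1, c2)) -> (add x, c3)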
SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDLoc DL,
SDValue N0, SDValue N1) {
EVT VT = N0.getValueType();
if (N0.getOpcode() == Opc) {
if (SDNode *L = isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
if (SDNode *R = isConstantIntBuildVectorOrConstantInt(N1)) {
// reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, L, R))
return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
return SDValue();
}
if (N0.hasOneUse()) {
// reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one
// use
SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
if (!OpNode.getNode())
return SDValue();
AddToWorklist(OpNode.getNode());
return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
}
}
}
if (N1.getOpcode() == Opc) {
if (SDNode *R = isConstantIntBuildVectorOrConstantInt(N1.getOperand(1))) {
if (SDNode *L = isConstantIntBuildVectorOrConstantInt(N0)) {
// reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, R, L))
return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
return SDValue();
}
if (N1.hasOneUse()) {
// reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one
// use
SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N1.getOperand(0), N0);
if (!OpNode.getNode())
return SDValue();
AddToWorklist(OpNode.getNode());
return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
}
}
}
return SDValue();
}
SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
bool AddTo) {
assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
++NodesCombined;
DEBUG(dbgs() << "\nReplacing.1 ";
N->dump(&DAG);
dbgs() << "\nWith: ";
To[0].getNode()->dump(&DAG);
dbgs() << " and " << NumTo-1 << " other values\n");
for (unsigned i = 0, e = NumTo; i != e; ++i)
assert((!To[i].getNode() ||
N->getValueType(i) == To[i].getValueType()) &&
"Cannot combine value to value of different type!");
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesWith(N, To);
if (AddTo) {
// Push the new nodes and any users onto the worklist
for (unsigned i = 0, e = NumTo; i != e; ++i) {
if (To[i].getNode()) {
AddToWorklist(To[i].getNode());
AddUsersToWorklist(To[i].getNode());
}
}
}
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
// something else needing this node.
if (N->use_empty())
deleteAndRecombine(N);
return SDValue(N, 0);
}
void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
// Replace all uses. If any nodes become isomorphic to other nodes and
// are deleted, make sure to remove them from our worklist.
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);
// Push the new node and any (possibly new) users onto the worklist.
AddToWorklist(TLO.New.getNode());
AddUsersToWorklist(TLO.New.getNode());
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
// something else needing this node.
if (TLO.Old.getNode()->use_empty())
deleteAndRecombine(TLO.Old.getNode());
}
/// Check the specified integer node value to see if it can be simplified or if
/// things it uses can be simplified by bit propagation. If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
APInt KnownZero, KnownOne;
if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
return false;
// Revisit the node.
AddToWorklist(Op.getNode());
// Replace the old value with the new one.
++NodesCombined;
DEBUG(dbgs() << "\nReplacing.2 ";
TLO.Old.getNode()->dump(&DAG);
dbgs() << "\nWith: ";
TLO.New.getNode()->dump(&DAG);
dbgs() << '\n');
CommitTargetLoweringOpt(TLO);
return true;
}
void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
SDLoc dl(Load);
EVT VT = Load->getValueType(0);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, SDValue(ExtLoad, 0));
DEBUG(dbgs() << "\nReplacing.9 ";
Load->dump(&DAG);
dbgs() << "\nWith: ";
Trunc.getNode()->dump(&DAG);
dbgs() << '\n');
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
deleteAndRecombine(Load);
AddToWorklist(Trunc.getNode());
}
SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
Replace = false;
SDLoc dl(Op);
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
EVT MemVT = LD->getMemoryVT();
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD
: ISD::EXTLOAD)
: LD->getExtensionType();
Replace = true;
return DAG.getExtLoad(ExtType, dl, PVT,
LD->getChain(), LD->getBasePtr(),
MemVT, LD->getMemOperand());
}
unsigned Opc = Op.getOpcode();
switch (Opc) {
default: break;
case ISD::AssertSext:
return DAG.getNode(ISD::AssertSext, dl, PVT,
SExtPromoteOperand(Op.getOperand(0), PVT),
Op.getOperand(1));
case ISD::AssertZext:
return DAG.getNode(ISD::AssertZext, dl, PVT,
ZExtPromoteOperand(Op.getOperand(0), PVT),
Op.getOperand(1));
case ISD::Constant: {
unsigned ExtOpc =
Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
return DAG.getNode(ExtOpc, dl, PVT, Op);
}
}
if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
return SDValue();
return DAG.getNode(ISD::ANY_EXTEND, dl, PVT, Op);
}
SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
return SDValue();
EVT OldVT = Op.getValueType();
SDLoc dl(Op);
bool Replace = false;
SDValue NewOp = PromoteOperand(Op, PVT, Replace);
if (!NewOp.getNode())
return SDValue();
AddToWorklist(NewOp.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NewOp.getValueType(), NewOp,
DAG.getValueType(OldVT));
}
SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
EVT OldVT = Op.getValueType();
SDLoc dl(Op);
bool Replace = false;
SDValue NewOp = PromoteOperand(Op, PVT, Replace);
if (!NewOp.getNode())
return SDValue();
AddToWorklist(NewOp.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
return DAG.getZeroExtendInReg(NewOp, dl, OldVT);
}
/// Promote the specified integer binary operation if the target indicates it
/// is beneficial, e.g. on x86 it's usually better to promote i16 operations
/// to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
if (!LegalOperations)
return SDValue();
EVT VT = Op.getValueType();
if (VT.isVector() || !VT.isInteger())
return SDValue();
// If operation type is 'undesirable', e.g. i16 on x86, consider
// promoting it.
unsigned Opc = Op.getOpcode();
if (TLI.isTypeDesirableForOp(Opc, VT))
return SDValue();
EVT PVT = VT;
// Consult target whether it is a good idea to promote this operation and
// what's the right type to promote it to.
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
bool Replace0 = false;
SDValue N0 = Op.getOperand(0);
SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
if (!NN0.getNode())
return SDValue();
bool Replace1 = false;
SDValue N1 = Op.getOperand(1);
SDValue NN1;
if (N0 == N1)
NN1 = NN0;
else {
NN1 = PromoteOperand(N1, PVT, Replace1);
if (!NN1.getNode())
return SDValue();
}
AddToWorklist(NN0.getNode());
if (NN1.getNode())
AddToWorklist(NN1.getNode());
if (Replace0)
ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
if (Replace1)
ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());
DEBUG(dbgs() << "\nPromoting ";
Op.getNode()->dump(&DAG));
SDLoc dl(Op);
return DAG.getNode(ISD::TRUNCATE, dl, VT,
DAG.getNode(Opc, dl, PVT, NN0, NN1));
}
return SDValue();
}
/// Promote the specified integer shift operation if the target indicates it
/// is beneficial, e.g. on x86 it's usually better to promote i16 operations
/// to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
if (!LegalOperations)
return SDValue();
EVT VT = Op.getValueType();
if (VT.isVector() || !VT.isInteger())
return SDValue();
// If operation type is 'undesirable', e.g. i16 on x86, consider
// promoting it.
unsigned Opc = Op.getOpcode();
if (TLI.isTypeDesirableForOp(Opc, VT))
return SDValue();
EVT PVT = VT;
// Consult target whether it is a good idea to promote this operation and
// what's the right type to promote it to.
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
bool Replace = false;
SDValue N0 = Op.getOperand(0);
if (Opc == ISD::SRA)
N0 = SExtPromoteOperand(Op.getOperand(0), PVT);
else if (Opc == ISD::SRL)
N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
else
N0 = PromoteOperand(N0, PVT, Replace);
if (!N0.getNode())
return SDValue();
AddToWorklist(N0.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());
DEBUG(dbgs() << "\nPromoting ";
Op.getNode()->dump(&DAG));
SDLoc dl(Op);
return DAG.getNode(ISD::TRUNCATE, dl, VT,
DAG.getNode(Opc, dl, PVT, N0, Op.getOperand(1)));
}
return SDValue();
}
SDValue DAGCombiner::PromoteExtend(SDValue Op) {
if (!LegalOperations)
return SDValue();
EVT VT = Op.getValueType();
if (VT.isVector() || !VT.isInteger())
return SDValue();
// If operation type is 'undesirable', e.g. i16 on x86, consider
// promoting it.
unsigned Opc = Op.getOpcode();
if (TLI.isTypeDesirableForOp(Opc, VT))
return SDValue();
EVT PVT = VT;
// Consult target whether it is a good idea to promote this operation and
// what's the right type to promote it to.
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
// fold (aext (aext x)) -> (aext x)
// fold (aext (zext x)) -> (zext x)
// fold (aext (sext x)) -> (sext x)
DEBUG(dbgs() << "\nPromoting ";
Op.getNode()->dump(&DAG));
return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
}
return SDValue();
}
bool DAGCombiner::PromoteLoad(SDValue Op) {
if (!LegalOperations)
return false;
EVT VT = Op.getValueType();
if (VT.isVector() || !VT.isInteger())
return false;
// If operation type is 'undesirable', e.g. i16 on x86, consider
// promoting it.
unsigned Opc = Op.getOpcode();
if (TLI.isTypeDesirableForOp(Opc, VT))
return false;
EVT PVT = VT;
// Consult target whether it is a good idea to promote this operation and
// what's the right type to promote it to.
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
SDLoc dl(Op);
SDNode *N = Op.getNode();
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT MemVT = LD->getMemoryVT();
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD
: ISD::EXTLOAD)
: LD->getExtensionType();
SDValue NewLD = DAG.getExtLoad(ExtType, dl, PVT,
LD->getChain(), LD->getBasePtr(),
MemVT, LD->getMemOperand());
SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);
DEBUG(dbgs() << "\nPromoting ";
N->dump(&DAG);
dbgs() << "\nTo: ";
Result.getNode()->dump(&DAG);
dbgs() << '\n');
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
deleteAndRecombine(N);
AddToWorklist(Result.getNode());
return true;
}
return false;
}
/// \brief Recursively delete a node which has no uses and any operands for
/// which it is the only use.
///
/// Note that this both deletes the nodes and removes them from the worklist.
/// It also adds any nodes that have had a user deleted to the worklist, as
/// they may now have only one use and be subject to other combines.
bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
if (!N->use_empty())
return false;
SmallSetVector<SDNode *, 16> Nodes;
Nodes.insert(N);
do {
N = Nodes.pop_back_val();
if (!N)
continue;
if (N->use_empty()) {
for (const SDValue &ChildN : N->op_values())
Nodes.insert(ChildN.getNode());
removeFromWorklist(N);
DAG.DeleteNode(N);
} else {
AddToWorklist(N);
}
} while (!Nodes.empty());
return true;
}
//===----------------------------------------------------------------------===//
// Main DAG Combiner implementation
//===----------------------------------------------------------------------===//
void DAGCombiner::Run(CombineLevel AtLevel) {
// Set the instance variables so that the various visit routines may use them.
Level = AtLevel;
LegalOperations = Level >= AfterLegalizeVectorOps;
LegalTypes = Level >= AfterLegalizeTypes;
// Add all the dag nodes to the worklist.
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
E = DAG.allnodes_end(); I != E; ++I)
AddToWorklist(I);
// Create a dummy node (which is not added to allnodes), that adds a reference
// to the root node, preventing it from being deleted, and tracking any
// changes of the root.
HandleSDNode Dummy(DAG.getRoot());
// While the worklist isn't empty, find a node and try to combine it.
while (!WorklistMap.empty()) {
SDNode *N;
// The Worklist holds the SDNodes in order, but it may contain null entries.
do {
N = Worklist.pop_back_val();
} while (!N);
bool GoodWorklistEntry = WorklistMap.erase(N);
(void)GoodWorklistEntry;
assert(GoodWorklistEntry &&
"Found a worklist entry without a corresponding map entry!");
// If N has no uses, it is dead. Make sure to revisit all N's operands once
// N is deleted from the DAG, since they too may now be dead or may have a
// reduced number of uses, allowing other xforms.
if (recursivelyDeleteUnusedNodes(N))
continue;
WorklistRemover DeadNodes(*this);
// If this combine is running after legalizing the DAG, re-legalize any
// nodes pulled off the worklist.
if (Level == AfterLegalizeDAG) {
SmallSetVector<SDNode *, 16> UpdatedNodes;
bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);
for (SDNode *LN : UpdatedNodes) {
AddToWorklist(LN);
AddUsersToWorklist(LN);
}
if (!NIsValid)
continue;
}
DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));
// Add any operands of the new node which have not yet been combined to the
// worklist as well. Because the worklist uniques things already, this
// won't repeatedly process the same operand.
CombinedNodes.insert(N);
for (const SDValue &ChildN : N->op_values())
if (!CombinedNodes.count(ChildN.getNode()))
AddToWorklist(ChildN.getNode());
SDValue RV = combine(N);
if (!RV.getNode())
continue;
++NodesCombined;
// If we get back the same node we passed in, rather than a new node or
// zero, we know that the node must have defined multiple values and
// CombineTo was used. Since CombineTo takes care of the worklist
// mechanics for us, we have no work to do in this case.
if (RV.getNode() == N)
continue;
assert(N->getOpcode() != ISD::DELETED_NODE &&
RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned new node!");
DEBUG(dbgs() << " ... into: ";
RV.getNode()->dump(&DAG));
// Transfer debug value.
DAG.TransferDbgValues(SDValue(N, 0), RV);
if (N->getNumValues() == RV.getNode()->getNumValues())
DAG.ReplaceAllUsesWith(N, RV.getNode());
else {
assert(N->getValueType(0) == RV.getValueType() &&
N->getNumValues() == 1 && "Type mismatch");
SDValue OpV = RV;
DAG.ReplaceAllUsesWith(N, &OpV);
}
// Push the new node and any users onto the worklist
AddToWorklist(RV.getNode());
AddUsersToWorklist(RV.getNode());
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
// something else needing this node. This will also take care of adding any
// operands which have lost a user to the worklist.
recursivelyDeleteUnusedNodes(N);
}
// If the root changed (e.g. it was a dead load), update the root.
DAG.setRoot(Dummy.getValue());
DAG.RemoveDeadNodes();
}
SDValue DAGCombiner::visit(SDNode *N) {
switch (N->getOpcode()) {
default: break;
case ISD::TokenFactor: return visitTokenFactor(N);
case ISD::MERGE_VALUES: return visitMERGE_VALUES(N);
case ISD::ADD: return visitADD(N);
case ISD::SUB: return visitSUB(N);
case ISD::ADDC: return visitADDC(N);
case ISD::SUBC: return visitSUBC(N);
case ISD::ADDE: return visitADDE(N);
case ISD::SUBE: return visitSUBE(N);
case ISD::MUL: return visitMUL(N);
case ISD::SDIV: return visitSDIV(N);
case ISD::UDIV: return visitUDIV(N);
case ISD::SREM: return visitSREM(N);
case ISD::UREM: return visitUREM(N);
case ISD::MULHU: return visitMULHU(N);
case ISD::MULHS: return visitMULHS(N);
case ISD::SMUL_LOHI: return visitSMUL_LOHI(N);
case ISD::UMUL_LOHI: return visitUMUL_LOHI(N);
case ISD::SMULO: return visitSMULO(N);
case ISD::UMULO: return visitUMULO(N);
case ISD::SDIVREM: return visitSDIVREM(N);
case ISD::UDIVREM: return visitUDIVREM(N);
case ISD::AND: return visitAND(N);
case ISD::OR: return visitOR(N);
case ISD::XOR: return visitXOR(N);
case ISD::SHL: return visitSHL(N);
case ISD::SRA: return visitSRA(N);
case ISD::SRL: return visitSRL(N);
case ISD::ROTR:
case ISD::ROTL: return visitRotate(N);
case ISD::BSWAP: return visitBSWAP(N);
case ISD::CTLZ: return visitCTLZ(N);
case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
case ISD::CTTZ: return visitCTTZ(N);
case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N);
case ISD::CTPOP: return visitCTPOP(N);
case ISD::SELECT: return visitSELECT(N);
case ISD::VSELECT: return visitVSELECT(N);
case ISD::SELECT_CC: return visitSELECT_CC(N);
case ISD::SETCC: return visitSETCC(N);
case ISD::SIGN_EXTEND: return visitSIGN_EXTEND(N);
case ISD::ZERO_EXTEND: return visitZERO_EXTEND(N);
case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N);
case ISD::TRUNCATE: return visitTRUNCATE(N);
case ISD::BITCAST: return visitBITCAST(N);
case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
case ISD::FADD: return visitFADD(N);
case ISD::FSUB: return visitFSUB(N);
case ISD::FMUL: return visitFMUL(N);
case ISD::FMA: return visitFMA(N);
case ISD::FDIV: return visitFDIV(N);
case ISD::FREM: return visitFREM(N);
case ISD::FSQRT: return visitFSQRT(N);
case ISD::FCOPYSIGN: return visitFCOPYSIGN(N);
case ISD::SINT_TO_FP: return visitSINT_TO_FP(N);
case ISD::UINT_TO_FP: return visitUINT_TO_FP(N);
case ISD::FP_TO_SINT: return visitFP_TO_SINT(N);
case ISD::FP_TO_UINT: return visitFP_TO_UINT(N);
case ISD::FP_ROUND: return visitFP_ROUND(N);
case ISD::FP_ROUND_INREG: return visitFP_ROUND_INREG(N);
case ISD::FP_EXTEND: return visitFP_EXTEND(N);
case ISD::FNEG: return visitFNEG(N);
case ISD::FABS: return visitFABS(N);
case ISD::FFLOOR: return visitFFLOOR(N);
case ISD::FMINNUM: return visitFMINNUM(N);
case ISD::FMAXNUM: return visitFMAXNUM(N);
case ISD::FCEIL: return visitFCEIL(N);
case ISD::FTRUNC: return visitFTRUNC(N);
case ISD::BRCOND: return visitBRCOND(N);
case ISD::BR_CC: return visitBR_CC(N);
case ISD::LOAD: return visitLOAD(N);
case ISD::STORE: return visitSTORE(N);
case ISD::INSERT_VECTOR_ELT: return visitINSERT_VECTOR_ELT(N);
case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N);
case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
case ISD::SCALAR_TO_VECTOR: return visitSCALAR_TO_VECTOR(N);
case ISD::INSERT_SUBVECTOR: return visitINSERT_SUBVECTOR(N);
case ISD::MGATHER: return visitMGATHER(N);
case ISD::MLOAD: return visitMLOAD(N);
case ISD::MSCATTER: return visitMSCATTER(N);
case ISD::MSTORE: return visitMSTORE(N);
case ISD::FP_TO_FP16: return visitFP_TO_FP16(N);
}
return SDValue();
}
SDValue DAGCombiner::combine(SDNode *N) {
SDValue RV = visit(N);
// If nothing happened, try a target-specific DAG combine.
if (!RV.getNode()) {
assert(N->getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned NULL!");
if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {
// Expose the DAG combiner to the target combiner impls.
TargetLowering::DAGCombinerInfo
DagCombineInfo(DAG, Level, false, this);
RV = TLI.PerformDAGCombine(N, DagCombineInfo);
}
}
// If nothing happened still, try promoting the operation.
if (!RV.getNode()) {
switch (N->getOpcode()) {
default: break;
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
RV = PromoteIntBinOp(SDValue(N, 0));
break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
RV = PromoteIntShiftOp(SDValue(N, 0));
break;
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
RV = PromoteExtend(SDValue(N, 0));
break;
case ISD::LOAD:
if (PromoteLoad(SDValue(N, 0)))
RV = SDValue(N, 0);
break;
}
}
// If N is a commutative binary node, try commuting it to enable more
// sdisel CSE.
if (!RV.getNode() && SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
N->getNumValues() == 1) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// Constant operands are canonicalized to RHS.
if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
SDValue Ops[] = {N1, N0};
SDNode *CSENode;
if (const auto *BinNode = dyn_cast<BinaryWithFlagsSDNode>(N)) {
CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops,
&BinNode->Flags);
} else {
CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops);
}
if (CSENode)
return SDValue(CSENode, 0);
}
}
return RV;
}
/// Given a node, return its input chain if it has one, otherwise return a
/// null SDValue.
static SDValue getInputChainForNode(SDNode *N) {
if (unsigned NumOps = N->getNumOperands()) {
if (N->getOperand(0).getValueType() == MVT::Other)
return N->getOperand(0);
if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
return N->getOperand(NumOps-1);
for (unsigned i = 1; i < NumOps-1; ++i)
if (N->getOperand(i).getValueType() == MVT::Other)
return N->getOperand(i);
}
return SDValue();
}
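// For most chained nodes the chain is operand 0 (e.g. a load is
// (chain, base, offset)), so getInputChainForNode checks the cheap first and
// last positions before scanning the middle operands.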
SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
// If N has two operands, where one has an input chain equal to the other,
// the 'other' chain is redundant.
if (N->getNumOperands() == 2) {
if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
return N->getOperand(0);
if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
return N->getOperand(1);
}
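  // e.g. given TokenFactor(Ld, Ch) where Ld is the chain result of
  // load(Ch, ...), every user of the TokenFactor is already ordered after
  // Ch through Ld, so the TokenFactor can collapse to Ld alone.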
SmallVector<SDNode *, 8> TFs; // List of token factors to visit.
SmallVector<SDValue, 8> Ops; // Ops for replacing token factor.
SmallPtrSet<SDNode*, 16> SeenOps;
bool Changed = false; // If we should replace this token factor.
// Start out with this token factor.
TFs.push_back(N);
  // Iterate through token factors. The TFs list grows when new token factors
  // are encountered.
for (unsigned i = 0; i < TFs.size(); ++i) {
SDNode *TF = TFs[i];
// Check each of the operands.
for (const SDValue &Op : TF->op_values()) {
switch (Op.getOpcode()) {
case ISD::EntryToken:
// Entry tokens don't need to be added to the list. They are
// redundant.
Changed = true;
break;
case ISD::TokenFactor:
if (Op.hasOneUse() &&
std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
// Queue up for processing.
TFs.push_back(Op.getNode());
// Clean up in case the token factor is removed.
AddToWorklist(Op.getNode());
Changed = true;
break;
}
        // Fall through.
default:
// Only add if it isn't already in the list.
if (SeenOps.insert(Op.getNode()).second)
Ops.push_back(Op);
else
Changed = true;
break;
}
}
}
SDValue Result;
// If we've changed things around then replace token factor.
if (Changed) {
if (Ops.empty()) {
// The entry token is the only possible outcome.
Result = DAG.getEntryNode();
} else {
// New and improved token factor.
Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
}
// Add users to worklist if AA is enabled, since it may introduce
// a lot of new chained token factors while removing memory deps.
bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
: DAG.getSubtarget().useAA();
return CombineTo(N, Result, UseAA /*add to worklist*/);
}
return Result;
}
/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
WorklistRemover DeadNodes(*this);
// Replacing results may cause a different MERGE_VALUES to suddenly
// be CSE'd with N, and carry its uses with it. Iterate until no
// uses remain, to ensure that the node can be safely deleted.
// First add the users of this node to the work list so that they
// can be tried again once they have new operands.
AddUsersToWorklist(N);
do {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
} while (!N->use_empty());
deleteAndRecombine(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
static bool isNullConstant(SDValue V) {
ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
return Const != nullptr && Const->isNullValue();
}
static bool isNullFPConstant(SDValue V) {
ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
return Const != nullptr && Const->isZero() && !Const->isNegative();
}
static bool isAllOnesConstant(SDValue V) {
ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
return Const != nullptr && Const->isAllOnesValue();
}
static bool isOneConstant(SDValue V) {
ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
return Const != nullptr && Const->isOne();
}
/// If \p N is a ConstantSDNode with isOpaque() == false return it cast to a
/// ConstantSDNode pointer else nullptr.
static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {
ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N);
return Const != nullptr && !Const->isOpaque() ? Const : nullptr;
}
SDValue DAGCombiner::visitADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (add x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N1;
}
// fold (add x, undef) -> undef
if (N0.getOpcode() == ISD::UNDEF)
return N0;
if (N1.getOpcode() == ISD::UNDEF)
return N1;
// fold (add c1, c2) -> c1+c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::ADD, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (isConstantIntBuildVectorOrConstantInt(N0) &&
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::ADD, SDLoc(N), VT, N1, N0);
// fold (add x, 0) -> x
if (isNullConstant(N1))
return N0;
// fold (add Sym, c) -> Sym+c
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
GA->getOpcode() == ISD::GlobalAddress)
return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
GA->getOffset() +
(uint64_t)N1C->getSExtValue());
// fold ((c1-A)+c2) -> (c1+c2)-A
if (N1C && N0.getOpcode() == ISD::SUB)
if (ConstantSDNode *N0C = getAsNonOpaqueConstant(N0.getOperand(0))) {
SDLoc DL(N);
return DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(N1C->getAPIntValue()+
N0C->getAPIntValue(), DL, VT),
N0.getOperand(1));
}
// reassociate add
if (SDValue RADD = ReassociateOps(ISD::ADD, SDLoc(N), N0, N1))
return RADD;
// fold ((0-A) + B) -> B-A
if (N0.getOpcode() == ISD::SUB && isNullConstant(N0.getOperand(0)))
return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1, N0.getOperand(1));
// fold (A + (0-B)) -> A-B
if (N1.getOpcode() == ISD::SUB && isNullConstant(N1.getOperand(0)))
return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, N1.getOperand(1));
// fold (A+(B-A)) -> B
if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
return N1.getOperand(0);
// fold ((B-A)+A) -> B
if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
return N0.getOperand(0);
// fold (A+(B-(A+C))) to (B-C)
if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
N0 == N1.getOperand(1).getOperand(0))
return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1.getOperand(0),
N1.getOperand(1).getOperand(1));
// fold (A+(B-(C+A))) to (B-C)
if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
N0 == N1.getOperand(1).getOperand(1))
return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1.getOperand(0),
N1.getOperand(1).getOperand(0));
// fold (A+((B-A)+or-C)) to (B+or-C)
if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
N1.getOperand(0).getOpcode() == ISD::SUB &&
N0 == N1.getOperand(0).getOperand(1))
return DAG.getNode(N1.getOpcode(), SDLoc(N), VT,
N1.getOperand(0).getOperand(0), N1.getOperand(1));
// fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
if (isa<ConstantSDNode>(N00) || isa<ConstantSDNode>(N10))
return DAG.getNode(ISD::SUB, SDLoc(N), VT,
DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10),
DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11));
}
if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (a+b) -> (a|b) iff a and b share no bits.
if (VT.isInteger() && !VT.isVector()) {
APInt LHSZero, LHSOne;
APInt RHSZero, RHSOne;
DAG.computeKnownBits(N0, LHSZero, LHSOne);
if (LHSZero.getBoolValue()) {
DAG.computeKnownBits(N1, RHSZero, RHSOne);
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
// If all possibly-set bits on the RHS are clear on the LHS, return an OR.
if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero){
if (!LegalOperations || TLI.isOperationLegal(ISD::OR, VT))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1);
}
}
}
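  // e.g. with N0 = (and x, 0xF0) and N1 = (and y, 0x0F), no bit position
  // can generate a carry, so N0 + N1 computes exactly N0 | N1.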
// fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB &&
isNullConstant(N1.getOperand(0).getOperand(0)))
return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0,
DAG.getNode(ISD::SHL, SDLoc(N), VT,
N1.getOperand(0).getOperand(1),
N1.getOperand(1)));
if (N0.getOpcode() == ISD::SHL && N0.getOperand(0).getOpcode() == ISD::SUB &&
isNullConstant(N0.getOperand(0).getOperand(0)))
return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1,
DAG.getNode(ISD::SHL, SDLoc(N), VT,
N0.getOperand(0).getOperand(1),
N0.getOperand(1)));
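  // Both folds above rely on (0 - y) << n == 0 - (y << n) modulo 2^BitWidth,
  // which lets the negation be hoisted out of the shift and turned into a SUB.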
if (N1.getOpcode() == ISD::AND) {
SDValue AndOp0 = N1.getOperand(0);
unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
unsigned DestBits = VT.getScalarType().getSizeInBits();
// (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
// and similar xforms where the inner op is either ~0 or 0.
if (NumSignBits == DestBits && isOneConstant(N1->getOperand(1))) {
SDLoc DL(N);
return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
}
}
// add (sext i1), X -> sub X, (zext i1)
if (N0.getOpcode() == ISD::SIGN_EXTEND &&
N0.getOperand(0).getValueType() == MVT::i1 &&
!TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) {
SDLoc DL(N);
SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
}
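  // This is valid because (sext i1 b) is 0 or -1 while (zext i1 b) is 0 or 1,
  // so X + (sext b) == X - (zext b) for both values of b.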
// add X, (sextinreg Y i1) -> sub X, (and Y 1)
if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
if (TN->getVT() == MVT::i1) {
SDLoc DL(N);
SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
DAG.getConstant(1, DL, VT));
return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt);
}
}
return SDValue();
}
SDValue DAGCombiner::visitADDC(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// If the flag result is dead, turn this into an ADD.
if (!N->hasAnyUseOfValue(1))
return CombineTo(N, DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE,
SDLoc(N), MVT::Glue));
// canonicalize constant to RHS.
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (N0C && !N1C)
return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N1, N0);
// fold (addc x, 0) -> x + no carry out
if (isNullConstant(N1))
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
SDLoc(N), MVT::Glue));
// fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
APInt LHSZero, LHSOne;
APInt RHSZero, RHSOne;
DAG.computeKnownBits(N0, LHSZero, LHSOne);
if (LHSZero.getBoolValue()) {
DAG.computeKnownBits(N1, RHSZero, RHSOne);
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
// If all possibly-set bits on the RHS are clear on the LHS, return an OR.
if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
return CombineTo(N, DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE,
SDLoc(N), MVT::Glue));
}
return SDValue();
}
SDValue DAGCombiner::visitADDE(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CarryIn = N->getOperand(2);
// canonicalize constant to RHS
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (N0C && !N1C)
return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(),
N1, N0, CarryIn);
// fold (adde x, y, false) -> (addc x, y)
if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1);
return SDValue();
}
// Since it may not be valid to emit a fold to zero for vector initializers,
// check if we can before folding.
static SDValue tryFoldToZero(SDLoc DL, const TargetLowering &TLI, EVT VT,
SelectionDAG &DAG,
bool LegalOperations, bool LegalTypes) {
if (!VT.isVector())
return DAG.getConstant(0, DL, VT);
if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
return DAG.getConstant(0, DL, VT);
return SDValue();
}
SDValue DAGCombiner::visitSUB(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (sub x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
}
// fold (sub x, x) -> 0
// FIXME: Refactor this and xor and other similar operations together.
if (N0 == N1)
return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);
// fold (sub c1, c2) -> c1-c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::SUB, SDLoc(N), VT, N0C, N1C);
// fold (sub x, c) -> (add x, -c)
if (N1C) {
SDLoc DL(N);
return DAG.getNode(ISD::ADD, DL, VT, N0,
DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
}
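  // This also holds for c == INT_MIN: negation wraps back to INT_MIN in
  // two's complement, and (add x, INT_MIN) equals (sub x, INT_MIN) bit for
  // bit.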
// Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
if (isAllOnesConstant(N0))
return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
// fold A-(A-B) -> B
if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
return N1.getOperand(1);
// fold (A+B)-A -> B
if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
return N0.getOperand(1);
// fold (A+B)-B -> A
if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
return N0.getOperand(0);
// fold C2-(A+C1) -> (C2-C1)-A
ConstantSDNode *N1C1 = N1.getOpcode() != ISD::ADD ? nullptr :
dyn_cast<ConstantSDNode>(N1.getOperand(1).getNode());
if (N1.getOpcode() == ISD::ADD && N0C && N1C1) {
SDLoc DL(N);
SDValue NewC = DAG.getConstant(N0C->getAPIntValue() - N1C1->getAPIntValue(),
DL, VT);
return DAG.getNode(ISD::SUB, DL, VT, NewC,
N1.getOperand(0));
}
// fold ((A+(B+or-C))-B) -> A+or-C
if (N0.getOpcode() == ISD::ADD &&
(N0.getOperand(1).getOpcode() == ISD::SUB ||
N0.getOperand(1).getOpcode() == ISD::ADD) &&
N0.getOperand(1).getOperand(0) == N1)
return DAG.getNode(N0.getOperand(1).getOpcode(), SDLoc(N), VT,
N0.getOperand(0), N0.getOperand(1).getOperand(1));
// fold ((A+(C+B))-B) -> A+C
if (N0.getOpcode() == ISD::ADD &&
N0.getOperand(1).getOpcode() == ISD::ADD &&
N0.getOperand(1).getOperand(1) == N1)
return DAG.getNode(ISD::ADD, SDLoc(N), VT,
N0.getOperand(0), N0.getOperand(1).getOperand(0));
// fold ((A-(B-C))-C) -> A-B
if (N0.getOpcode() == ISD::SUB &&
N0.getOperand(1).getOpcode() == ISD::SUB &&
N0.getOperand(1).getOperand(1) == N1)
return DAG.getNode(ISD::SUB, SDLoc(N), VT,
N0.getOperand(0), N0.getOperand(1).getOperand(0));
// If either operand of a sub is undef, the result is undef
if (N0.getOpcode() == ISD::UNDEF)
return N0;
if (N1.getOpcode() == ISD::UNDEF)
return N1;
// If the relocation model supports it, consider symbol offsets.
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
// fold (sub Sym, c) -> Sym-c
if (N1C && GA->getOpcode() == ISD::GlobalAddress)
return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
GA->getOffset() -
(uint64_t)N1C->getSExtValue());
// fold (sub Sym+c1, Sym+c2) -> c1-c2
if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
if (GA->getGlobal() == GB->getGlobal())
return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
SDLoc(N), VT);
}
// sub X, (sextinreg Y i1) -> add X, (and Y 1)
if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
if (TN->getVT() == MVT::i1) {
SDLoc DL(N);
SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
DAG.getConstant(1, DL, VT));
return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt);
}
}
return SDValue();
}
SDValue DAGCombiner::visitSUBC(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// If the flag result is dead, turn this into an SUB.
if (!N->hasAnyUseOfValue(1))
return CombineTo(N, DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
MVT::Glue));
// fold (subc x, x) -> 0 + no borrow
if (N0 == N1) {
SDLoc DL(N);
return CombineTo(N, DAG.getConstant(0, DL, VT),
DAG.getNode(ISD::CARRY_FALSE, DL,
MVT::Glue));
}
// fold (subc x, 0) -> x + no borrow
if (isNullConstant(N1))
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
MVT::Glue));
// Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow
if (isAllOnesConstant(N0))
return CombineTo(N, DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0),
DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
MVT::Glue));
return SDValue();
}
SDValue DAGCombiner::visitSUBE(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CarryIn = N->getOperand(2);
// fold (sube x, y, false) -> (subc x, y)
if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1);
return SDValue();
}
SDValue DAGCombiner::visitMUL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// fold (mul x, undef) -> 0
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(N), VT);
bool N0IsConst = false;
bool N1IsConst = false;
bool N1IsOpaqueConst = false;
bool N0IsOpaqueConst = false;
APInt ConstValue0, ConstValue1;
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0);
N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
} else {
N0IsConst = isa<ConstantSDNode>(N0);
if (N0IsConst) {
ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
N0IsOpaqueConst = cast<ConstantSDNode>(N0)->isOpaque();
}
N1IsConst = isa<ConstantSDNode>(N1);
if (N1IsConst) {
ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque();
}
}
// fold (mul c1, c2) -> c1*c2
if (N0IsConst && N1IsConst && !N0IsOpaqueConst && !N1IsOpaqueConst)
return DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT,
N0.getNode(), N1.getNode());
// canonicalize constant to RHS (vector doesn't have to splat)
if (isConstantIntBuildVectorOrConstantInt(N0) &&
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0);
// fold (mul x, 0) -> 0
if (N1IsConst && ConstValue1 == 0)
return N1;
// We require a splat of the entire scalar bit width for non-contiguous
// bit patterns.
bool IsFullSplat =
ConstValue1.getBitWidth() == VT.getScalarType().getSizeInBits();
// fold (mul x, 1) -> x
if (N1IsConst && ConstValue1 == 1 && IsFullSplat)
return N0;
// fold (mul x, -1) -> 0-x
if (N1IsConst && ConstValue1.isAllOnesValue()) {
SDLoc DL(N);
return DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT), N0);
}
// fold (mul x, (1 << c)) -> x << c
if (N1IsConst && !N1IsOpaqueConst && ConstValue1.isPowerOf2() &&
IsFullSplat) {
SDLoc DL(N);
return DAG.getNode(ISD::SHL, DL, VT, N0,
DAG.getConstant(ConstValue1.logBase2(), DL,
getShiftAmountTy(N0.getValueType())));
}
// fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2() &&
IsFullSplat) {
unsigned Log2Val = (-ConstValue1).logBase2();
SDLoc DL(N);
// FIXME: If the input is something that is easily negated (e.g. a
// single-use add), we should put the negate there.
return DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT),
DAG.getNode(ISD::SHL, DL, VT, N0,
DAG.getConstant(Log2Val, DL,
getShiftAmountTy(N0.getValueType()))));
}
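  // e.g. (mul x, 16) becomes (shl x, 4) and (mul x, -8) becomes
  // (sub 0, (shl x, 3)), avoiding a general multiply in both cases.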
APInt Val;
// (mul (shl X, c1), c2) -> (mul X, c2 << c1)
if (N1IsConst && N0.getOpcode() == ISD::SHL &&
(isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
isa<ConstantSDNode>(N0.getOperand(1)))) {
SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT,
N1, N0.getOperand(1));
AddToWorklist(C3.getNode());
return DAG.getNode(ISD::MUL, SDLoc(N), VT,
N0.getOperand(0), C3);
}
// Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
// use.
{
SDValue Sh(nullptr,0), Y(nullptr,0);
// Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
if (N0.getOpcode() == ISD::SHL &&
(isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
isa<ConstantSDNode>(N0.getOperand(1))) &&
N0.getNode()->hasOneUse()) {
Sh = N0; Y = N1;
} else if (N1.getOpcode() == ISD::SHL &&
isa<ConstantSDNode>(N1.getOperand(1)) &&
N1.getNode()->hasOneUse()) {
Sh = N1; Y = N0;
}
if (Sh.getNode()) {
SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT,
Sh.getOperand(0), Y);
return DAG.getNode(ISD::SHL, SDLoc(N), VT,
Mul, Sh.getOperand(1));
}
}
// fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
if (N1IsConst && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
(isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
isa<ConstantSDNode>(N0.getOperand(1))))
return DAG.getNode(ISD::ADD, SDLoc(N), VT,
DAG.getNode(ISD::MUL, SDLoc(N0), VT,
N0.getOperand(0), N1),
DAG.getNode(ISD::MUL, SDLoc(N1), VT,
N0.getOperand(1), N1));
// reassociate mul
if (SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1))
return RMUL;
return SDValue();
}
SDValue DAGCombiner::visitSDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (sdiv c1, c2) -> c1/c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
if (N0C && N1C && !N0C->isOpaque() && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SDIV, SDLoc(N), VT, N0C, N1C);
// fold (sdiv X, 1) -> X
if (N1C && N1C->isOne())
return N0;
// fold (sdiv X, -1) -> 0-X
if (N1C && N1C->isAllOnesValue()) {
SDLoc DL(N);
return DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT), N0);
}
// If we know the sign bits of both operands are zero, strength reduce to a
  // udiv instead. Handles (X&15) /s 4 -> (X&15) >> 2
if (!VT.isVector()) {
if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::UDIV, SDLoc(N), N1.getValueType(),
N0, N1);
}
// fold (sdiv X, pow2) -> simple ops after legalize
// FIXME: We check for the exact bit here because the generic lowering gives
// better results in that case. The target-specific lowering should learn how
// to handle exact sdivs efficiently.
if (N1C && !N1C->isNullValue() && !N1C->isOpaque() &&
!cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact() &&
(N1C->getAPIntValue().isPowerOf2() ||
(-N1C->getAPIntValue()).isPowerOf2())) {
// If dividing by powers of two is cheap, then don't perform the following
// fold.
if (TLI.isPow2SDivCheap())
return SDValue();
// Target-specific implementation of sdiv x, pow2.
SDValue Res = BuildSDIVPow2(N);
if (Res.getNode())
return Res;
unsigned lg2 = N1C->getAPIntValue().countTrailingZeros();
SDLoc DL(N);
// Splat the sign bit into the register
SDValue SGN =
DAG.getNode(ISD::SRA, DL, VT, N0,
DAG.getConstant(VT.getScalarSizeInBits() - 1, DL,
getShiftAmountTy(N0.getValueType())));
AddToWorklist(SGN.getNode());
// Add (N0 < 0) ? abs2 - 1 : 0;
SDValue SRL =
DAG.getNode(ISD::SRL, DL, VT, SGN,
DAG.getConstant(VT.getScalarSizeInBits() - lg2, DL,
getShiftAmountTy(SGN.getValueType())));
SDValue ADD = DAG.getNode(ISD::ADD, DL, VT, N0, SRL);
AddToWorklist(SRL.getNode());
AddToWorklist(ADD.getNode()); // Divide by pow2
SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, ADD,
DAG.getConstant(lg2, DL,
getShiftAmountTy(ADD.getValueType())));
// If we're dividing by a positive value, we're done. Otherwise, we must
// negate the result.
if (N1C->getAPIntValue().isNonNegative())
return SRA;
AddToWorklist(SRA.getNode());
return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
}
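  // Worked example of the expansion above for i32 sdiv by 4 (lg2 == 2) with
  // N0 == -7: SGN = -7 >>s 31 = -1, SRL = -1 >>u 30 = 3, ADD = -7 + 3 = -4,
  // and SRA = -4 >>s 2 = -1, matching C's truncating division -7 / 4 == -1.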
// If integer divide is expensive and we satisfy the requirements, emit an
// alternate sequence.
if (N1C && !TLI.isIntDivCheap()) {
SDValue Op = BuildSDIV(N);
if (Op.getNode()) return Op;
}
// undef / X -> 0
if (N0.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(N), VT);
// X / undef -> undef
if (N1.getOpcode() == ISD::UNDEF)
return N1;
return SDValue();
}
SDValue DAGCombiner::visitUDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (udiv c1, c2) -> c1/c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
if (N0C && N1C)
if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, SDLoc(N), VT,
N0C, N1C))
return Folded;
// fold (udiv x, (1 << c)) -> x >>u c
if (N1C && !N1C->isOpaque() && N1C->getAPIntValue().isPowerOf2()) {
SDLoc DL(N);
return DAG.getNode(ISD::SRL, DL, VT, N0,
DAG.getConstant(N1C->getAPIntValue().logBase2(), DL,
getShiftAmountTy(N0.getValueType())));
}
// fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
if (N1.getOpcode() == ISD::SHL) {
if (ConstantSDNode *SHC = getAsNonOpaqueConstant(N1.getOperand(0))) {
if (SHC->getAPIntValue().isPowerOf2()) {
EVT ADDVT = N1.getOperand(1).getValueType();
SDLoc DL(N);
SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT,
N1.getOperand(1),
DAG.getConstant(SHC->getAPIntValue()
.logBase2(),
DL, ADDVT));
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
}
}
}
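  // e.g. (udiv x, 8) becomes (srl x, 3), and (udiv x, (shl 4, y)) becomes
  // (srl x, (add y, 2)), since dividing by 4 << y shifts right by y + 2.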
// fold (udiv x, c) -> alternate
if (N1C && !TLI.isIntDivCheap()) {
SDValue Op = BuildUDIV(N);
if (Op.getNode()) return Op;
}
// undef / X -> 0
if (N0.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(N), VT);
// X / undef -> undef
if (N1.getOpcode() == ISD::UNDEF)
return N1;
return SDValue();
}
SDValue DAGCombiner::visitSREM(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// fold (srem c1, c2) -> c1%c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
if (N0C && N1C)
if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::SREM, SDLoc(N), VT,
N0C, N1C))
return Folded;
// If we know the sign bits of both operands are zero, strength reduce to a
// urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15
if (!VT.isVector()) {
if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::UREM, SDLoc(N), VT, N0, N1);
}
// If X/C can be simplified by the division-by-constant logic, lower
// X%C to the equivalent of X-X/C*C.
if (N1C && !N1C->isNullValue()) {
SDValue Div = DAG.getNode(ISD::SDIV, SDLoc(N), VT, N0, N1);
AddToWorklist(Div.getNode());
SDValue OptimizedDiv = combine(Div.getNode());
if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT,
OptimizedDiv, N1);
SDValue Sub = DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, Mul);
AddToWorklist(Mul.getNode());
return Sub;
}
}
// undef % X -> 0
if (N0.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(N), VT);
// X % undef -> undef
if (N1.getOpcode() == ISD::UNDEF)
return N1;
return SDValue();
}
SDValue DAGCombiner::visitUREM(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// fold (urem c1, c2) -> c1%c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
if (N0C && N1C)
if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UREM, SDLoc(N), VT,
N0C, N1C))
return Folded;
// fold (urem x, pow2) -> (and x, pow2-1)
if (N1C && !N1C->isNullValue() && !N1C->isOpaque() &&
N1C->getAPIntValue().isPowerOf2()) {
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT, N0,
DAG.getConstant(N1C->getAPIntValue() - 1, DL, VT));
}
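  // e.g. (urem x, 16) becomes (and x, 15); the fold below generalizes this
  // to a runtime power of two by materializing the mask as (shl pow2, y) - 1.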
// fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
if (N1.getOpcode() == ISD::SHL) {
if (ConstantSDNode *SHC = getAsNonOpaqueConstant(N1.getOperand(0))) {
if (SHC->getAPIntValue().isPowerOf2()) {
SDLoc DL(N);
SDValue Add =
DAG.getNode(ISD::ADD, DL, VT, N1,
DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL,
VT));
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0, Add);
}
}
}
// If X/C can be simplified by the division-by-constant logic, lower
// X%C to the equivalent of X-X/C*C.
if (N1C && !N1C->isNullValue()) {
SDValue Div = DAG.getNode(ISD::UDIV, SDLoc(N), VT, N0, N1);
AddToWorklist(Div.getNode());
SDValue OptimizedDiv = combine(Div.getNode());
if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT,
OptimizedDiv, N1);
SDValue Sub = DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, Mul);
AddToWorklist(Mul.getNode());
return Sub;
}
}
// undef % X -> 0
if (N0.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(N), VT);
// X % undef -> undef
if (N1.getOpcode() == ISD::UNDEF)
return N1;
return SDValue();
}
SDValue DAGCombiner::visitMULHS(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
// fold (mulhs x, 0) -> 0
if (isNullConstant(N1))
return N1;
// fold (mulhs x, 1) -> (sra x, size(x)-1)
  if (isOneConstant(N1)) {
    return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
                       DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
                                       DL,
                                       getShiftAmountTy(N0.getValueType())));
  }
// fold (mulhs x, undef) -> 0
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(N), VT);
// If the type twice as wide is legal, transform the mulhs to a wider multiply
// plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
DAG.getConstant(SimpleSize, DL,
getShiftAmountTy(N1.getValueType())));
return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
}
}
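  // e.g. an i16 mulhs on a target with a legal i32 multiply becomes
  // (trunc (srl (mul (sext x), (sext y)), 16)), recovering the high half.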
return SDValue();
}
SDValue DAGCombiner::visitMULHU(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
// fold (mulhu x, 0) -> 0
if (isNullConstant(N1))
return N1;
// fold (mulhu x, 1) -> 0
if (isOneConstant(N1))
return DAG.getConstant(0, DL, N0.getValueType());
// fold (mulhu x, undef) -> 0
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, DL, VT);
// If the type twice as wide is legal, transform the mulhu to a wider multiply
// plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
DAG.getConstant(SimpleSize, DL,
getShiftAmountTy(N1.getValueType())));
return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
}
}
return SDValue();
}
/// Perform optimizations common to nodes that compute two values. LoOp and
/// HiOp give the opcodes for the two computations that are being performed.
/// Return the combined value if a simplification was made, or a null SDValue
/// otherwise.
SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp) {
// If the high half is not needed, just compute the low half.
bool HiExists = N->hasAnyUseOfValue(1);
if (!HiExists &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
return CombineTo(N, Res, Res);
}
// If the low half is not needed, just compute the high half.
bool LoExists = N->hasAnyUseOfValue(0);
if (!LoExists &&
(!LegalOperations ||
TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
return CombineTo(N, Res, Res);
}
// If both halves are used, return as it is.
if (LoExists && HiExists)
return SDValue();
// If the two computed results can be simplified separately, separate them.
if (LoExists) {
SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
AddToWorklist(Lo.getNode());
SDValue LoOpt = combine(Lo.getNode());
if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
(!LegalOperations ||
TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
return CombineTo(N, LoOpt, LoOpt);
}
if (HiExists) {
SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
AddToWorklist(Hi.getNode());
SDValue HiOpt = combine(Hi.getNode());
if (HiOpt.getNode() && HiOpt != Hi &&
(!LegalOperations ||
TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
return CombineTo(N, HiOpt, HiOpt);
}
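  // e.g. a (umul_lohi x, y) whose high result has no uses is rewritten above
  // into a plain (mul x, y); the dead two-result node is removed later.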
return SDValue();
}
SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS);
if (Res.getNode()) return Res;
EVT VT = N->getValueType(0);
SDLoc DL(N);
  // If the type twice as wide is legal, transform this into a wider multiply
  // plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
// Compute the high part as N1.
Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
DAG.getConstant(SimpleSize, DL,
getShiftAmountTy(Lo.getValueType())));
Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
// Compute the low part as N0.
Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
return CombineTo(N, Lo, Hi);
}
}
return SDValue();
}
SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU);
if (Res.getNode()) return Res;
EVT VT = N->getValueType(0);
SDLoc DL(N);
  // If the type twice as wide is legal, transform this into a wider multiply
  // plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
// Compute the high part as N1.
Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
DAG.getConstant(SimpleSize, DL,
getShiftAmountTy(Lo.getValueType())));
Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
// Compute the low part as N0.
Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
return CombineTo(N, Lo, Hi);
}
}
return SDValue();
}
SDValue DAGCombiner::visitSMULO(SDNode *N) {
// (smulo x, 2) -> (saddo x, x)
if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
if (C2->getAPIntValue() == 2)
return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(),
N->getOperand(0), N->getOperand(0));
return SDValue();
}
SDValue DAGCombiner::visitUMULO(SDNode *N) {
// (umulo x, 2) -> (uaddo x, x)
if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
if (C2->getAPIntValue() == 2)
return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(),
N->getOperand(0), N->getOperand(0));
return SDValue();
}
SDValue DAGCombiner::visitSDIVREM(SDNode *N) {
SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM);
if (Res.getNode()) return Res;
return SDValue();
}
SDValue DAGCombiner::visitUDIVREM(SDNode *N) {
SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM);
if (Res.getNode()) return Res;
return SDValue();
}
/// If this is a binary operator with two operands of the same opcode, try to
/// simplify it.
SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
EVT VT = N0.getValueType();
assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
// Bail early if none of these transforms apply.
if (N0.getNode()->getNumOperands() == 0) return SDValue();
// For each of OP in AND/OR/XOR:
// fold (OP (zext x), (zext y)) -> (zext (OP x, y))
// fold (OP (sext x), (sext y)) -> (sext (OP x, y))
// fold (OP (aext x), (aext y)) -> (aext (OP x, y))
// fold (OP (bswap x), (bswap y)) -> (bswap (OP x, y))
// fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
//
// do not sink logical op inside of a vector extend, since it may combine
// into a vsetcc.
EVT Op0VT = N0.getOperand(0).getValueType();
if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND ||
N0.getOpcode() == ISD::BSWAP ||
// Avoid infinite looping with PromoteIntBinOp.
(N0.getOpcode() == ISD::ANY_EXTEND &&
(!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
(N0.getOpcode() == ISD::TRUNCATE &&
(!TLI.isZExtFree(VT, Op0VT) ||
!TLI.isTruncateFree(Op0VT, VT)) &&
TLI.isTypeLegal(Op0VT))) &&
!VT.isVector() &&
Op0VT == N1.getOperand(0).getValueType() &&
(!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
N0.getOperand(0).getValueType(),
N0.getOperand(0), N1.getOperand(0));
AddToWorklist(ORNode.getNode());
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, ORNode);
}
// For each of OP in SHL/SRL/SRA/AND...
// fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
// fold (or (OP x, z), (OP y, z)) -> (OP (or x, y), z)
// fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
N0.getOperand(1) == N1.getOperand(1)) {
SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
N0.getOperand(0).getValueType(),
N0.getOperand(0), N1.getOperand(0));
AddToWorklist(ORNode.getNode());
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
ORNode, N0.getOperand(1));
}
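  // e.g. (and (srl x, 8), (srl y, 8)) becomes (srl (and x, y), 8), trading
  // two shifts for one; this is safe because both shift amounts are the same
  // value z.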
// Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
// Only perform this optimization after type legalization and before
  // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
// adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
// we don't want to undo this promotion.
// We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
// on scalars.
if ((N0.getOpcode() == ISD::BITCAST ||
N0.getOpcode() == ISD::SCALAR_TO_VECTOR) &&
Level == AfterLegalizeTypes) {
SDValue In0 = N0.getOperand(0);
SDValue In1 = N1.getOperand(0);
EVT In0Ty = In0.getValueType();
EVT In1Ty = In1.getValueType();
SDLoc DL(N);
// If both incoming values are integers, and the original types are the
// same.
if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1);
SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op);
AddToWorklist(Op.getNode());
return BC;
}
}
// Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
// Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
// If both shuffles use the same mask, and both shuffle within a single
// vector, then it is worthwhile to move the swizzle after the operation.
// The type-legalizer generates this pattern when loading illegal
// vector types from memory. In many cases this allows additional shuffle
// optimizations.
// There are other cases where moving the shuffle after the xor/and/or
// is profitable even if shuffles don't perform a swizzle.
// If both shuffles use the same mask, and both shuffles have the same first
// or second operand, then it might still be profitable to move the shuffle
// after the xor/and/or operation.
if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) {
ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0);
ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1);
assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() &&
"Inputs to shuffles are not the same type");
// Check that both shuffles use the same mask. The masks are known to be of
// the same length because the result vector type is the same.
// Check also that shuffles have only one use to avoid introducing extra
// instructions.
if (SVN0->hasOneUse() && SVN1->hasOneUse() &&
SVN0->getMask().equals(SVN1->getMask())) {
SDValue ShOp = N0->getOperand(1);
// Don't try to fold this node if it requires introducing a
// build vector of all zeros that might be illegal at this stage.
if (N->getOpcode() == ISD::XOR && ShOp.getOpcode() != ISD::UNDEF) {
if (!LegalTypes)
ShOp = DAG.getConstant(0, SDLoc(N), VT);
else
ShOp = SDValue();
}
      // (AND (shuf (A, C), shuf (B, C))) -> shuf (AND (A, B), C)
      // (OR  (shuf (A, C), shuf (B, C))) -> shuf (OR  (A, B), C)
      // (XOR (shuf (A, C), shuf (B, C))) -> shuf (XOR (A, B), V_0)
if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) {
SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
N0->getOperand(0), N1->getOperand(0));
AddToWorklist(NewNode.getNode());
return DAG.getVectorShuffle(VT, SDLoc(N), NewNode, ShOp,
&SVN0->getMask()[0]);
}
// Don't try to fold this node if it requires introducing a
// build vector of all zeros that might be illegal at this stage.
ShOp = N0->getOperand(0);
if (N->getOpcode() == ISD::XOR && ShOp.getOpcode() != ISD::UNDEF) {
if (!LegalTypes)
ShOp = DAG.getConstant(0, SDLoc(N), VT);
else
ShOp = SDValue();
}
      // (AND (shuf (C, A), shuf (C, B))) -> shuf (C, AND (A, B))
      // (OR  (shuf (C, A), shuf (C, B))) -> shuf (C, OR  (A, B))
      // (XOR (shuf (C, A), shuf (C, B))) -> shuf (V_0, XOR (A, B))
if (N0->getOperand(0) == N1->getOperand(0) && ShOp.getNode()) {
SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
N0->getOperand(1), N1->getOperand(1));
AddToWorklist(NewNode.getNode());
return DAG.getVectorShuffle(VT, SDLoc(N), ShOp, NewNode,
&SVN0->getMask()[0]);
}
}
}
return SDValue();
}
/// This contains all DAGCombine rules which reduce two values combined by
/// an And operation to a single value. This makes them reusable in the context
/// of visitSELECT(). Rules involving constants are not included as
/// visitSELECT() already handles those cases.
SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1,
SDNode *LocReference) {
EVT VT = N1.getValueType();
// fold (and x, undef) -> 0
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(LocReference), VT);
// fold (and (setcc x), (setcc y)) -> (setcc (and x, y))
SDValue LL, LR, RL, RR, CC0, CC1;
if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
LL.getValueType().isInteger()) {
// fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0)
if (isNullConstant(LR) && Op1 == ISD::SETEQ) {
SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
LR.getValueType(), LL, RL);
AddToWorklist(ORNode.getNode());
return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1);
}
if (isAllOnesConstant(LR)) {
// fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1)
if (Op1 == ISD::SETEQ) {
SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(N0),
LR.getValueType(), LL, RL);
AddToWorklist(ANDNode.getNode());
return DAG.getSetCC(SDLoc(LocReference), VT, ANDNode, LR, Op1);
}
// fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1)
if (Op1 == ISD::SETGT) {
SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
LR.getValueType(), LL, RL);
AddToWorklist(ORNode.getNode());
return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1);
}
}
}
// Simplify (and (setne X, 0), (setne X, -1)) -> (setuge (add X, 1), 2)
if (LL == RL && isa<ConstantSDNode>(LR) && isa<ConstantSDNode>(RR) &&
Op0 == Op1 && LL.getValueType().isInteger() &&
Op0 == ISD::SETNE && ((isNullConstant(LR) && isAllOnesConstant(RR)) ||
(isAllOnesConstant(LR) && isNullConstant(RR)))) {
SDLoc DL(N0);
SDValue ADDNode = DAG.getNode(ISD::ADD, DL, LL.getValueType(),
LL, DAG.getConstant(1, DL,
LL.getValueType()));
AddToWorklist(ADDNode.getNode());
return DAG.getSetCC(SDLoc(LocReference), VT, ADDNode,
DAG.getConstant(2, DL, LL.getValueType()),
ISD::SETUGE);
}
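    // The transform above holds because X != 0 && X != -1 is equivalent to
    // X + 1 not being in {0, 1}, i.e. (X + 1) >=u 2 after wraparound.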
// canonicalize equivalent to ll == rl
if (LL == RR && LR == RL) {
Op1 = ISD::getSetCCSwappedOperands(Op1);
std::swap(RL, RR);
}
if (LL == RL && LR == RR) {
bool isInteger = LL.getValueType().isInteger();
ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger);
if (Result != ISD::SETCC_INVALID &&
(!LegalOperations ||
(TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) &&
TLI.isOperationLegal(ISD::SETCC,
getSetCCResultType(N0.getSimpleValueType())))))
return DAG.getSetCC(SDLoc(LocReference), N0.getValueType(),
LL, LR, Result);
}
}
if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
VT.getSizeInBits() <= 64) {
if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
APInt ADDC = ADDI->getAPIntValue();
if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
// Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
// immediate for an add, but it is legal if its top c2 bits are set,
// transform the ADD so the immediate doesn't need to be materialized
// in a register.
if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
SRLI->getZExtValue());
if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
ADDC |= Mask;
if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
SDLoc DL(N0);
SDValue NewAdd =
DAG.getNode(ISD::ADD, DL, VT,
N0.getOperand(0), DAG.getConstant(ADDC, DL, VT));
CombineTo(N0.getNode(), NewAdd);
// Return N so it doesn't get rechecked!
return SDValue(LocReference, 0);
}
}
}
}
}
}
return SDValue();
}
SDValue DAGCombiner::visitAND(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N1.getValueType();
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (and x, 0) -> 0, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
// do not return N0, because undef node may exist in N0
return DAG.getConstant(
APInt::getNullValue(
N0.getValueType().getScalarType().getSizeInBits()),
SDLoc(N), N0.getValueType());
if (ISD::isBuildVectorAllZeros(N1.getNode()))
// do not return N1, because undef node may exist in N1
return DAG.getConstant(
APInt::getNullValue(
N1.getValueType().getScalarType().getSizeInBits()),
SDLoc(N), N1.getValueType());
// fold (and x, -1) -> x, vector edition
if (ISD::isBuildVectorAllOnes(N0.getNode()))
return N1;
if (ISD::isBuildVectorAllOnes(N1.getNode()))
return N0;
}
// fold (and c1, c2) -> c1&c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (isConstantIntBuildVectorOrConstantInt(N0) &&
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
// fold (and x, -1) -> x
if (isAllOnesConstant(N1))
return N0;
// if (and x, c) is known to be zero, return 0
unsigned BitWidth = VT.getScalarType().getSizeInBits();
if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(BitWidth)))
return DAG.getConstant(0, SDLoc(N), VT);
// reassociate and
if (SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1))
return RAND;
// fold (and (or x, C), D) -> D if (C & D) == D
if (N1C && N0.getOpcode() == ISD::OR)
if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue())
return N1;
// fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
SDValue N0Op0 = N0.getOperand(0);
APInt Mask = ~N1C->getAPIntValue();
Mask = Mask.trunc(N0Op0.getValueSizeInBits());
if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
N0.getValueType(), N0Op0);
// Replace uses of the AND with uses of the Zero extend node.
CombineTo(N, Zext);
// We actually want to replace all uses of the any_extend with the
// zero_extend, to avoid duplicating things. This will later cause this
// AND to be folded.
CombineTo(N0.getNode(), Zext);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
// (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must
// already be zero by virtue of the width of the base type of the load.
//
// the 'X' node here can either be nothing or an extract_vector_elt to catch
// more cases.
if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
N0.getOperand(0).getOpcode() == ISD::LOAD) ||
N0.getOpcode() == ISD::LOAD) {
LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ?
N0 : N0.getOperand(0) );
// Get the constant (if applicable) the zero'th operand is being ANDed with.
// This can be a pure constant or a vector splat, in which case we treat the
// vector as a scalar and use the splat value.
APInt Constant = APInt::getNullValue(1);
if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
Constant = C->getAPIntValue();
} else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) {
APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef,
SplatBitSize, HasAnyUndefs);
if (IsSplat) {
// Undef bits can contribute to a possible optimisation if set, so
// set them.
SplatValue |= SplatUndef;
// The splat value may be something like "0x00FFFFFF", which means 0 for
// the first vector value and FF for the rest, repeating. We need a mask
// that will apply equally to all members of the vector, so AND all the
// lanes of the constant together.
EVT VT = Vector->getValueType(0);
unsigned BitWidth = VT.getVectorElementType().getSizeInBits();
// If the splat value has been compressed to a bitlength lower
// than the size of the vector lane, we need to re-expand it to
// the lane size.
if (BitWidth > SplatBitSize)
for (SplatValue = SplatValue.zextOrTrunc(BitWidth);
SplatBitSize < BitWidth;
SplatBitSize = SplatBitSize * 2)
SplatValue |= SplatValue.shl(SplatBitSize);
// Make sure that variable 'Constant' is only set if 'SplatBitSize' is a
// multiple of 'BitWidth'. Otherwise, we could propagate a wrong value.
if (SplatBitSize % BitWidth == 0) {
Constant = APInt::getAllOnesValue(BitWidth);
for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i)
Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth);
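          // e.g. a two-lane i8 splat with SplatBitSize == 16 and SplatValue
          // 0x00FF yields Constant = 0xFF & 0x00 = 0: no single per-lane mask
          // matches both lanes, so the conservative value is used.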
}
}
}
// If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is
// actually legal and isn't going to get expanded, else this is a false
// optimisation.
bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD,
Load->getValueType(0),
Load->getMemoryVT());
// Resize the constant to the same size as the original memory access before
// extension. If it is still the AllOnesValue then this AND is completely
// unneeded.
Constant =
Constant.zextOrTrunc(Load->getMemoryVT().getScalarType().getSizeInBits());
bool B;
switch (Load->getExtensionType()) {
default: B = false; break;
case ISD::EXTLOAD: B = CanZextLoadProfitably; break;
case ISD::ZEXTLOAD:
case ISD::NON_EXTLOAD: B = true; break;
}
if (B && Constant.isAllOnesValue()) {
// If the load type was an EXTLOAD, convert to ZEXTLOAD in order to
// preserve semantics once we get rid of the AND.
SDValue NewLoad(Load, 0);
if (Load->getExtensionType() == ISD::EXTLOAD) {
NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD,
Load->getValueType(0), SDLoc(Load),
Load->getChain(), Load->getBasePtr(),
Load->getOffset(), Load->getMemoryVT(),
Load->getMemOperand());
// Replace uses of the EXTLOAD with the new ZEXTLOAD.
if (Load->getNumValues() == 3) {
// PRE/POST_INC loads have 3 values.
SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1),
NewLoad.getValue(2) };
CombineTo(Load, To, 3, true);
} else {
CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
}
}
// Fold the AND away, taking care not to fold to the old load node if we
// replaced it.
CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (and (load x), 255) -> (zextload x, i8)
// fold (and (extload x, i16), 255) -> (zextload x, i8)
// fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
if (N1C && (N0.getOpcode() == ISD::LOAD ||
(N0.getOpcode() == ISD::ANY_EXTEND &&
N0.getOperand(0).getOpcode() == ISD::LOAD))) {
bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND;
LoadSDNode *LN0 = HasAnyExt
? cast<LoadSDNode>(N0.getOperand(0))
: cast<LoadSDNode>(N0);
if (LN0->getExtensionType() != ISD::SEXTLOAD &&
LN0->isUnindexed() && N0.hasOneUse() && SDValue(LN0, 0).hasOneUse()) {
uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue())){
EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
EVT LoadedVT = LN0->getMemoryVT();
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
if (ExtVT == LoadedVT &&
(!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy,
ExtVT))) {
SDValue NewLoad =
DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy,
LN0->getChain(), LN0->getBasePtr(), ExtVT,
LN0->getMemOperand());
AddToWorklist(N);
CombineTo(LN0, NewLoad, NewLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// Do not change the width of a volatile load.
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
if (!LN0->isVolatile() && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() &&
(!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy,
ExtVT))) {
EVT PtrType = LN0->getOperand(1).getValueType();
unsigned Alignment = LN0->getAlignment();
SDValue NewPtr = LN0->getBasePtr();
// For big endian targets, we need to add an offset to the pointer
// to load the correct bytes. For little endian systems, we merely
// need to read fewer bytes from the same pointer.
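// For example, narrowing an i32 load to an i16 zextload on a big-endian
// target needs PtrOff = 4 - 2 = 2, since the least significant bytes sit
// at the higher addresses.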
if (DAG.getDataLayout().isBigEndian()) {
unsigned LVTStoreBytes = LoadedVT.getStoreSize();
unsigned EVTStoreBytes = ExtVT.getStoreSize();
unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
SDLoc DL(LN0);
NewPtr = DAG.getNode(ISD::ADD, DL, PtrType,
NewPtr, DAG.getConstant(PtrOff, DL, PtrType));
Alignment = MinAlign(Alignment, PtrOff);
}
AddToWorklist(NewPtr.getNode());
SDValue Load =
DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy,
LN0->getChain(), NewPtr,
LN0->getPointerInfo(),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
LN0->isInvariant(), Alignment, LN0->getAAInfo());
AddToWorklist(N);
CombineTo(LN0, Load, Load.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
}
}
if (SDValue Combined = visitANDLike(N0, N1, N))
return Combined;
// Simplify: (and (op x...), (op y...)) -> (op (and x, y))
if (N0.getOpcode() == N1.getOpcode()) {
SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
if (Tmp.getNode()) return Tmp;
}
// fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
// fold (and (sra)) -> (and (srl)) when possible.
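// For example, (and (sra x, 4), 0xFF) only demands bits that come from x
// itself, never the shifted-in sign bits, so the sra can become an srl.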
if (!VT.isVector() &&
SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (zext_inreg (extload x)) -> (zextload x)
if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
// If we zero all the possible extended bits, then we can turn this into
// a zextload if we are running before legalize or the operation is legal.
unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits();
if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
BitWidth - MemVT.getScalarType().getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
LN0->getChain(), LN0->getBasePtr(),
MemVT, LN0->getMemOperand());
AddToWorklist(N);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
// If we zero all the possible extended bits, then we can turn this into
// a zextload if we are running before legalize or the operation is legal.
unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits();
if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
BitWidth - MemVT.getScalarType().getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
LN0->getChain(), LN0->getBasePtr(),
MemVT, LN0->getMemOperand());
AddToWorklist(N);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const)
if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) {
SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
N0.getOperand(1), false);
if (BSwap.getNode())
return BSwap;
}
return SDValue();
}
/// Match (a >> 8) | (a << 8) as (bswap a) >> 16.
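/// For example, with i32 a == 0x0000CCDD:
/// ((a >> 8) & 0xff) | ((a << 8) & 0xff00) == 0xCC | 0xDD00 == 0xDDCC,
/// which is exactly (bswap a) >> 16 == 0xDDCC0000 >> 16.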
SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
bool DemandHighBits) {
if (!LegalOperations)
return SDValue();
EVT VT = N->getValueType(0);
if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
return SDValue();
if (!TLI.isOperationLegal(ISD::BSWAP, VT))
return SDValue();
// Recognize (and (shl a, 8), 0xff), (and (srl a, 8), 0xff00)
bool LookPassAnd0 = false;
bool LookPassAnd1 = false;
if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
std::swap(N0, N1);
if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
std::swap(N0, N1);
if (N0.getOpcode() == ISD::AND) {
if (!N0.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N01C || N01C->getZExtValue() != 0xFF00)
return SDValue();
N0 = N0.getOperand(0);
LookPassAnd0 = true;
}
if (N1.getOpcode() == ISD::AND) {
if (!N1.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
if (!N11C || N11C->getZExtValue() != 0xFF)
return SDValue();
N1 = N1.getOperand(0);
LookPassAnd1 = true;
}
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
std::swap(N0, N1);
if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
return SDValue();
if (!N0.getNode()->hasOneUse() ||
!N1.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
if (!N01C || !N11C)
return SDValue();
if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
return SDValue();
// Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
SDValue N00 = N0->getOperand(0);
if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
if (!N00.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
if (!N001C || N001C->getZExtValue() != 0xFF)
return SDValue();
N00 = N00.getOperand(0);
LookPassAnd0 = true;
}
SDValue N10 = N1->getOperand(0);
if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
if (!N10.getNode()->hasOneUse())
return SDValue();
ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
if (!N101C || N101C->getZExtValue() != 0xFF00)
return SDValue();
N10 = N10.getOperand(0);
LookPassAnd1 = true;
}
if (N00 != N10)
return SDValue();
// Make sure everything beyond the low halfword gets set to zero since the SRL
// by 16 will clear the top bits.
unsigned OpSizeInBits = VT.getSizeInBits();
if (DemandHighBits && OpSizeInBits > 16) {
// If the left-shift isn't masked out then the only way this is a bswap is
// if all bits beyond the low 8 are 0. In that case the entire pattern
// reduces to a left shift anyway: leave it for other parts of the combiner.
if (!LookPassAnd0)
return SDValue();
// However, if the right shift isn't masked out then it might be because
// it's not needed. See if we can spot that too.
if (!LookPassAnd1 &&
!DAG.MaskedValueIsZero(
N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16)))
return SDValue();
}
SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00);
if (OpSizeInBits > 16) {
SDLoc DL(N);
Res = DAG.getNode(ISD::SRL, DL, VT, Res,
DAG.getConstant(OpSizeInBits - 16, DL,
getShiftAmountTy(VT)));
}
return Res;
}
/// Return true if the specified node is an element that makes up a 32-bit
/// packed halfword byteswap.
/// ((x & 0x000000ff) << 8) |
/// ((x & 0x0000ff00) >> 8) |
/// ((x & 0x00ff0000) << 8) |
/// ((x & 0xff000000) >> 8)
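/// For example, the term (and (srl x, 8), 0xff) is recognized with Num == 0
/// and records x in Parts[0].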
static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) {
if (!N.getNode()->hasOneUse())
return false;
unsigned Opc = N.getOpcode();
if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
return false;
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!N1C)
return false;
unsigned Num;
switch (N1C->getZExtValue()) {
default:
return false;
case 0xFF: Num = 0; break;
case 0xFF00: Num = 1; break;
case 0xFF0000: Num = 2; break;
case 0xFF000000: Num = 3; break;
}
// Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
SDValue N0 = N.getOperand(0);
if (Opc == ISD::AND) {
if (Num == 0 || Num == 2) {
// (x >> 8) & 0xff
// (x >> 8) & 0xff0000
if (N0.getOpcode() != ISD::SRL)
return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!C || C->getZExtValue() != 8)
return false;
} else {
// (x << 8) & 0xff00
// (x << 8) & 0xff000000
if (N0.getOpcode() != ISD::SHL)
return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!C || C->getZExtValue() != 8)
return false;
}
} else if (Opc == ISD::SHL) {
// (x & 0xff) << 8
// (x & 0xff0000) << 8
if (Num != 0 && Num != 2)
return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!C || C->getZExtValue() != 8)
return false;
} else { // Opc == ISD::SRL
// (x & 0xff00) >> 8
// (x & 0xff000000) >> 8
if (Num != 1 && Num != 3)
return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!C || C->getZExtValue() != 8)
return false;
}
if (Parts[Num])
return false;
Parts[Num] = N0.getOperand(0).getNode();
return true;
}
/// Match a 32-bit packed halfword bswap. That is
/// ((x & 0x000000ff) << 8) |
/// ((x & 0x0000ff00) >> 8) |
/// ((x & 0x00ff0000) << 8) |
/// ((x & 0xff000000) >> 8)
/// => (rotl (bswap x), 16)
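/// For example, with x == 0xAABBCCDD the four masked terms evaluate to
/// 0x0000DD00, 0x000000CC, 0xBB000000 and 0x00AA0000, which OR together to
/// 0xBBAADDCC, i.e. rotl(bswap(0xAABBCCDD), 16) == rotl(0xDDCCBBAA, 16).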
SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
if (!LegalOperations)
return SDValue();
EVT VT = N->getValueType(0);
if (VT != MVT::i32)
return SDValue();
if (!TLI.isOperationLegal(ISD::BSWAP, VT))
return SDValue();
// Look for either
// (or (or (and), (and)), (or (and), (and)))
// (or (or (or (and), (and)), (and)), (and))
if (N0.getOpcode() != ISD::OR)
return SDValue();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
SDNode *Parts[4] = {};
if (N1.getOpcode() == ISD::OR &&
N00.getNumOperands() == 2 && N01.getNumOperands() == 2) {
// (or (or (and), (and)), (or (and), (and)))
SDValue N000 = N00.getOperand(0);
if (!isBSwapHWordElement(N000, Parts))
return SDValue();
SDValue N001 = N00.getOperand(1);
if (!isBSwapHWordElement(N001, Parts))
return SDValue();
SDValue N010 = N01.getOperand(0);
if (!isBSwapHWordElement(N010, Parts))
return SDValue();
SDValue N011 = N01.getOperand(1);
if (!isBSwapHWordElement(N011, Parts))
return SDValue();
} else {
// (or (or (or (and), (and)), (and)), (and))
if (!isBSwapHWordElement(N1, Parts))
return SDValue();
if (!isBSwapHWordElement(N01, Parts))
return SDValue();
if (N00.getOpcode() != ISD::OR)
return SDValue();
SDValue N000 = N00.getOperand(0);
if (!isBSwapHWordElement(N000, Parts))
return SDValue();
SDValue N001 = N00.getOperand(1);
if (!isBSwapHWordElement(N001, Parts))
return SDValue();
}
// Make sure the parts are all coming from the same node.
if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
return SDValue();
SDLoc DL(N);
SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT,
SDValue(Parts[0], 0));
// Result of the bswap should be rotated by 16. If it's not legal, then
// do (x << 16) | (x >> 16).
SDValue ShAmt = DAG.getConstant(16, DL, getShiftAmountTy(VT));
if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt);
if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt);
return DAG.getNode(ISD::OR, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt),
DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt));
}
/// This contains all DAGCombine rules which reduce two values combined by
/// an Or operation to a single value \see visitANDLike().
SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *LocReference) {
EVT VT = N1.getValueType();
// fold (or x, undef) -> -1
if (!LegalOperations &&
(N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) {
EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()),
SDLoc(LocReference), VT);
}
// fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
SDValue LL, LR, RL, RR, CC0, CC1;
if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
if (LR == RR && Op0 == Op1 && LL.getValueType().isInteger()) {
// fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
// fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0)
if (isNullConstant(LR) && (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(LR),
LR.getValueType(), LL, RL);
AddToWorklist(ORNode.getNode());
return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1);
}
// fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
// fold (or (setgt X, -1), (setgt Y, -1)) -> (setgt (and X, Y), -1)
if (isAllOnesConstant(LR) && (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(LR),
LR.getValueType(), LL, RL);
AddToWorklist(ANDNode.getNode());
return DAG.getSetCC(SDLoc(LocReference), VT, ANDNode, LR, Op1);
}
}
// canonicalize equivalent to ll == rl
if (LL == RR && LR == RL) {
Op1 = ISD::getSetCCSwappedOperands(Op1);
std::swap(RL, RR);
}
if (LL == RL && LR == RR) {
bool isInteger = LL.getValueType().isInteger();
ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
if (Result != ISD::SETCC_INVALID &&
(!LegalOperations ||
(TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) &&
TLI.isOperationLegal(ISD::SETCC,
getSetCCResultType(N0.getValueType())))))
return DAG.getSetCC(SDLoc(LocReference), N0.getValueType(),
LL, LR, Result);
}
}
// (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
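// For example, with C1 == 0xFF00 and C2 == 0x00FF: if the low byte of X and
// the high byte of Y are known zero, then
// (or (and X, 0xFF00), (and Y, 0x00FF)) == (and (or X, Y), 0xFFFF).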
if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND &&
// Don't increase # computations.
(N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
// We can only do this xform if we know that bits from X that are set in C2
// but not in C1 are already zero. Likewise for Y.
if (const ConstantSDNode *N0O1C =
getAsNonOpaqueConstant(N0.getOperand(1))) {
if (const ConstantSDNode *N1O1C =
getAsNonOpaqueConstant(N1.getOperand(1))) {
const APInt &LHSMask = N0O1C->getAPIntValue();
const APInt &RHSMask = N1O1C->getAPIntValue();
if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
N0.getOperand(0), N1.getOperand(0));
SDLoc DL(LocReference);
return DAG.getNode(ISD::AND, DL, VT, X,
DAG.getConstant(LHSMask | RHSMask, DL, VT));
}
}
}
}
// (or (and X, M), (and X, N)) -> (and X, (or M, N))
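// This is just the distributive law: (X & M) | (X & N) == X & (M | N).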
if (N0.getOpcode() == ISD::AND &&
N1.getOpcode() == ISD::AND &&
N0.getOperand(0) == N1.getOperand(0) &&
// Don't increase # computations.
(N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
N0.getOperand(1), N1.getOperand(1));
return DAG.getNode(ISD::AND, SDLoc(LocReference), VT, N0.getOperand(0), X);
}
return SDValue();
}
SDValue DAGCombiner::visitOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N1.getValueType();
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (or x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N1;
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
// fold (or x, -1) -> -1, vector edition
if (ISD::isBuildVectorAllOnes(N0.getNode()))
// do not return N0, because an undef node may exist in N0
return DAG.getConstant(
APInt::getAllOnesValue(
N0.getValueType().getScalarType().getSizeInBits()),
SDLoc(N), N0.getValueType());
if (ISD::isBuildVectorAllOnes(N1.getNode()))
// do not return N1, because an undef node may exist in N1
return DAG.getConstant(
APInt::getAllOnesValue(
N1.getValueType().getScalarType().getSizeInBits()),
SDLoc(N), N1.getValueType());
// fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask1)
// fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf B, A, Mask2)
// Do this only if the resulting shuffle is legal.
if (isa<ShuffleVectorSDNode>(N0) &&
isa<ShuffleVectorSDNode>(N1) &&
// Avoid folding a node with illegal type.
TLI.isTypeLegal(VT) &&
N0->getOperand(1) == N1->getOperand(1) &&
ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode())) {
bool CanFold = true;
unsigned NumElts = VT.getVectorNumElements();
const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
// We construct two shuffle masks:
// - Mask1 is a shuffle mask for a shuffle with N0 as the first operand
// and N1 as the second operand.
// - Mask2 is a shuffle mask for a shuffle with N1 as the first operand
// and N0 as the second operand.
// We do this because OR is commutable and therefore there might be
// two ways to fold this node into a shuffle.
SmallVector<int,4> Mask1;
SmallVector<int,4> Mask2;
for (unsigned i = 0; i != NumElts && CanFold; ++i) {
int M0 = SV0->getMaskElt(i);
int M1 = SV1->getMaskElt(i);
// Both shuffle indexes are undef. Propagate Undef.
if (M0 < 0 && M1 < 0) {
Mask1.push_back(M0);
Mask2.push_back(M0);
continue;
}
if (M0 < 0 || M1 < 0 ||
(M0 < (int)NumElts && M1 < (int)NumElts) ||
(M0 >= (int)NumElts && M1 >= (int)NumElts)) {
CanFold = false;
break;
}
Mask1.push_back(M0 < (int)NumElts ? M0 : M1 + NumElts);
Mask2.push_back(M1 < (int)NumElts ? M1 : M0 + NumElts);
}
if (CanFold) {
// Fold this sequence only if the resulting shuffle is 'legal'.
if (TLI.isShuffleMaskLegal(Mask1, VT))
return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(0),
N1->getOperand(0), &Mask1[0]);
if (TLI.isShuffleMaskLegal(Mask2, VT))
return DAG.getVectorShuffle(VT, SDLoc(N), N1->getOperand(0),
N0->getOperand(0), &Mask2[0]);
}
}
}
// fold (or c1, c2) -> c1|c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (isConstantIntBuildVectorOrConstantInt(N0) &&
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
// fold (or x, 0) -> x
if (isNullConstant(N1))
return N0;
// fold (or x, -1) -> -1
if (isAllOnesConstant(N1))
return N1;
// fold (or x, c) -> c iff (x & ~c) == 0
if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
return N1;
if (SDValue Combined = visitORLike(N0, N1, N))
return Combined;
// Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
SDValue BSwap = MatchBSwapHWord(N, N0, N1);
if (BSwap.getNode())
return BSwap;
BSwap = MatchBSwapHWordLow(N, N0, N1);
if (BSwap.getNode())
return BSwap;
// reassociate or
if (SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1))
return ROR;
// Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
// iff (c1 & c2) != 0.
if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
isa<ConstantSDNode>(N0.getOperand(1))) {
ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) {
if (SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N1), VT,
N1C, C1))
return DAG.getNode(
ISD::AND, SDLoc(N), VT,
DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1), COR);
return SDValue();
}
}
// Simplify: (or (op x...), (op y...)) -> (op (or x, y))
if (N0.getOpcode() == N1.getOpcode()) {
SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
if (Tmp.getNode()) return Tmp;
}
// See if this is some rotate idiom.
if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N)))
return SDValue(Rot, 0);
// Simplify the operands using demanded-bits information.
if (!VT.isVector() &&
SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
/// Match "(X shl/srl V1) & V2" where V2 may not be present.
static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
if (Op.getOpcode() == ISD::AND) {
if (isa<ConstantSDNode>(Op.getOperand(1))) {
Mask = Op.getOperand(1);
Op = Op.getOperand(0);
} else {
return false;
}
}
if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
Shift = Op;
return true;
}
return false;
}
// Return true if we can prove that, whenever Neg and Pos are both in the
// range [0, OpSize), Neg == (Pos == 0 ? 0 : OpSize - Pos). This means that
// for two opposing shifts shift1 and shift2 and a value X with OpSize bits:
//
// (or (shift1 X, Neg), (shift2 X, Pos))
//
// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
// in direction shift1 by Neg. The range [0, OpSize) means that we only need
// to consider shift amounts with defined behavior.
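// For example, for OpSize == 32 this accepts both Neg == (sub 32, Pos) and
// Neg == (and (sub 32, Pos), 31), allowing (or (shift1 X, Neg),
// (shift2 X, Pos)) to become a single rotate.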
static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned OpSize) {
// If OpSize is a power of 2 then:
//
// (a) (Pos == 0 ? 0 : OpSize - Pos) == (OpSize - Pos) & (OpSize - 1)
// (b) Neg == Neg & (OpSize - 1) whenever Neg is in [0, OpSize).
//
// So if OpSize is a power of 2 and Neg is (and Neg', OpSize-1), we check
// for the stronger condition:
//
// Neg & (OpSize - 1) == (OpSize - Pos) & (OpSize - 1) [A]
//
// for all Neg and Pos. Since Neg & (OpSize - 1) == Neg' & (OpSize - 1)
// we can just replace Neg with Neg' for the rest of the function.
//
// In other cases we check for the even stronger condition:
//
// Neg == OpSize - Pos [B]
//
// for all Neg and Pos. Note that the (or ...) then invokes undefined
// behavior if Pos == 0 (and consequently Neg == OpSize).
//
// We could actually use [A] whenever OpSize is a power of 2, but the
// only extra cases that it would match are those uninteresting ones
// where Neg and Pos are never in range at the same time. E.g. for
// OpSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
// as well as (sub 32, Pos), but:
//
// (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
//
// always invokes undefined behavior for 32-bit X.
//
// Below, Mask == OpSize - 1 when using [A] and is all-ones otherwise.
unsigned MaskLoBits = 0;
if (Neg.getOpcode() == ISD::AND &&
isPowerOf2_64(OpSize) &&
Neg.getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(Neg.getOperand(1))->getAPIntValue() == OpSize - 1) {
Neg = Neg.getOperand(0);
MaskLoBits = Log2_64(OpSize);
}
// Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
if (Neg.getOpcode() != ISD::SUB)
return false;
ConstantSDNode *NegC = dyn_cast<ConstantSDNode>(Neg.getOperand(0));
if (!NegC)
return false;
SDValue NegOp1 = Neg.getOperand(1);
// On the RHS of [A], if Pos is Pos' & (OpSize - 1), just replace Pos with
// Pos'. The truncation is redundant for the purpose of the equality.
if (MaskLoBits &&
Pos.getOpcode() == ISD::AND &&
Pos.getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() == OpSize - 1)
Pos = Pos.getOperand(0);
// The condition we need is now:
//
// (NegC - NegOp1) & Mask == (OpSize - Pos) & Mask
//
// If NegOp1 == Pos then we need:
//
// OpSize & Mask == NegC & Mask
//
// (because "x & Mask" is a truncation and distributes through subtraction).
APInt Width;
if (Pos == NegOp1)
Width = NegC->getAPIntValue();
// Check for cases where Pos has the form (add NegOp1, PosC) for some PosC.
// Then the condition we want to prove becomes:
//
// (NegC - NegOp1) & Mask == (OpSize - (NegOp1 + PosC)) & Mask
//
// which, again because "x & Mask" is a truncation, becomes:
//
// NegC & Mask == (OpSize - PosC) & Mask
// OpSize & Mask == (NegC + PosC) & Mask
else if (Pos.getOpcode() == ISD::ADD &&
Pos.getOperand(0) == NegOp1 &&
Pos.getOperand(1).getOpcode() == ISD::Constant)
Width = (cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() +
NegC->getAPIntValue());
else
return false;
// Now we just need to check that OpSize & Mask == Width & Mask.
if (MaskLoBits)
// OpSize & Mask is 0 since Mask is OpSize - 1.
return Width.getLoBits(MaskLoBits) == 0;
return Width == OpSize;
}
// A subroutine of MatchRotate used once we have found an OR of two opposite
// shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces
// to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
// former being preferred if supported. InnerPos and InnerNeg are Pos and
// Neg with outer conversions stripped away.
SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
SDValue Neg, SDValue InnerPos,
SDValue InnerNeg, unsigned PosOpcode,
unsigned NegOpcode, SDLoc DL) {
// fold (or (shl x, (*ext y)),
// (srl x, (*ext (sub 32, y)))) ->
// (rotl x, y) or (rotr x, (sub 32, y))
//
// fold (or (shl x, (*ext (sub 32, y))),
// (srl x, (*ext y))) ->
// (rotr x, y) or (rotl x, (sub 32, y))
EVT VT = Shifted.getValueType();
if (matchRotateSub(InnerPos, InnerNeg, VT.getSizeInBits())) {
bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
HasPos ? Pos : Neg).getNode();
}
return nullptr;
}
// MatchRotate - Handle an 'or' of two operands. If this is one of the many
// idioms for rotate, and if the target supports rotation instructions, generate
// a rot[lr].
SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
// Must be a legal type. Expanded and promoted types won't work with rotates.
EVT VT = LHS.getValueType();
if (!TLI.isTypeLegal(VT)) return nullptr;
// The target must have at least one rotate flavor.
bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT);
bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT);
if (!HasROTL && !HasROTR) return nullptr;
// Match "(X shl/srl V1) & V2" where V2 may not be present.
SDValue LHSShift; // The shift.
SDValue LHSMask; // AND value if any.
if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
return nullptr; // Not part of a rotate.
SDValue RHSShift; // The shift.
SDValue RHSMask; // AND value if any.
if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
return nullptr; // Not part of a rotate.
if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
return nullptr; // Not shifting the same value.
if (LHSShift.getOpcode() == RHSShift.getOpcode())
return nullptr; // Shifts must disagree.
// Canonicalize shl to left side in a shl/srl pair.
if (RHSShift.getOpcode() == ISD::SHL) {
std::swap(LHS, RHS);
std::swap(LHSShift, RHSShift);
std::swap(LHSMask , RHSMask );
}
unsigned OpSizeInBits = VT.getSizeInBits();
SDValue LHSShiftArg = LHSShift.getOperand(0);
SDValue LHSShiftAmt = LHSShift.getOperand(1);
SDValue RHSShiftArg = RHSShift.getOperand(0);
SDValue RHSShiftAmt = RHSShift.getOperand(1);
// fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
// fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
if (LHSShiftAmt.getOpcode() == ISD::Constant &&
RHSShiftAmt.getOpcode() == ISD::Constant) {
uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue();
uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue();
if ((LShVal + RShVal) != OpSizeInBits)
return nullptr;
SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);
// If there is an AND of either shifted operand, apply it to the result.
if (LHSMask.getNode() || RHSMask.getNode()) {
APInt Mask = APInt::getAllOnesValue(OpSizeInBits);
if (LHSMask.getNode()) {
APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal);
Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits;
}
if (RHSMask.getNode()) {
APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal);
Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits;
}
Rot = DAG.getNode(ISD::AND, DL, VT, Rot, DAG.getConstant(Mask, DL, VT));
}
return Rot.getNode();
}
// If there is a mask here, and we have a variable shift, we can't be sure
// that we're masking out the right stuff.
if (LHSMask.getNode() || RHSMask.getNode())
return nullptr;
// If the shift amount is sign/zext/any-extended just peel it off.
SDValue LExtOp0 = LHSShiftAmt;
SDValue RExtOp0 = RHSShiftAmt;
if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
(RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
LExtOp0 = LHSShiftAmt.getOperand(0);
RExtOp0 = RHSShiftAmt.getOperand(0);
}
SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt,
LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL);
if (TryL)
return TryL;
SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt,
RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL);
if (TryR)
return TryR;
return nullptr;
}
SDValue DAGCombiner::visitXOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (xor x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N1;
if (ISD::isBuildVectorAllZeros(N1.getNode()))
return N0;
}
// fold (xor undef, undef) -> 0. This is a common idiom (misuse).
if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(N), VT);
// fold (xor x, undef) -> undef
if (N0.getOpcode() == ISD::UNDEF)
return N0;
if (N1.getOpcode() == ISD::UNDEF)
return N1;
// fold (xor c1, c2) -> c1^c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::XOR, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (isConstantIntBuildVectorOrConstantInt(N0) &&
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
// fold (xor x, 0) -> x
if (isNullConstant(N1))
return N0;
// reassociate xor
if (SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1))
return RXOR;
// fold !(x cc y) -> (x !cc y)
SDValue LHS, RHS, CC;
if (TLI.isConstTrueVal(N1.getNode()) && isSetCCEquivalent(N0, LHS, RHS, CC)) {
bool isInt = LHS.getValueType().isInteger();
ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
isInt);
if (!LegalOperations ||
TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
switch (N0.getOpcode()) {
default:
llvm_unreachable("Unhandled SetCC Equivalent!");
case ISD::SETCC:
return DAG.getSetCC(SDLoc(N), VT, LHS, RHS, NotCC);
case ISD::SELECT_CC:
return DAG.getSelectCC(SDLoc(N), LHS, RHS, N0.getOperand(2),
N0.getOperand(3), NotCC);
}
}
}
// fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
if (isOneConstant(N1) && N0.getOpcode() == ISD::ZERO_EXTEND &&
N0.getNode()->hasOneUse() &&
isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
SDValue V = N0.getOperand(0);
SDLoc DL(N0);
V = DAG.getNode(ISD::XOR, DL, V.getValueType(), V,
DAG.getConstant(1, DL, V.getValueType()));
AddToWorklist(V.getNode());
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, V);
}
// fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
if (isOneConstant(N1) && VT == MVT::i1 &&
(N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
}
}
// fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
if (isAllOnesConstant(N1) &&
(N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
}
}
// fold (xor (and x, y), y) -> (and (not x), y)
if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
N0->getOperand(1) == N1) {
SDValue X = N0->getOperand(0);
SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
AddToWorklist(NotX.getNode());
return DAG.getNode(ISD::AND, SDLoc(N), VT, NotX, N1);
}
// fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
if (N1C && N0.getOpcode() == ISD::XOR) {
if (const ConstantSDNode *N00C = getAsNonOpaqueConstant(N0.getOperand(0))) {
SDLoc DL(N);
return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1),
DAG.getConstant(N1C->getAPIntValue() ^
N00C->getAPIntValue(), DL, VT));
}
if (const ConstantSDNode *N01C = getAsNonOpaqueConstant(N0.getOperand(1))) {
SDLoc DL(N);
return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
DAG.getConstant(N1C->getAPIntValue() ^
N01C->getAPIntValue(), DL, VT));
}
}
// fold (xor x, x) -> 0
if (N0 == N1)
return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);
// fold (xor (shl 1, x), -1) -> (rotl ~1, x)
// Here is a concrete example of this equivalence:
// i16 x == 14
// i16 shl == 1 << 14 == 16384 == 0b0100000000000000
// i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111
//
// =>
//
// i16 ~1 == 0b1111111111111110
// i16 rol(~1, 14) == 0b1011111111111111
//
// Some additional tips to help conceptualize this transform:
// - Try to see the operation as placing a single zero in a value of all ones.
// - There exists no value for x which would allow the result to contain zero.
// - Values of x larger than the bitwidth are undefined and do not require a
// consistent result.
// - Pushing the zero left requires shifting one-bits in from the right.
// A rotate left of ~1 is a nice way of achieving the desired result.
if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0.getOpcode() == ISD::SHL
&& isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) {
SDLoc DL(N);
return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT),
N0.getOperand(1));
}
// Simplify: xor (op x...), (op y...) -> (op (xor x, y))
if (N0.getOpcode() == N1.getOpcode()) {
SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
if (Tmp.getNode()) return Tmp;
}
// Simplify the expression using non-local knowledge.
if (!VT.isVector() &&
SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
/// Handle transforms common to the three shifts, when the shift amount is a
/// constant.
SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
SDNode *LHS = N->getOperand(0).getNode();
if (!LHS->hasOneUse()) return SDValue();
// We want to pull some binops through shifts, so that we have (and (shift))
// instead of (shift (and)), likewise for add, or, xor, etc. This sort of
// thing happens with address calculations, so it's important to canonicalize
// it.
bool HighBitSet = false; // Can we transform this if the high bit is set?
switch (LHS->getOpcode()) {
default: return SDValue();
case ISD::OR:
case ISD::XOR:
HighBitSet = false; // We can only transform sra if the high bit is clear.
break;
case ISD::AND:
HighBitSet = true; // We can only transform sra if the high bit is set.
break;
case ISD::ADD:
if (N->getOpcode() != ISD::SHL)
return SDValue(); // only shl(add) not sr[al](add).
HighBitSet = false; // We can only transform sra if the high bit is clear.
break;
}
// We require the RHS of the binop to be a constant and not opaque as well.
ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS->getOperand(1));
if (!BinOpCst) return SDValue();
// FIXME: disable this unless the input to the binop is a shift by a constant.
// If it is not a shift, it pessimizes some common cases like:
//
// void foo(int *X, int i) { X[i & 1235] = 1; }
// int bar(int *X, int i) { return X[i & 255]; }
SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
if ((BinOpLHSVal->getOpcode() != ISD::SHL &&
BinOpLHSVal->getOpcode() != ISD::SRA &&
BinOpLHSVal->getOpcode() != ISD::SRL) ||
!isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
return SDValue();
EVT VT = N->getValueType(0);
// If this is a signed shift right, and the high bit is modified by the
// logical operation, do not perform the transformation. The HighBitSet
// boolean indicates the value of the high bit of the constant which would
// cause it to be modified for this operation.
if (N->getOpcode() == ISD::SRA) {
bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
if (BinOpRHSSignSet != HighBitSet)
return SDValue();
}
if (!TLI.isDesirableToCommuteWithShift(LHS))
return SDValue();
// Fold the constants, shifting the binop RHS by the shift amount.
SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
N->getValueType(0),
LHS->getOperand(1), N->getOperand(1));
assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");
// Create the new shift.
SDValue NewShift = DAG.getNode(N->getOpcode(),
SDLoc(LHS->getOperand(0)),
VT, LHS->getOperand(0), N->getOperand(1));
// Create the new binop.
return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS);
}
SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
assert(N->getOpcode() == ISD::TRUNCATE);
assert(N->getOperand(0).getOpcode() == ISD::AND);
// (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
if (N->hasOneUse() && N->getOperand(0).hasOneUse()) {
SDValue N01 = N->getOperand(0).getOperand(1);
if (ConstantSDNode *N01C = isConstOrConstSplat(N01)) {
if (!N01C->isOpaque()) {
EVT TruncVT = N->getValueType(0);
SDValue N00 = N->getOperand(0).getOperand(0);
APInt TruncC = N01C->getAPIntValue();
TruncC = TruncC.trunc(TruncVT.getScalarSizeInBits());
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, TruncVT,
DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00),
DAG.getConstant(TruncC, DL, TruncVT));
}
}
}
return SDValue();
}
SDValue DAGCombiner::visitRotate(SDNode *N) {
// fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
if (N->getOperand(1).getOpcode() == ISD::TRUNCATE &&
N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) {
SDValue NewOp1 = distributeTruncateThroughAnd(N->getOperand(1).getNode());
if (NewOp1.getNode())
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
N->getOperand(0), NewOp1);
}
return SDValue();
}
SDValue DAGCombiner::visitSHL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarSizeInBits();
// fold vector ops
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
// If setcc produces all-one true value then:
// (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
if (N1CV && N1CV->isConstant()) {
if (N0.getOpcode() == ISD::AND) {
SDValue N00 = N0->getOperand(0);
SDValue N01 = N0->getOperand(1);
BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01);
if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC &&
TLI.getBooleanContents(N00.getOperand(0).getValueType()) ==
TargetLowering::ZeroOrNegativeOneBooleanContent) {
if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT,
N01CV, N1CV))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C);
}
} else {
N1C = isConstOrConstSplat(N1);
}
}
}
// fold (shl c1, c2) -> c1<<c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C);
// fold (shl 0, x) -> 0
if (isNullConstant(N0))
return N0;
// fold (shl x, c >= size(x)) -> undef
if (N1C && N1C->getAPIntValue().uge(OpSizeInBits))
return DAG.getUNDEF(VT);
// fold (shl x, 0) -> x
if (N1C && N1C->isNullValue())
return N0;
// fold (shl undef, x) -> 0
if (N0.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, SDLoc(N), VT);
// if (shl x, c) is known to be zero, return 0
if (DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(OpSizeInBits)))
return DAG.getConstant(0, SDLoc(N), VT);
// fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
if (NewOp1.getNode())
return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
}
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
if (N1C && N0.getOpcode() == ISD::SHL) {
if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
uint64_t c1 = N0C1->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
SDLoc DL(N);
if (c1 + c2 >= OpSizeInBits)
return DAG.getConstant(0, DL, VT);
return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
DAG.getConstant(c1 + c2, DL, N1.getValueType()));
}
}
// fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
// For this to be valid, the second form must not preserve any of the bits
// that are shifted out by the inner shift in the first form. This means
// the outer shift size must be >= the number of bits added by the ext.
// As a corollary, we don't care what kind of ext it is.
if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::ANY_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND) &&
N0.getOperand(0).getOpcode() == ISD::SHL) {
SDValue N0Op0 = N0.getOperand(0);
if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
uint64_t c1 = N0Op0C1->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
EVT InnerShiftVT = N0Op0.getValueType();
uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
if (c2 >= OpSizeInBits - InnerShiftSize) {
SDLoc DL(N0);
if (c1 + c2 >= OpSizeInBits)
return DAG.getConstant(0, DL, VT);
return DAG.getNode(ISD::SHL, DL, VT,
DAG.getNode(N0.getOpcode(), DL, VT,
N0Op0->getOperand(0)),
DAG.getConstant(c1 + c2, DL, N1.getValueType()));
}
}
}
// fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
// Only fold this if the inner zext has no other uses to avoid increasing
// the total number of instructions.
if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
N0.getOperand(0).getOpcode() == ISD::SRL) {
SDValue N0Op0 = N0.getOperand(0);
if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
uint64_t c1 = N0Op0C1->getZExtValue();
if (c1 < VT.getScalarSizeInBits()) {
uint64_t c2 = N1C->getZExtValue();
if (c1 == c2) {
SDValue NewOp0 = N0.getOperand(0);
EVT CountVT = NewOp0.getOperand(1).getValueType();
SDLoc DL(N);
SDValue NewSHL = DAG.getNode(ISD::SHL, DL, NewOp0.getValueType(),
NewOp0,
DAG.getConstant(c2, DL, CountVT));
AddToWorklist(NewSHL.getNode());
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
}
}
}
}
// fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
// fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C1-C2)) if C1 > C2
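// For example, (shl (srl exact x, 2), 3) == (shl x, 1), and
// (shl (srl exact x, 3), 1) == (srl exact x, 2), since the exact flag
// guarantees the srl discards only zero bits.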
if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
cast<BinaryWithFlagsSDNode>(N0)->Flags.hasExact()) {
if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
uint64_t C1 = N0C1->getZExtValue();
uint64_t C2 = N1C->getZExtValue();
SDLoc DL(N);
if (C1 <= C2)
return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
DAG.getConstant(C2 - C1, DL, N1.getValueType()));
return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0),
DAG.getConstant(C1 - C2, DL, N1.getValueType()));
}
}
// fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
//                               (and (srl x, (sub c1, c2)), MASK)
// Only fold this if the inner shift has no other uses -- if it does, folding
// this will increase the total number of instructions.
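// For example, for i32: (shl (srl x, 4), 6) == (and (shl x, 2), 0xFFFFFFC0).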
if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
uint64_t c1 = N0C1->getZExtValue();
if (c1 < OpSizeInBits) {
uint64_t c2 = N1C->getZExtValue();
APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
SDValue Shift;
if (c2 > c1) {
Mask = Mask.shl(c2 - c1);
SDLoc DL(N);
Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
DAG.getConstant(c2 - c1, DL, N1.getValueType()));
} else {
Mask = Mask.lshr(c1 - c2);
SDLoc DL(N);
Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
DAG.getConstant(c1 - c2, DL, N1.getValueType()));
}
SDLoc DL(N0);
return DAG.getNode(ISD::AND, DL, VT, Shift,
DAG.getConstant(Mask, DL, VT));
}
}
}
// fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
unsigned BitSize = VT.getScalarSizeInBits();
SDLoc DL(N);
SDValue HiBitsMask =
DAG.getConstant(APInt::getHighBitsSet(BitSize,
BitSize - N1C->getZExtValue()),
DL, VT);
return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0),
HiBitsMask);
}
// fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
// Variant of version done on multiply, except mul by a power of 2 is turned
// into a shift.
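// For example, (shl (add x, 3), 2) == (add (shl x, 2), 12).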
APInt Val;
if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
(isa<ConstantSDNode>(N0.getOperand(1)) ||
isConstantSplatVector(N0.getOperand(1).getNode(), Val))) {
SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
return DAG.getNode(ISD::ADD, SDLoc(N), VT, Shl0, Shl1);
}
if (N1C && !N1C->isOpaque()) {
SDValue NewSHL = visitShiftByConstant(N, N1C);
if (NewSHL.getNode())
return NewSHL;
}
return SDValue();
}
SDValue DAGCombiner::visitSRA(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
// fold vector ops
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
N1C = isConstOrConstSplat(N1);
}
// fold (sra c1, c2) -> c1 >>s c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);
// fold (sra 0, x) -> 0
if (isNullConstant(N0))
return N0;
// fold (sra -1, x) -> -1
if (isAllOnesConstant(N0))
return N0;
// fold (sra x, c >= size(x)) -> undef
if (N1C && N1C->getZExtValue() >= OpSizeInBits)
return DAG.getUNDEF(VT);
// fold (sra x, 0) -> x
if (N1C && N1C->isNullValue())
return N0;
// fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
// sext_inreg.
if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
if (VT.isVector())
ExtVT = EVT::getVectorVT(*DAG.getContext(),
ExtVT, VT.getVectorNumElements());
if ((!LegalOperations ||
TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
N0.getOperand(0), DAG.getValueType(ExtVT));
}
// fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
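// Unlike shl and srl, an over-wide sra amount can simply be clamped to
// OpSizeInBits - 1, since sra only ever shifts in copies of the sign bit.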
if (N1C && N0.getOpcode() == ISD::SRA) {
if (ConstantSDNode *C1 = isConstOrConstSplat(N0.getOperand(1))) {
unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
if (Sum >= OpSizeInBits)
Sum = OpSizeInBits - 1;
SDLoc DL(N);
return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0),
DAG.getConstant(Sum, DL, N1.getValueType()));
}
}
// fold (sra (shl X, m), (sub result_size, n))
//      -> (sign_extend (trunc (srl X, (sub (sub result_size, n), m)))) for
// result_size - n != m.
// If truncate is free for the target, sext(shl) is likely to result in better
// code.
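// For example, for i32 with m == 4 and result_size - n == 8:
// (sra (shl X, 4), 8) becomes (sign_extend (trunc (srl X, 4) to i24)).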
if (N0.getOpcode() == ISD::SHL && N1C) {
// Get the two constants of the shifts, CN0 = m, CN = n.
const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
if (N01C) {
LLVMContext &Ctx = *DAG.getContext();
// Determine what the truncate's result bitsize and type would be.
EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());
if (VT.isVector())
TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());
// Determine the residual right-shift amount.
int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
// If the shift is not a no-op (in which case this should be just a sign
// extend already), the type being truncated to is legal, sign_extend is legal
// on that type, and the truncate to that type is both legal and free,
// perform the transform.
if ((ShiftAmt > 0) &&
TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
TLI.isTruncateFree(VT, TruncVT)) {
SDLoc DL(N);
SDValue Amt = DAG.getConstant(ShiftAmt, DL,
getShiftAmountTy(N0.getOperand(0).getValueType()));
SDValue Shift = DAG.getNode(ISD::SRL, DL, VT,
N0.getOperand(0), Amt);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT,
Shift);
return DAG.getNode(ISD::SIGN_EXTEND, DL,
N->getValueType(0), Trunc);
}
}
}
// fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
if (NewOp1.getNode())
return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
}
// fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
// if c1 is equal to the number of bits the trunc removes
if (N0.getOpcode() == ISD::TRUNCATE &&
(N0.getOperand(0).getOpcode() == ISD::SRL ||
N0.getOperand(0).getOpcode() == ISD::SRA) &&
N0.getOperand(0).hasOneUse() &&
N0.getOperand(0).getOperand(1).hasOneUse() &&
N1C) {
SDValue N0Op0 = N0.getOperand(0);
if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) {
unsigned LargeShiftVal = LargeShift->getZExtValue();
EVT LargeVT = N0Op0.getValueType();
if (LargeVT.getScalarSizeInBits() - OpSizeInBits == LargeShiftVal) {
SDLoc DL(N);
SDValue Amt =
DAG.getConstant(LargeShiftVal + N1C->getZExtValue(), DL,
getShiftAmountTy(N0Op0.getOperand(0).getValueType()));
SDValue SRA = DAG.getNode(ISD::SRA, DL, LargeVT,
N0Op0.getOperand(0), Amt);
return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA);
}
}
}
// Simplify, based on bits shifted out of the LHS.
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// If the sign bit is known to be zero, switch this to a SRL.
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);
if (N1C && !N1C->isOpaque()) {
SDValue NewSRA = visitShiftByConstant(N, N1C);
if (NewSRA.getNode())
return NewSRA;
}
return SDValue();
}
SDValue DAGCombiner::visitSRL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
// fold vector ops
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (VT.isVector()) {
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
N1C = isConstOrConstSplat(N1);
}
// fold (srl c1, c2) -> c1 >>u c2
ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C);
// fold (srl 0, x) -> 0
if (isNullConstant(N0))
return N0;
// fold (srl x, c >= size(x)) -> undef
if (N1C && N1C->getZExtValue() >= OpSizeInBits)
return DAG.getUNDEF(VT);
// fold (srl x, 0) -> x
if (N1C && N1C->isNullValue())
return N0;
// if (srl x, c) is known to be zero, return 0
if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(OpSizeInBits)))
return DAG.getConstant(0, SDLoc(N), VT);
// fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
if (N1C && N0.getOpcode() == ISD::SRL) {
if (ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1))) {
uint64_t c1 = N01C->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
SDLoc DL(N);
if (c1 + c2 >= OpSizeInBits)
return DAG.getConstant(0, DL, VT);
return DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
DAG.getConstant(c1 + c2, DL, N1.getValueType()));
}
}
// fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
N0.getOperand(0).getOpcode() == ISD::SRL &&
isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
uint64_t c1 =
cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
EVT InnerShiftVT = N0.getOperand(0).getValueType();
EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
// This is only valid if OpSizeInBits + c1 == the size of the inner shift.
if (c1 + OpSizeInBits == InnerShiftSize) {
SDLoc DL(N0);
if (c1 + c2 >= InnerShiftSize)
return DAG.getConstant(0, DL, VT);
return DAG.getNode(ISD::TRUNCATE, DL, VT,
DAG.getNode(ISD::SRL, DL, InnerShiftVT,
N0.getOperand(0)->getOperand(0),
DAG.getConstant(c1 + c2, DL,
ShiftCountVT)));
}
}
// fold (srl (shl x, c), c) -> (and x, cst2)
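// For example, for i32: (srl (shl x, 8), 8) == (and x, 0x00FFFFFF).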
if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1) {
unsigned BitSize = N0.getScalarValueSizeInBits();
if (BitSize <= 64) {
uint64_t ShAmt = N1C->getZExtValue() + 64 - BitSize;
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0),
DAG.getConstant(~0ULL >> ShAmt, DL, VT));
}
}
// fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
// Shifting in all undef bits?
EVT SmallVT = N0.getOperand(0).getValueType();
unsigned BitSize = SmallVT.getScalarSizeInBits();
if (N1C->getZExtValue() >= BitSize)
return DAG.getUNDEF(VT);
if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
uint64_t ShiftAmt = N1C->getZExtValue();
SDLoc DL0(N0);
SDValue SmallShift = DAG.getNode(ISD::SRL, DL0, SmallVT,
N0.getOperand(0),
DAG.getConstant(ShiftAmt, DL0,
getShiftAmountTy(SmallVT)));
AddToWorklist(SmallShift.getNode());
APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
DAG.getConstant(Mask, DL, VT));
}
}
// fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign
// bit, which is unmodified by sra.
if (N1C && N1C->getZExtValue() + 1 == OpSizeInBits) {
if (N0.getOpcode() == ISD::SRA)
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
}
// fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
if (N1C && N0.getOpcode() == ISD::CTLZ &&
N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
APInt KnownZero, KnownOne;
DAG.computeKnownBits(N0.getOperand(0), KnownZero, KnownOne);
// If any of the input bits are KnownOne, then the input couldn't be all
// zeros, thus the result of the srl will always be zero.
if (KnownOne.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);
// If all of the bits input to the ctlz node are known to be zero, then
// the result of the ctlz is "32" and the result of the shift is one.
APInt UnknownBits = ~KnownZero;
if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT);
// Otherwise, check to see if there is exactly one bit input to the ctlz.
if ((UnknownBits & (UnknownBits - 1)) == 0) {
// Okay, we know that only the single bit specified by UnknownBits
// could be set on input to the CTLZ node. If this bit is set, the SRL
// will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
// to an SRL/XOR pair, which is likely to simplify more.
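// For example, for i32 where only bit 3 of the input may be set:
// x == 8 gives ctlz == 28 and 28 >> 5 == 0, while x == 0 gives ctlz == 32
// and 32 >> 5 == 1, matching (xor (srl x, 3), 1).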
unsigned ShAmt = UnknownBits.countTrailingZeros();
SDValue Op = N0.getOperand(0);
if (ShAmt) {
SDLoc DL(N0);
Op = DAG.getNode(ISD::SRL, DL, VT, Op,
DAG.getConstant(ShAmt, DL,
getShiftAmountTy(Op.getValueType())));
AddToWorklist(Op.getNode());
}
SDLoc DL(N);
return DAG.getNode(ISD::XOR, DL, VT,
Op, DAG.getConstant(1, DL, VT));
}
}
// fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
if (NewOp1.getNode())
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
}
// fold operands of srl based on knowledge that the low bits are not
// demanded.
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
if (N1C && !N1C->isOpaque()) {
SDValue NewSRL = visitShiftByConstant(N, N1C);
if (NewSRL.getNode())
return NewSRL;
}
// Attempt to convert a srl of a load into a narrower zero-extending load.
SDValue NarrowLoad = ReduceLoadWidth(N);
if (NarrowLoad.getNode())
return NarrowLoad;
// Here is a common situation. We want to optimize:
//
// %a = ...
// %b = and i32 %a, 2
// %c = srl i32 %b, 1
// brcond i32 %c ...
//
// into
//
// %a = ...
// %b = and %a, 2
// %c = setcc eq %b, 0
// brcond %c ...
//
// However, after the source operand of the SRL is optimized into an AND, the SRL
// itself may not be optimized further. Look for it and add the BRCOND into
// the worklist.
if (N->hasOneUse()) {
SDNode *Use = *N->use_begin();
if (Use->getOpcode() == ISD::BRCOND)
AddToWorklist(Use);
else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
// Also look past the truncate.
Use = *Use->use_begin();
if (Use->getOpcode() == ISD::BRCOND)
AddToWorklist(Use);
}
}
return SDValue();
}
SDValue DAGCombiner::visitBSWAP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (bswap c1) -> c2
if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0);
// fold (bswap (bswap x)) -> x
if (N0.getOpcode() == ISD::BSWAP)
return N0->getOperand(0);
return SDValue();
}
SDValue DAGCombiner::visitCTLZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ctlz c1) -> c2
if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ctlz_zero_undef c1) -> c2
if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitCTTZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (cttz c1) -> c2
if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (cttz_zero_undef c1) -> c2
if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitCTPOP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ctpop c1) -> c2
if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0);
return SDValue();
}
/// \brief Generate Min/Max node
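/// For example, (select (setolt LHS, RHS), LHS, RHS) becomes
/// (fminnum LHS, RHS) when FMINNUM is legal for VT; the mirrored
/// comparison produces FMAXNUM.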
static SDValue combineMinNumMaxNum(SDLoc DL, EVT VT, SDValue LHS, SDValue RHS,
SDValue True, SDValue False,
ISD::CondCode CC, const TargetLowering &TLI,
SelectionDAG &DAG) {
if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
return SDValue();
switch (CC) {
case ISD::SETOLT:
case ISD::SETOLE:
case ISD::SETLT:
case ISD::SETLE:
case ISD::SETULT:
case ISD::SETULE: {
unsigned Opcode = (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM;
if (TLI.isOperationLegal(Opcode, VT))
return DAG.getNode(Opcode, DL, VT, LHS, RHS);
return SDValue();
}
case ISD::SETOGT:
case ISD::SETOGE:
case ISD::SETGT:
case ISD::SETGE:
case ISD::SETUGT:
case ISD::SETUGE: {
unsigned Opcode = (LHS == True) ? ISD::FMAXNUM : ISD::FMINNUM;
if (TLI.isOperationLegal(Opcode, VT))
return DAG.getNode(Opcode, DL, VT, LHS, RHS);
return SDValue();
}
default:
return SDValue();
}
}
SDValue DAGCombiner::visitSELECT(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
EVT VT = N->getValueType(0);
EVT VT0 = N0.getValueType();
// fold (select C, X, X) -> X
if (N1 == N2)
return N1;
if (const ConstantSDNode *N0C = dyn_cast<const ConstantSDNode>(N0)) {
// fold (select true, X, Y) -> X
// fold (select false, X, Y) -> Y
return !N0C->isNullValue() ? N1 : N2;
}
// fold (select C, 1, X) -> (or C, X)
if (VT == MVT::i1 && isOneConstant(N1))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
// fold (select C, 0, 1) -> (xor C, 1)
  // We can't do this reliably if integer-based booleans have different contents
  // from floating-point-based booleans. This is because we can't tell whether we
// have an integer-based boolean or a floating-point-based boolean unless we
// can find the SETCC that produced it and inspect its operands. This is
// fairly easy if C is the SETCC node, but it can potentially be
// undiscoverable (or not reasonably discoverable). For example, it could be
// in another basic block or it could require searching a complicated
// expression.
if (VT.isInteger() &&
(VT0 == MVT::i1 || (VT0.isInteger() &&
TLI.getBooleanContents(false, false) ==
TLI.getBooleanContents(false, true) &&
TLI.getBooleanContents(false, false) ==
TargetLowering::ZeroOrOneBooleanContent)) &&
isNullConstant(N1) && isOneConstant(N2)) {
SDValue XORNode;
if (VT == VT0) {
SDLoc DL(N);
return DAG.getNode(ISD::XOR, DL, VT0,
N0, DAG.getConstant(1, DL, VT0));
}
SDLoc DL0(N0);
XORNode = DAG.getNode(ISD::XOR, DL0, VT0,
N0, DAG.getConstant(1, DL0, VT0));
AddToWorklist(XORNode.getNode());
if (VT.bitsGT(VT0))
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, XORNode);
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, XORNode);
}
// fold (select C, 0, X) -> (and (not C), X)
if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2);
}
// fold (select C, X, 1) -> (or (not C), X)
if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
}
// fold (select C, X, 0) -> (and C, X)
if (VT == MVT::i1 && isNullConstant(N2))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
// fold (select X, X, Y) -> (or X, Y)
// fold (select X, 1, Y) -> (or X, Y)
if (VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
// fold (select X, Y, X) -> (and X, Y)
// fold (select X, Y, 0) -> (and X, Y)
if (VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
// If we can fold this based on the true/false value, do so.
if (SimplifySelectOps(N, N1, N2))
return SDValue(N, 0); // Don't revisit N.
// fold selects based on a setcc into other things, such as min/max/abs
if (N0.getOpcode() == ISD::SETCC) {
// select x, y (fcmp lt x, y) -> fminnum x, y
// select x, y (fcmp gt x, y) -> fmaxnum x, y
//
// This is OK if we don't care about what happens if either operand is a
// NaN.
//
// FIXME: Instead of testing for UnsafeFPMath, this should be checking for
// no signed zeros as well as no nans.
const TargetOptions &Options = DAG.getTarget().Options;
if (Options.UnsafeFPMath &&
VT.isFloatingPoint() && N0.hasOneUse() &&
DAG.isKnownNeverNaN(N1) && DAG.isKnownNeverNaN(N2)) {
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
SDValue FMinMax =
combineMinNumMaxNum(SDLoc(N), VT, N0.getOperand(0), N0.getOperand(1),
N1, N2, CC, TLI, DAG);
if (FMinMax)
return FMinMax;
}
if ((!LegalOperations &&
TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
TLI.isOperationLegal(ISD::SELECT_CC, VT))
return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
N0.getOperand(0), N0.getOperand(1),
N1, N2, N0.getOperand(2));
return SimplifySelect(SDLoc(N), N0, N1, N2);
}
if (VT0 == MVT::i1) {
if (TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT)) {
// select (and Cond0, Cond1), X, Y
// -> select Cond0, (select Cond1, X, Y), Y
if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
SDValue Cond0 = N0->getOperand(0);
SDValue Cond1 = N0->getOperand(1);
SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
N1.getValueType(), Cond1, N1, N2);
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0,
InnerSelect, N2);
}
// select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
SDValue Cond0 = N0->getOperand(0);
SDValue Cond1 = N0->getOperand(1);
SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
N1.getValueType(), Cond1, N1, N2);
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0, N1,
InnerSelect);
}
}
// select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y
if (N1->getOpcode() == ISD::SELECT) {
SDValue N1_0 = N1->getOperand(0);
SDValue N1_1 = N1->getOperand(1);
SDValue N1_2 = N1->getOperand(2);
if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
// Create the actual and node if we can generate good code for it.
if (!TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT)) {
SDValue And = DAG.getNode(ISD::AND, SDLoc(N), N0.getValueType(),
N0, N1_0);
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), And,
N1_1, N2);
}
// Otherwise see if we can optimize the "and" to a better pattern.
if (SDValue Combined = visitANDLike(N0, N1_0, N))
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined,
N1_1, N2);
}
}
// select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
if (N2->getOpcode() == ISD::SELECT) {
SDValue N2_0 = N2->getOperand(0);
SDValue N2_1 = N2->getOperand(1);
SDValue N2_2 = N2->getOperand(2);
if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
// Create the actual or node if we can generate good code for it.
if (!TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT)) {
SDValue Or = DAG.getNode(ISD::OR, SDLoc(N), N0.getValueType(),
N0, N2_0);
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Or,
N1, N2_2);
}
// Otherwise see if we can optimize to a better pattern.
if (SDValue Combined = visitORLike(N0, N2_0, N))
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined,
N1, N2_2);
}
}
}
return SDValue();
}
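// Split a vector setcc whose result type needs splitting into two setccs on
// the low and high halves of the operands; e.g. (illustrative)
// (v8i1 setcc v8i32 a, b, cc) becomes (v4i1 setcc lo(a), lo(b), cc) and
// (v4i1 setcc hi(a), hi(b), cc).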
static
std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// Split the inputs.
SDValue Lo, Hi, LL, LH, RL, RH;
std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
return std::make_pair(Lo, Hi);
}
// This function assumes all the vselect's arguments are CONCAT_VECTOR
// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
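// For example (illustrative), with a condition of <0,0,-1,-1>:
// (vselect <0,0,-1,-1>, (concat A, B), (concat C, D)) -> (concat C, B).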
static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
SDLoc dl(N);
SDValue Cond = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
EVT VT = N->getValueType(0);
int NumElems = VT.getVectorNumElements();
assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
RHS.getOpcode() == ISD::CONCAT_VECTORS &&
Cond.getOpcode() == ISD::BUILD_VECTOR);
  // CONCAT_VECTORS can take an arbitrary number of arguments. We only care
  // about binary ones here.
if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
return SDValue();
// We're sure we have an even number of elements due to the
// concat_vectors we have as arguments to vselect.
  // Walk the first half of the BV, skipping UNDEF elements, and check that
  // all of the non-undef elements are the same constant.
ConstantSDNode *BottomHalf = nullptr;
for (int i = 0; i < NumElems / 2; ++i) {
if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF)
continue;
if (BottomHalf == nullptr)
BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
else if (Cond->getOperand(i).getNode() != BottomHalf)
return SDValue();
}
// Do the same for the second half of the BuildVector
ConstantSDNode *TopHalf = nullptr;
for (int i = NumElems / 2; i < NumElems; ++i) {
if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF)
continue;
if (TopHalf == nullptr)
TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
else if (Cond->getOperand(i).getNode() != TopHalf)
return SDValue();
}
assert(TopHalf && BottomHalf &&
"One half of the selector was all UNDEFs and the other was all the "
"same value. This should have been addressed before this function.");
return DAG.getNode(
ISD::CONCAT_VECTORS, dl, VT,
BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
}
SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
if (Level >= AfterLegalizeTypes)
return SDValue();
MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
SDValue Mask = MSC->getMask();
SDValue Data = MSC->getValue();
SDLoc DL(N);
// If the MSCATTER data type requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (Mask.getOpcode() != ISD::SETCC)
return SDValue();
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), Data.getValueType()) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue MaskLo, MaskHi, Lo, Hi;
std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MSC->getValueType(0));
SDValue Chain = MSC->getChain();
EVT MemoryVT = MSC->getMemoryVT();
unsigned Alignment = MSC->getOriginalAlignment();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue DataLo, DataHi;
std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
SDValue BasePtr = MSC->getBasePtr();
SDValue IndexLo, IndexHi;
std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MSC->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, MSC->getAAInfo(), MSC->getRanges());
SDValue OpsLo[] = { Chain, DataLo, MaskLo, BasePtr, IndexLo };
Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(),
DL, OpsLo, MMO);
SDValue OpsHi[] = {Chain, DataHi, MaskHi, BasePtr, IndexHi};
Hi = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataHi.getValueType(),
DL, OpsHi, MMO);
AddToWorklist(Lo.getNode());
AddToWorklist(Hi.getNode());
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
SDValue DAGCombiner::visitMSTORE(SDNode *N) {
if (Level >= AfterLegalizeTypes)
return SDValue();
  MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
SDValue Mask = MST->getMask();
SDValue Data = MST->getValue();
SDLoc DL(N);
// If the MSTORE data type requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (Mask.getOpcode() == ISD::SETCC) {
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), Data.getValueType()) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue MaskLo, MaskHi, Lo, Hi;
std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MST->getValueType(0));
SDValue Chain = MST->getChain();
SDValue Ptr = MST->getBasePtr();
EVT MemoryVT = MST->getMemoryVT();
unsigned Alignment = MST->getOriginalAlignment();
    // If Alignment is equal to the vector size in bytes,
    // use half of it for the second half of the store.
unsigned SecondHalfAlignment =
(Alignment == Data->getValueType(0).getSizeInBits()/8) ?
Alignment/2 : Alignment;
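    // E.g. (illustrative) a v8i32 store with alignment 32 splits into a
    // v4i32 store aligned to 32 and a v4i32 store at offset 16 aligned to 16.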
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue DataLo, DataHi;
std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MST->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, MST->getAAInfo(), MST->getRanges());
Lo = DAG.getMaskedStore(Chain, DL, DataLo, Ptr, MaskLo, LoMemVT, MMO,
MST->isTruncatingStore());
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, DL, Ptr.getValueType()));
MMO = DAG.getMachineFunction().
getMachineMemOperand(MST->getPointerInfo(),
MachineMemOperand::MOStore, HiMemVT.getStoreSize(),
SecondHalfAlignment, MST->getAAInfo(),
MST->getRanges());
Hi = DAG.getMaskedStore(Chain, DL, DataHi, Ptr, MaskHi, HiMemVT, MMO,
MST->isTruncatingStore());
AddToWorklist(Lo.getNode());
AddToWorklist(Hi.getNode());
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
return SDValue();
}
SDValue DAGCombiner::visitMGATHER(SDNode *N) {
if (Level >= AfterLegalizeTypes)
return SDValue();
  MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(N);
SDValue Mask = MGT->getMask();
SDLoc DL(N);
// If the MGATHER result requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (Mask.getOpcode() != ISD::SETCC)
return SDValue();
EVT VT = N->getValueType(0);
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), VT) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue MaskLo, MaskHi, Lo, Hi;
std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
SDValue Src0 = MGT->getValue();
SDValue Src0Lo, Src0Hi;
std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
SDValue Chain = MGT->getChain();
EVT MemoryVT = MGT->getMemoryVT();
unsigned Alignment = MGT->getOriginalAlignment();
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue BasePtr = MGT->getBasePtr();
SDValue Index = MGT->getIndex();
SDValue IndexLo, IndexHi;
std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MGT->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MGT->getAAInfo(), MGT->getRanges());
SDValue OpsLo[] = { Chain, Src0Lo, MaskLo, BasePtr, IndexLo };
Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoVT, DL, OpsLo,
MMO);
SDValue OpsHi[] = {Chain, Src0Hi, MaskHi, BasePtr, IndexHi};
Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiVT, DL, OpsHi,
MMO);
AddToWorklist(Lo.getNode());
AddToWorklist(Hi.getNode());
// Build a factor node to remember that this load is independent of the
// other one.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
DAG.ReplaceAllUsesOfValueWith(SDValue(MGT, 1), Chain);
SDValue GatherRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
SDValue RetOps[] = { GatherRes, Chain };
return DAG.getMergeValues(RetOps, DL);
}
SDValue DAGCombiner::visitMLOAD(SDNode *N) {
if (Level >= AfterLegalizeTypes)
return SDValue();
  MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
SDValue Mask = MLD->getMask();
SDLoc DL(N);
// If the MLOAD result requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (Mask.getOpcode() == ISD::SETCC) {
EVT VT = N->getValueType(0);
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), VT) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue MaskLo, MaskHi, Lo, Hi;
std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
SDValue Src0 = MLD->getSrc0();
SDValue Src0Lo, Src0Hi;
std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL);
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0));
SDValue Chain = MLD->getChain();
SDValue Ptr = MLD->getBasePtr();
EVT MemoryVT = MLD->getMemoryVT();
unsigned Alignment = MLD->getOriginalAlignment();
    // If Alignment is equal to the vector size in bytes,
    // use half of it for the second half of the load.
unsigned SecondHalfAlignment =
(Alignment == MLD->getValueType(0).getSizeInBits()/8) ?
Alignment/2 : Alignment;
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MLD->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MLD->getAAInfo(), MLD->getRanges());
Lo = DAG.getMaskedLoad(LoVT, DL, Chain, Ptr, MaskLo, Src0Lo, LoMemVT, MMO,
ISD::NON_EXTLOAD);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, DL, Ptr.getValueType()));
MMO = DAG.getMachineFunction().
getMachineMemOperand(MLD->getPointerInfo(),
MachineMemOperand::MOLoad, HiMemVT.getStoreSize(),
SecondHalfAlignment, MLD->getAAInfo(), MLD->getRanges());
Hi = DAG.getMaskedLoad(HiVT, DL, Chain, Ptr, MaskHi, Src0Hi, HiMemVT, MMO,
ISD::NON_EXTLOAD);
AddToWorklist(Lo.getNode());
AddToWorklist(Hi.getNode());
// Build a factor node to remember that this load is independent of the
// other one.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), Chain);
SDValue LoadRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
SDValue RetOps[] = { LoadRes, Chain };
return DAG.getMergeValues(RetOps, DL);
}
return SDValue();
}
SDValue DAGCombiner::visitVSELECT(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
SDLoc DL(N);
// Canonicalize integer abs.
// vselect (setg[te] X, 0), X, -X ->
// vselect (setgt X, -1), X, -X ->
// vselect (setl[te] X, 0), -X, X ->
// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
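  // E.g. for i32 X: Y is all-ones exactly when X < 0, so (X + Y) ^ Y
  // computes -X in that case and X otherwise, i.e. |X|.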
if (N0.getOpcode() == ISD::SETCC) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
bool isAbs = false;
bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
(ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) &&
N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1))
isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode());
else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) &&
N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1))
isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
if (isAbs) {
EVT VT = LHS.getValueType();
SDValue Shift = DAG.getNode(
ISD::SRA, DL, VT, LHS,
DAG.getConstant(VT.getScalarType().getSizeInBits() - 1, DL, VT));
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
AddToWorklist(Shift.getNode());
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
}
}
if (SimplifySelectOps(N, N1, N2))
return SDValue(N, 0); // Don't revisit N.
// If the VSELECT result requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
// prevents the type legalizer from unrolling SETCC into scalar comparisons
// and enables future optimizations (e.g. min/max pattern matching on X86).
if (N0.getOpcode() == ISD::SETCC) {
EVT VT = N->getValueType(0);
// Check if any splitting is required.
if (TLI.getTypeAction(*DAG.getContext(), VT) !=
TargetLowering::TypeSplitVector)
return SDValue();
SDValue Lo, Hi, CCLo, CCHi, LL, LH, RL, RH;
std::tie(CCLo, CCHi) = SplitVSETCC(N0.getNode(), DAG);
std::tie(LL, LH) = DAG.SplitVectorOperand(N, 1);
std::tie(RL, RH) = DAG.SplitVectorOperand(N, 2);
Lo = DAG.getNode(N->getOpcode(), DL, LL.getValueType(), CCLo, LL, RL);
Hi = DAG.getNode(N->getOpcode(), DL, LH.getValueType(), CCHi, LH, RH);
    // Add the new VSELECT nodes to the worklist in case they need to be split
    // again.
AddToWorklist(Lo.getNode());
AddToWorklist(Hi.getNode());
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
// Fold (vselect (build_vector all_ones), N1, N2) -> N1
if (ISD::isBuildVectorAllOnes(N0.getNode()))
return N1;
// Fold (vselect (build_vector all_zeros), N1, N2) -> N2
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return N2;
  // The ConvertSelectToConcatVector function assumes both the above
  // checks for (vselect (build_vector all_{ones,zeros}) ...) have been made
  // and addressed.
if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
N2.getOpcode() == ISD::CONCAT_VECTORS &&
ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
SDValue CV = ConvertSelectToConcatVector(N, DAG);
if (CV.getNode())
return CV;
}
return SDValue();
}
SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
SDValue N3 = N->getOperand(3);
SDValue N4 = N->getOperand(4);
ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
// fold select_cc lhs, rhs, x, x, cc -> x
if (N2 == N3)
return N2;
// Determine if the condition we're dealing with is constant
SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
N0, N1, CC, SDLoc(N), false);
if (SCC.getNode()) {
AddToWorklist(SCC.getNode());
if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
if (!SCCC->isNullValue())
return N2; // cond always true -> true val
else
return N3; // cond always false -> false val
} else if (SCC->getOpcode() == ISD::UNDEF) {
// When the condition is UNDEF, just return the first operand. This is
// coherent the DAG creation, no setcc node is created in this case
return N2;
} else if (SCC.getOpcode() == ISD::SETCC) {
// Fold to a simpler select_cc
return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(),
SCC.getOperand(0), SCC.getOperand(1), N2, N3,
SCC.getOperand(2));
}
}
// If we can fold this based on the true/false value, do so.
if (SimplifySelectOps(N, N2, N3))
return SDValue(N, 0); // Don't revisit N.
// fold select_cc into other things, such as min/max/abs
return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
}
SDValue DAGCombiner::visitSETCC(SDNode *N) {
return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
cast<CondCodeSDNode>(N->getOperand(2))->get(),
SDLoc(N));
}
/// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
/// a build_vector of constants.
/// This function is called by the DAGCombiner when visiting sext/zext/aext
/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
/// Vector extends are not folded if operations are legal; this is to
/// avoid introducing illegal build_vector dag nodes.
static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
SelectionDAG &DAG, bool LegalTypes,
bool LegalOperations) {
unsigned Opcode = N->getOpcode();
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
&& "Expected EXTEND dag node in input!");
// fold (sext c1) -> c1
// fold (zext c1) -> c1
// fold (aext c1) -> c1
if (isa<ConstantSDNode>(N0))
return DAG.getNode(Opcode, SDLoc(N), VT, N0).getNode();
  // fold (sext (build_vector AllConstants)) -> (build_vector AllConstants)
  // fold (zext (build_vector AllConstants)) -> (build_vector AllConstants)
  // fold (aext (build_vector AllConstants)) -> (build_vector AllConstants)
EVT SVT = VT.getScalarType();
if (!(VT.isVector() &&
(!LegalTypes || (!LegalOperations && TLI.isTypeLegal(SVT))) &&
ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
return nullptr;
// We can fold this node into a build_vector.
unsigned VTBits = SVT.getSizeInBits();
unsigned EVTBits = N0->getValueType(0).getScalarType().getSizeInBits();
SmallVector<SDValue, 8> Elts;
unsigned NumElts = VT.getVectorNumElements();
SDLoc DL(N);
for (unsigned i=0; i != NumElts; ++i) {
SDValue Op = N0->getOperand(i);
if (Op->getOpcode() == ISD::UNDEF) {
Elts.push_back(DAG.getUNDEF(SVT));
continue;
}
SDLoc DL(Op);
// Get the constant value and if needed trunc it to the size of the type.
// Nodes like build_vector might have constants wider than the scalar type.
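    // E.g. (illustrative) a v4i8 build_vector may carry i32 constant
    // operands; trim each one to the 8-bit source width before sign- or
    // zero-extending it to the destination scalar width.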
APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits);
if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT));
else
Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Elts).getNode();
}
// ExtendUsesToFormExtLoad - Try to extend uses of a load to enable the
// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
// transformation. Returns true if the extension is possible and the
// above-mentioned transformation is profitable.
static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
unsigned ExtOpc,
SmallVectorImpl<SDNode *> &ExtendNodes,
const TargetLowering &TLI) {
bool HasCopyToRegUses = false;
bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
UE = N0.getNode()->use_end();
UI != UE; ++UI) {
SDNode *User = *UI;
if (User == N)
continue;
if (UI.getUse().getResNo() != N0.getResNo())
continue;
// FIXME: Only extend SETCC N, N and SETCC N, c for now.
if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
// Sign bits will be lost after a zext.
return false;
bool Add = false;
for (unsigned i = 0; i != 2; ++i) {
SDValue UseOp = User->getOperand(i);
if (UseOp == N0)
continue;
if (!isa<ConstantSDNode>(UseOp))
return false;
Add = true;
}
if (Add)
ExtendNodes.push_back(User);
continue;
}
// If truncates aren't free and there are users we can't
// extend, it isn't worthwhile.
if (!isTruncFree)
return false;
// Remember if this value is live-out.
if (User->getOpcode() == ISD::CopyToReg)
HasCopyToRegUses = true;
}
if (HasCopyToRegUses) {
bool BothLiveOut = false;
for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
UI != UE; ++UI) {
SDUse &Use = UI.getUse();
if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
BothLiveOut = true;
break;
}
}
if (BothLiveOut)
// Both unextended and extended values are live out. There had better be
// a good reason for the transformation.
      return !ExtendNodes.empty();
}
return true;
}
void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
SDValue Trunc, SDValue ExtLoad, SDLoc DL,
ISD::NodeType ExtType) {
// Extend SetCC uses if necessary.
for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
SDNode *SetCC = SetCCs[i];
SmallVector<SDValue, 4> Ops;
for (unsigned j = 0; j != 2; ++j) {
SDValue SOp = SetCC->getOperand(j);
if (SOp == Trunc)
Ops.push_back(ExtLoad);
else
Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp));
}
Ops.push_back(SetCC->getOperand(2));
CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
}
}
// FIXME: Bring more similar combines here, common to sext/zext (maybe aext?).
SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT DstVT = N->getValueType(0);
EVT SrcVT = N0.getValueType();
assert((N->getOpcode() == ISD::SIGN_EXTEND ||
N->getOpcode() == ISD::ZERO_EXTEND) &&
"Unexpected node type (not an extend)!");
// fold (sext (load x)) to multiple smaller sextloads; same for zext.
// For example, on a target with legal v4i32, but illegal v8i32, turn:
// (v8i32 (sext (v8i16 (load x))))
// into:
// (v8i32 (concat_vectors (v4i32 (sextload x)),
// (v4i32 (sextload (x + 16)))))
// Where uses of the original load, i.e.:
// (v8i16 (load x))
// are replaced with:
// (v8i16 (truncate
// (v8i32 (concat_vectors (v4i32 (sextload x)),
// (v4i32 (sextload (x + 16)))))))
//
// This combine is only applicable to illegal, but splittable, vectors.
// All legal types, and illegal non-vector types, are handled elsewhere.
// This combine is controlled by TargetLowering::isVectorLoadExtDesirable.
//
if (N0->getOpcode() != ISD::LOAD)
return SDValue();
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) ||
!N0.hasOneUse() || LN0->isVolatile() || !DstVT.isVector() ||
!DstVT.isPow2VectorType() || !TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
return SDValue();
SmallVector<SDNode *, 4> SetCCs;
if (!ExtendUsesToFormExtLoad(N, N0, N->getOpcode(), SetCCs, TLI))
return SDValue();
ISD::LoadExtType ExtType =
N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
// Try to split the vector types to get down to legal types.
EVT SplitSrcVT = SrcVT;
EVT SplitDstVT = DstVT;
while (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT) &&
SplitSrcVT.getVectorNumElements() > 1) {
SplitDstVT = DAG.GetSplitDestVTs(SplitDstVT).first;
SplitSrcVT = DAG.GetSplitDestVTs(SplitSrcVT).first;
}
if (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT))
return SDValue();
SDLoc DL(N);
const unsigned NumSplits =
DstVT.getVectorNumElements() / SplitDstVT.getVectorNumElements();
const unsigned Stride = SplitSrcVT.getStoreSize();
SmallVector<SDValue, 4> Loads;
SmallVector<SDValue, 4> Chains;
SDValue BasePtr = LN0->getBasePtr();
for (unsigned Idx = 0; Idx < NumSplits; Idx++) {
const unsigned Offset = Idx * Stride;
const unsigned Align = MinAlign(LN0->getAlignment(), Offset);
SDValue SplitLoad = DAG.getExtLoad(
ExtType, DL, SplitDstVT, LN0->getChain(), BasePtr,
LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT,
LN0->isVolatile(), LN0->isNonTemporal(), LN0->isInvariant(),
Align, LN0->getAAInfo());
BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
DAG.getConstant(Stride, DL, BasePtr.getValueType()));
Loads.push_back(SplitLoad.getValue(0));
Chains.push_back(SplitLoad.getValue(1));
}
SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
SDValue NewValue = DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Loads);
CombineTo(N, NewValue);
// Replace uses of the original load (before extension)
// with a truncate of the concatenated sextloaded vectors.
SDValue Trunc =
DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), NewValue);
CombineTo(N0.getNode(), Trunc, NewChain);
ExtendSetCCUses(SetCCs, Trunc, NewValue, DL,
(ISD::NodeType)N->getOpcode());
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
LegalOperations))
return SDValue(Res, 0);
// fold (sext (sext x)) -> (sext x)
// fold (sext (aext x)) -> (sext x)
if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT,
N0.getOperand(0));
if (N0.getOpcode() == ISD::TRUNCATE) {
// fold (sext (truncate (load x))) -> (sext (smaller load x))
// fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
if (NarrowLoad.getNode()) {
SDNode* oye = N0.getNode()->getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// See if the value being truncated is already sign extended. If so, just
// eliminate the trunc/sext pair.
SDValue Op = N0.getOperand(0);
unsigned OpBits = Op.getValueType().getScalarType().getSizeInBits();
unsigned MidBits = N0.getValueType().getScalarType().getSizeInBits();
unsigned DestBits = VT.getScalarType().getSizeInBits();
unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
if (OpBits == DestBits) {
      // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
      // bits, the trunc/sext pair is a no-op and Op can be returned as-is.
if (NumSignBits > DestBits-MidBits)
return Op;
} else if (OpBits < DestBits) {
// Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
// bits, just sext from i32.
if (NumSignBits > OpBits-MidBits)
return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, Op);
} else {
// Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
// bits, just truncate to i32.
if (NumSignBits > OpBits-MidBits)
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
}
// fold (sext (truncate x)) -> (sextinreg x).
if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
N0.getValueType())) {
if (OpBits < DestBits)
Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op);
else if (OpBits > DestBits)
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op);
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, Op,
DAG.getValueType(N0.getValueType()));
}
}
// fold (sext (load x)) -> (sext (truncate (sextload x)))
// Only generate vector extloads when 1) they're legal, and 2) they are
// deemed desirable by the target.
if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
((!LegalOperations && !VT.isVector() &&
!cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()))) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
if (VT.isVector())
DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
CombineTo(N, ExtLoad);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad);
CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
ISD::SIGN_EXTEND);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (sext (load x)) to multiple smaller sextloads.
// Only on illegal but splittable vectors.
if (SDValue ExtLoad = CombineExtLoad(N))
return ExtLoad;
// fold (sext (sextload x)) -> (sext (truncate (sextload x)))
// fold (sext ( extload x)) -> (sext (truncate (sextload x)))
if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT)) {
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), MemVT,
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(),
DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad),
ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (sext (and/or/xor (load x), cst)) ->
// (and/or/xor (sextload x), (sext cst))
if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
N0.getOpcode() == ISD::XOR) &&
isa<LoadSDNode>(N0.getOperand(0)) &&
N0.getOperand(1).getOpcode() == ISD::Constant &&
TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()) &&
(!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0));
if (LN0->getExtensionType() != ISD::ZEXTLOAD && LN0->isUnindexed()) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::SIGN_EXTEND,
SetCCs, TLI);
if (DoXform) {
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN0), VT,
LN0->getChain(), LN0->getBasePtr(),
LN0->getMemoryVT(),
LN0->getMemOperand());
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.sext(VT.getSizeInBits());
SDLoc DL(N);
SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
ExtLoad, DAG.getConstant(Mask, DL, VT));
SDValue Trunc = DAG.getNode(ISD::TRUNCATE,
SDLoc(N0.getOperand(0)),
N0.getOperand(0).getValueType(), ExtLoad);
CombineTo(N, And);
CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1));
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL,
ISD::SIGN_EXTEND);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
}
if (N0.getOpcode() == ISD::SETCC) {
EVT N0VT = N0.getOperand(0).getValueType();
// sext(setcc) -> sext_in_reg(vsetcc) for vectors.
// Only do this before legalize for now.
if (VT.isVector() && !LegalOperations &&
TLI.getBooleanContents(N0VT) ==
TargetLowering::ZeroOrNegativeOneBooleanContent) {
// On some architectures (such as SSE/NEON/etc) the SETCC result type is
// of the same size as the compared operands. Only optimize sext(setcc())
// if this is the case.
EVT SVT = getSetCCResultType(N0VT);
// We know that the # elements of the results is the same as the
// # elements of the compare (and the # elements of the compare result
// for that matter). Check to see that they are the same size. If so,
// we know that the element size of the sext'd result matches the
// element size of the compare operands.
if (VT.getSizeInBits() == SVT.getSizeInBits())
return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
// truncate/sign extend
EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger();
if (SVT == MatchingVectorType) {
SDValue VsetCC = DAG.getSetCC(SDLoc(N), MatchingVectorType,
N0.getOperand(0), N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
return DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT);
}
}
// sext(setcc x, y, cc) -> (select (setcc x, y, cc), -1, 0)
unsigned ElementWidth = VT.getScalarType().getSizeInBits();
SDLoc DL(N);
SDValue NegOne =
DAG.getConstant(APInt::getAllOnesValue(ElementWidth), DL, VT);
SDValue SCC =
SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1),
NegOne, DAG.getConstant(0, DL, VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
if (SCC.getNode()) return SCC;
if (!VT.isVector()) {
EVT SetCCVT = getSetCCResultType(N0.getOperand(0).getValueType());
if (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, SetCCVT)) {
SDLoc DL(N);
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
SDValue SetCC = DAG.getSetCC(DL, SetCCVT,
N0.getOperand(0), N0.getOperand(1), CC);
return DAG.getSelect(DL, VT, SetCC,
NegOne, DAG.getConstant(0, DL, VT));
}
}
}
// fold (sext x) -> (zext x) if the sign bit is known zero.
if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0);
return SDValue();
}
// isTruncateOf - If N is a truncate of some other value, return true and
// record the value being truncated in Op and which of Op's bits are zero in
// KnownZero. This function computes KnownZero to avoid a duplicated call to
// computeKnownBits in the caller.
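// E.g. (illustrative) (setne (and i32 x, 1), 0) with an i1 result behaves
// like a truncate of (and x, 1) to i1, since all other bits are known zero.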
static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
APInt &KnownZero) {
APInt KnownOne;
if (N->getOpcode() == ISD::TRUNCATE) {
Op = N->getOperand(0);
DAG.computeKnownBits(Op, KnownZero, KnownOne);
return true;
}
if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 ||
cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE)
return false;
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
assert(Op0.getValueType() == Op1.getValueType());
if (isNullConstant(Op0))
Op = Op1;
else if (isNullConstant(Op1))
Op = Op0;
else
return false;
DAG.computeKnownBits(Op, KnownZero, KnownOne);
if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue())
return false;
return true;
}
SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
LegalOperations))
return SDValue(Res, 0);
// fold (zext (zext x)) -> (zext x)
// fold (zext (aext x)) -> (zext x)
if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT,
N0.getOperand(0));
// fold (zext (truncate x)) -> (zext x) or
// (zext (truncate x)) -> (truncate x)
// This is valid when the truncated bits of x are already zero.
// FIXME: We should extend this to work for vectors too.
SDValue Op;
APInt KnownZero;
if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) {
APInt TruncatedBits =
(Op.getValueSizeInBits() == N0.getValueSizeInBits()) ?
APInt(Op.getValueSizeInBits(), 0) :
APInt::getBitsSet(Op.getValueSizeInBits(),
N0.getValueSizeInBits(),
std::min(Op.getValueSizeInBits(),
VT.getSizeInBits()));
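    // E.g. (illustrative) for x : i32, (zext (trunc x to i16) to i32):
    // TruncatedBits covers bits [16, 32) of x; if those are known zero,
    // x itself can be returned.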
if (TruncatedBits == (KnownZero & TruncatedBits)) {
if (VT.bitsGT(Op.getValueType()))
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Op);
if (VT.bitsLT(Op.getValueType()))
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
return Op;
}
}
// fold (zext (truncate (load x))) -> (zext (smaller load x))
// fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
if (N0.getOpcode() == ISD::TRUNCATE) {
SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
if (NarrowLoad.getNode()) {
SDNode* oye = N0.getNode()->getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (zext (truncate x)) -> (and x, mask)
if (N0.getOpcode() == ISD::TRUNCATE &&
(!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
// fold (zext (truncate (load x))) -> (zext (smaller load x))
// fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
if (NarrowLoad.getNode()) {
SDNode* oye = N0.getNode()->getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
SDValue Op = N0.getOperand(0);
if (Op.getValueType().bitsLT(VT)) {
Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op);
AddToWorklist(Op.getNode());
} else if (Op.getValueType().bitsGT(VT)) {
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
AddToWorklist(Op.getNode());
}
return DAG.getZeroExtendInReg(Op, SDLoc(N),
N0.getValueType().getScalarType());
}
// Fold (zext (and (trunc x), cst)) -> (and x, cst),
// if either of the casts is not free.
if (N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
N0.getOperand(1).getOpcode() == ISD::Constant &&
(!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
N0.getValueType()) ||
!TLI.isZExtFree(N0.getValueType(), VT))) {
SDValue X = N0.getOperand(0).getOperand(0);
if (X.getValueType().bitsLT(VT)) {
X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(X), VT, X);
} else if (X.getValueType().bitsGT(VT)) {
X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
}
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.zext(VT.getSizeInBits());
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT,
X, DAG.getConstant(Mask, DL, VT));
}
// fold (zext (load x)) -> (zext (truncate (zextload x)))
// Only generate vector extloads when 1) they're legal, and 2) they are
// deemed desirable by the target.
if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
((!LegalOperations && !VT.isVector() &&
!cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()))) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
if (VT.isVector())
DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
CombineTo(N, ExtLoad);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad);
CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
ISD::ZERO_EXTEND);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (zext (load x)) to multiple smaller zextloads.
// Only on illegal but splittable vectors.
if (SDValue ExtLoad = CombineExtLoad(N))
return ExtLoad;
// fold (zext (and/or/xor (load x), cst)) ->
// (and/or/xor (zextload x), (zext cst))
if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
N0.getOpcode() == ISD::XOR) &&
isa<LoadSDNode>(N0.getOperand(0)) &&
N0.getOperand(1).getOpcode() == ISD::Constant &&
TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()) &&
(!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0));
if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed()) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::ZERO_EXTEND,
SetCCs, TLI);
if (DoXform) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), VT,
LN0->getChain(), LN0->getBasePtr(),
LN0->getMemoryVT(),
LN0->getMemOperand());
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.zext(VT.getSizeInBits());
SDLoc DL(N);
SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
ExtLoad, DAG.getConstant(Mask, DL, VT));
SDValue Trunc = DAG.getNode(ISD::TRUNCATE,
SDLoc(N0.getOperand(0)),
N0.getOperand(0).getValueType(), ExtLoad);
CombineTo(N, And);
CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1));
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL,
ISD::ZERO_EXTEND);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
}
// fold (zext (zextload x)) -> (zext (truncate (zextload x)))
// fold (zext ( extload x)) -> (zext (truncate (zextload x)))
if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT)) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), MemVT,
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(),
DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(),
ExtLoad),
ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
if (N0.getOpcode() == ISD::SETCC) {
if (!LegalOperations && VT.isVector() &&
N0.getValueType().getVectorElementType() == MVT::i1) {
EVT N0VT = N0.getOperand(0).getValueType();
if (getSetCCResultType(N0VT) == N0.getValueType())
return SDValue();
// zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors.
// Only do this before legalize for now.
EVT EltVT = VT.getVectorElementType();
SDLoc DL(N);
SmallVector<SDValue,8> OneOps(VT.getVectorNumElements(),
DAG.getConstant(1, DL, EltVT));
if (VT.getSizeInBits() == N0VT.getSizeInBits())
// We know that the # elements of the results is the same as the
// # elements of the compare (and the # elements of the compare result
// for that matter). Check to see that they are the same size. If so,
// we know that the element size of the sext'd result matches the
// element size of the compare operands.
return DAG.getNode(ISD::AND, DL, VT,
DAG.getSetCC(DL, VT, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get()),
DAG.getNode(ISD::BUILD_VECTOR, DL, VT,
OneOps));
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
// truncate/sign extend
EVT MatchingElementType =
EVT::getIntegerVT(*DAG.getContext(),
N0VT.getScalarType().getSizeInBits());
EVT MatchingVectorType =
EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
N0VT.getVectorNumElements());
SDValue VsetCC =
DAG.getSetCC(DL, MatchingVectorType, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
return DAG.getNode(ISD::AND, DL, VT,
DAG.getSExtOrTrunc(VsetCC, DL, VT),
DAG.getNode(ISD::BUILD_VECTOR, DL, VT, OneOps));
}
// zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
SDLoc DL(N);
SDValue SCC =
SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1),
DAG.getConstant(1, DL, VT), DAG.getConstant(0, DL, VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
if (SCC.getNode()) return SCC;
}
// (zext (shl (zext x), cst)) -> (shl (zext x), cst)
if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) &&
isa<ConstantSDNode>(N0.getOperand(1)) &&
N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
N0.hasOneUse()) {
SDValue ShAmt = N0.getOperand(1);
unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue();
if (N0.getOpcode() == ISD::SHL) {
SDValue InnerZExt = N0.getOperand(0);
// If the original shl may be shifting out bits, do not perform this
// transformation.
unsigned KnownZeroBits = InnerZExt.getValueType().getSizeInBits() -
InnerZExt.getOperand(0).getValueType().getSizeInBits();
if (ShAmtVal > KnownZeroBits)
return SDValue();
}
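    // E.g. (illustrative) (zext i64 (shl (zext i32 (i16 x)), 10)): the inner
    // zext guarantees 16 zero high bits, so shifting left by 10 cannot drop
    // set bits and the zext can be hoisted through the shl.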
SDLoc DL(N);
// Ensure that the shift amount is wide enough for the shifted value.
if (VT.getSizeInBits() >= 256)
ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);
return DAG.getNode(N0.getOpcode(), DL, VT,
DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
ShAmt);
}
return SDValue();
}
SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
LegalOperations))
return SDValue(Res, 0);
// fold (aext (aext x)) -> (aext x)
// fold (aext (zext x)) -> (zext x)
// fold (aext (sext x)) -> (sext x)
if (N0.getOpcode() == ISD::ANY_EXTEND ||
N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND)
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
// fold (aext (truncate (load x))) -> (aext (smaller load x))
// fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
if (N0.getOpcode() == ISD::TRUNCATE) {
SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
if (NarrowLoad.getNode()) {
SDNode* oye = N0.getNode()->getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (aext (truncate x))
if (N0.getOpcode() == ISD::TRUNCATE) {
SDValue TruncOp = N0.getOperand(0);
if (TruncOp.getValueType() == VT)
return TruncOp; // x iff x size == zext size.
if (TruncOp.getValueType().bitsGT(VT))
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, TruncOp);
return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, TruncOp);
}
// Fold (aext (and (trunc x), cst)) -> (and x, cst)
// if the trunc is not free.
if (N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
N0.getOperand(1).getOpcode() == ISD::Constant &&
!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
N0.getValueType())) {
SDValue X = N0.getOperand(0).getOperand(0);
if (X.getValueType().bitsLT(VT)) {
X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, X);
} else if (X.getValueType().bitsGT(VT)) {
X = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, X);
}
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask = Mask.zext(VT.getSizeInBits());
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT,
X, DAG.getConstant(Mask, DL, VT));
}
// fold (aext (load x)) -> (aext (truncate (extload x)))
// None of the supported targets knows how to perform load and any_ext
// on vectors in one instruction. We only perform this transformation on
// scalars.
if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
ISD::isUNINDEXEDLoad(N0.getNode()) &&
TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
CombineTo(N, ExtLoad);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad);
CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
ISD::ANY_EXTEND);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
// fold (aext (zextload x)) -> (aext (truncate (zextload x)))
// fold (aext (sextload x)) -> (aext (truncate (sextload x)))
// fold (aext ( extload x)) -> (aext (truncate (extload x)))
if (N0.getOpcode() == ISD::LOAD &&
!ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
ISD::LoadExtType ExtType = LN0->getExtensionType();
EVT MemVT = LN0->getMemoryVT();
if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) {
SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N),
VT, LN0->getChain(), LN0->getBasePtr(),
MemVT, LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(),
DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad),
ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
if (N0.getOpcode() == ISD::SETCC) {
// For vectors:
// aext(setcc) -> vsetcc
// aext(setcc) -> truncate(vsetcc)
// aext(setcc) -> aext(vsetcc)
// Only do this before legalize for now.
if (VT.isVector() && !LegalOperations) {
EVT N0VT = N0.getOperand(0).getValueType();
// We know that the # elements of the result is the same as the
// # elements of the compare (and the # elements of the compare result
// for that matter). Check to see that they are the same size. If so,
// we know that the element size of the extended result matches the
// element size of the compare operands.
if (VT.getSizeInBits() == N0VT.getSizeInBits())
return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
// truncate/any extend
else {
EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger();
SDValue VsetCC =
DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
}
}
// aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
SDLoc DL(N);
SDValue SCC =
SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1),
DAG.getConstant(1, DL, VT), DAG.getConstant(0, DL, VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
if (SCC.getNode())
return SCC;
}
return SDValue();
}
/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by Mask are used. If so, return the simpler operand,
/// otherwise return a null SDValue.
SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
switch (V.getOpcode()) {
default: break;
case ISD::Constant: {
const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
assert(CV && "Const value should be ConstSDNode.");
const APInt &CVal = CV->getAPIntValue();
APInt NewVal = CVal & Mask;
if (NewVal != CVal)
return DAG.getConstant(NewVal, SDLoc(V), V.getValueType());
break;
}
case ISD::OR:
case ISD::XOR:
// If the LHS or RHS doesn't contribute bits to the or/xor, drop it.
if (DAG.MaskedValueIsZero(V.getOperand(0), Mask))
return V.getOperand(1);
if (DAG.MaskedValueIsZero(V.getOperand(1), Mask))
return V.getOperand(0);
break;
case ISD::SRL:
// Only look at single-use SRLs.
if (!V.getNode()->hasOneUse())
break;
if (ConstantSDNode *RHSC = getAsNonOpaqueConstant(V.getOperand(1))) {
// See if we can recursively simplify the LHS.
unsigned Amt = RHSC->getZExtValue();
// Watch out for shift count overflow though.
if (Amt >= Mask.getBitWidth()) break;
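// The bits demanded at position i of the SRL result come from position
// i+Amt of its input, so shift the mask up. For example, demanding the
// low 8 bits of (srl x, 16) demands bits [23:16] of x.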
APInt NewMask = Mask << Amt;
SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask);
if (SimplifyLHS.getNode())
return DAG.getNode(ISD::SRL, SDLoc(V), V.getValueType(),
SimplifyLHS, V.getOperand(1));
}
}
return SDValue();
}
/// If the result of a wider load is shifted right by N bits and then
/// truncated to a narrower type, and N is a multiple of the number of bits
/// in the narrower type, transform it to a narrower load from address +
/// N / (number of bits in the new type). If the result is to be extended,
/// also fold the extension to form an extending load.
SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
unsigned Opc = N->getOpcode();
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT ExtVT = VT;
// This transformation isn't valid for vector loads.
if (VT.isVector())
return SDValue();
// Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
// extending to VT.
if (Opc == ISD::SIGN_EXTEND_INREG) {
ExtType = ISD::SEXTLOAD;
ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
} else if (Opc == ISD::SRL) {
// Another special-case: SRL is basically zero-extending a narrower value.
ExtType = ISD::ZEXTLOAD;
N0 = SDValue(N, 0);
ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N01) return SDValue();
ExtVT = EVT::getIntegerVT(*DAG.getContext(),
VT.getSizeInBits() - N01->getZExtValue());
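// For example, (srl (load x):i32, 16) gives ExtVT = i16: the SRL acts as
// a zero-extension of the high half of the loaded value.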
}
if (LegalOperations && !TLI.isLoadExtLegal(ExtType, VT, ExtVT))
return SDValue();
unsigned EVTBits = ExtVT.getSizeInBits();
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
if (!ExtVT.isRound())
return SDValue();
unsigned ShAmt = 0;
if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
ShAmt = N01->getZExtValue();
// Is the shift amount a multiple of the size of ExtVT?
if ((ShAmt & (EVTBits-1)) == 0) {
N0 = N0.getOperand(0);
// Is the load width a multiple of the size of ExtVT?
if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
return SDValue();
}
// At this point, we must have a load or else we can't do the transform.
if (!isa<LoadSDNode>(N0)) return SDValue();
// Because an SRL must be assumed to *need* to zero-extend the high bits
// (as opposed to anyext the high bits), we can't combine the zextload
// lowering of SRL and an sextload.
if (cast<LoadSDNode>(N0)->getExtensionType() == ISD::SEXTLOAD)
return SDValue();
// If the shift amount is larger than the input type then we're not
// accessing any of the loaded bytes. If the load was a zextload/extload
// then the result of the shift+trunc is zero/undef (handled elsewhere).
if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits())
return SDValue();
}
}
// If the load is shifted left (and the result isn't shifted back right),
// we can fold the truncate through the shift.
unsigned ShLeftAmt = 0;
if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
ShLeftAmt = N01->getZExtValue();
N0 = N0.getOperand(0);
}
}
// If we haven't found a load, we can't narrow it. Don't transform one with
// multiple uses; this would require adding a new load.
if (!isa<LoadSDNode>(N0) || !N0.hasOneUse())
return SDValue();
// Don't change the width of a volatile load.
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
if (LN0->isVolatile())
return SDValue();
// Verify that we are actually reducing a load width here.
if (LN0->getMemoryVT().getSizeInBits() < EVTBits)
return SDValue();
// For the transform to be legal, the load must produce only two values
// (the value loaded and the chain). Don't transform a pre-increment
// load, for example, which produces an extra value. Otherwise the
// transformation is not equivalent, and the downstream logic to replace
// uses gets things wrong.
if (LN0->getNumValues() > 2)
return SDValue();
// If the load that we're shrinking is an extload and we're not just
// discarding the extension, we can't simply shrink the load. Bail.
// TODO: It would be possible to merge the extensions in some cases.
if (LN0->getExtensionType() != ISD::NON_EXTLOAD &&
LN0->getMemoryVT().getSizeInBits() < ExtVT.getSizeInBits() + ShAmt)
return SDValue();
if (!TLI.shouldReduceLoadWidth(LN0, ExtType, ExtVT))
return SDValue();
EVT PtrType = N0.getOperand(1).getValueType();
if (PtrType == MVT::Untyped || PtrType.isExtended())
// It's not possible to generate a constant of extended or untyped type.
return SDValue();
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
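// For example, narrowing an i32 load to the i16 produced by (srl x, 16)
// on a big-endian target must read the low-address half, so ShAmt becomes
// 32 - 16 - 16 = 0 and the byte offset stays 0.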
if (DAG.getDataLayout().isBigEndian()) {
unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
}
uint64_t PtrOff = ShAmt / 8;
unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
SDLoc DL(LN0);
SDValue NewPtr = DAG.getNode(ISD::ADD, DL,
PtrType, LN0->getBasePtr(),
DAG.getConstant(PtrOff, DL, PtrType));
AddToWorklist(NewPtr.getNode());
SDValue Load;
if (ExtType == ISD::NON_EXTLOAD)
Load = DAG.getLoad(VT, SDLoc(N0), LN0->getChain(), NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff),
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->isInvariant(), NewAlign, LN0->getAAInfo());
else
Load = DAG.getExtLoad(ExtType, SDLoc(N0), VT, LN0->getChain(),NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
LN0->isInvariant(), NewAlign, LN0->getAAInfo());
// Replace the old load's chain with the new load's chain.
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
// Shift the result left, if we've swallowed a left shift.
SDValue Result = Load;
if (ShLeftAmt != 0) {
EVT ShImmTy = getShiftAmountTy(Result.getValueType());
if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt))
ShImmTy = VT;
// If the shift amount is as large as the result size (but, presumably,
// no larger than the source) then the useful bits of the result are
// zero; we can't simply return the shortened shift, because the result
// of that operation is undefined.
SDLoc DL(N0);
if (ShLeftAmt >= VT.getSizeInBits())
Result = DAG.getConstant(0, DL, VT);
else
Result = DAG.getNode(ISD::SHL, DL, VT,
Result, DAG.getConstant(ShLeftAmt, DL, ShImmTy));
}
// Return the new loaded value.
return Result;
}
SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT EVT = cast<VTSDNode>(N1)->getVT();
unsigned VTBits = VT.getScalarType().getSizeInBits();
unsigned EVTBits = EVT.getScalarType().getSizeInBits();
// fold (sext_in_reg c1) -> c1
if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1);
// If the input is already sign extended, just drop the extension.
if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1)
return N0;
// fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT()))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
N0.getOperand(0), N1);
// fold (sext_in_reg (sext x)) -> (sext x)
// fold (sext_in_reg (aext x)) -> (sext x)
// if x is small enough.
if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getValueType().getScalarType().getSizeInBits() <= EVTBits &&
(!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00);
}
// fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
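// The mask below covers only bit EVTBits-1, the sign bit of the narrow
// type; if that bit is known zero, sign- and zero-extension agree.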
if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits)))
return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT);
// fold operands of sext_in_reg based on knowledge that the top bits are not
// demanded.
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (sext_in_reg (load x)) -> (smaller sextload x)
// fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
SDValue NarrowLoad = ReduceLoadWidth(N);
if (NarrowLoad.getNode())
return NarrowLoad;
// fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
// fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
// We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
if (N0.getOpcode() == ISD::SRL) {
if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
if (ShAmt->getZExtValue()+EVTBits <= VTBits) {
// We can turn this into an SRA iff the input to the SRL is already sign
// extended enough.
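// For example, for (sext_in_reg (srl X:i32, 23), i8) the SRA is safe when
// X already has more than 32-(23+8) = 1 sign bit, i.e. at least 2.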
unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits)
return DAG.getNode(ISD::SRA, SDLoc(N), VT,
N0.getOperand(0), N0.getOperand(1));
}
}
// fold (sext_inreg (extload x)) -> (sextload x)
if (ISD::isEXTLoad(N0.getNode()) &&
ISD::isUNINDEXEDLoad(N0.getNode()) &&
EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), EVT,
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
AddToWorklist(ExtLoad.getNode());
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
N0.hasOneUse() &&
EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), EVT,
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
N0.getOperand(1), false);
if (BSwap.getNode())
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
BSwap, N1);
}
// Fold a sext_inreg of a build_vector of ConstantSDNodes or undefs
// into a build_vector.
if (ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
SmallVector<SDValue, 8> Elts;
unsigned NumElts = N0->getNumOperands();
unsigned ShAmt = VTBits - EVTBits;
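// Sign-extend each constant from EVTBits to VTBits by shifting its low
// bits up and arithmetically shifting them back down. For example, with
// VTBits=32 and EVTBits=8, 0x00000080 becomes 0xFFFFFF80.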
for (unsigned i = 0; i != NumElts; ++i) {
SDValue Op = N0->getOperand(i);
if (Op->getOpcode() == ISD::UNDEF) {
Elts.push_back(Op);
continue;
}
ConstantSDNode *CurrentND = cast<ConstantSDNode>(Op);
APInt C(VTBits, CurrentND->getAPIntValue().getZExtValue());
Elts.push_back(DAG.getConstant(C.shl(ShAmt).ashr(ShAmt).getZExtValue(),
SDLoc(Op), Op.getValueType()));
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Elts);
}
return SDValue();
}
SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.getOpcode() == ISD::UNDEF)
return DAG.getUNDEF(VT);
if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
LegalOperations))
return SDValue(Res, 0);
return SDValue();
}
SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
bool isLE = DAG.getDataLayout().isLittleEndian();
// noop truncate
if (N0.getValueType() == N->getValueType(0))
return N0;
// fold (truncate c1) -> c1
if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0);
// fold (truncate (truncate x)) -> (truncate x)
if (N0.getOpcode() == ISD::TRUNCATE)
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
// fold (truncate (ext x)) -> (ext x) or (truncate x) or x
if (N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND ||
N0.getOpcode() == ISD::ANY_EXTEND) {
if (N0.getOperand(0).getValueType().bitsLT(VT))
// if the source is smaller than the dest, we still need an extend
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
N0.getOperand(0));
if (N0.getOperand(0).getValueType().bitsGT(VT))
// if the source is larger than the dest, then we just need the truncate
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
// if the source and dest are the same type, we can drop both the extend
// and the truncate.
return N0.getOperand(0);
}
// Fold extract-and-trunc into a narrow extract. For example:
// i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
// i32 y = TRUNCATE(i64 x)
// -- becomes --
// v16i8 b = BITCAST (v2i64 val)
// i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
//
// Note: We only run this optimization after type legalization (which often
// creates this pattern) and before operation legalization, after which
// we need to be more careful about the vector instructions that we generate.
if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
EVT VecTy = N0.getOperand(0).getValueType();
EVT ExTy = N0.getValueType();
EVT TrTy = N->getValueType(0);
unsigned NumElem = VecTy.getVectorNumElements();
unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
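// Pick the sub-element holding the low TrTy bits of the original element:
// the first sub-element on little-endian, the last on big-endian. For
// example, truncating element 1 of v2i64 via v16i8 selects index 8 (LE)
// or 15 (BE).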
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDValue V = DAG.getNode(ISD::BITCAST, SDLoc(N),
NVT, N0.getOperand(0));
SDLoc DL(N);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
DL, TrTy, V,
DAG.getConstant(Index, DL, IndexTy));
}
}
// trunc (select c, a, b) -> select c, (trunc a), (trunc b)
if (N0.getOpcode() == ISD::SELECT) {
EVT SrcVT = N0.getValueType();
if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) &&
TLI.isTruncateFree(SrcVT, VT)) {
SDLoc SL(N0);
SDValue Cond = N0.getOperand(0);
SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2));
return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1);
}
}
// Fold a series of buildvector, bitcast, and truncate if possible.
// For example fold
// (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
// (2xi32 (buildvector x, y)).
if (Level == AfterLegalizeVectorOps && VT.isVector() &&
N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
N0.getOperand(0).hasOneUse()) {
SDValue BuildVect = N0.getOperand(0);
EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType();
EVT TruncVecEltTy = VT.getVectorElementType();
// Check that the element types match.
if (BuildVectEltTy == TruncVecEltTy) {
// Now we only need to compute the offset of the truncated elements.
unsigned BuildVecNumElts = BuildVect.getNumOperands();
unsigned TruncVecNumElts = VT.getVectorNumElements();
unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts;
assert((BuildVecNumElts % TruncVecNumElts) == 0 &&
"Invalid number of elements");
SmallVector<SDValue, 8> Opnds;
for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
Opnds.push_back(BuildVect.getOperand(i));
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
}
}
// See if we can simplify the input to this truncate through knowledge that
// only the low bits are being used.
// For example "trunc (or (shl x, 8), y)" // -> trunc y
// Currently we only perform this optimization on scalars because vectors
// may have different active low bits.
if (!VT.isVector()) {
SDValue Shorter =
GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
VT.getSizeInBits()));
if (Shorter.getNode())
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter);
}
// fold (truncate (load x)) -> (smaller load x)
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
SDValue Reduced = ReduceLoadWidth(N);
if (Reduced.getNode())
return Reduced;
// Handle the case where the load remains an extending load even
// after truncation.
if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
if (!LN0->isVolatile() &&
LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
VT, LN0->getChain(), LN0->getBasePtr(),
LN0->getMemoryVT(),
LN0->getMemOperand());
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
return NewLoad;
}
}
}
// fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...),
// where ... are all 'undef'.
if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
SmallVector<EVT, 8> VTs;
SDValue V;
unsigned Idx = 0;
unsigned NumDefs = 0;
for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
SDValue X = N0.getOperand(i);
if (X.getOpcode() != ISD::UNDEF) {
V = X;
Idx = i;
NumDefs++;
}
// Stop if more than one member is non-undef.
if (NumDefs > 1)
break;
VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
VT.getVectorElementType(),
X.getValueType().getVectorNumElements()));
}
if (NumDefs == 0)
return DAG.getUNDEF(VT);
if (NumDefs == 1) {
assert(V.getNode() && "The single defined operand is empty!");
SmallVector<SDValue, 8> Opnds;
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
if (i != Idx) {
Opnds.push_back(DAG.getUNDEF(VTs[i]));
continue;
}
SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
AddToWorklist(NV.getNode());
Opnds.push_back(NV);
}
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
}
}
// Simplify the operands using demanded-bits information.
if (!VT.isVector() &&
SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
}
static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
SDValue Elt = N->getOperand(i);
if (Elt.getOpcode() != ISD::MERGE_VALUES)
return Elt.getNode();
return Elt.getOperand(Elt.getResNo()).getNode();
}
/// build_pair (load, load) -> load
/// if load locations are consecutive.
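/// For example, two adjacent i32 loads feeding a build_pair can become a
/// single i64 load from the first address, provided the alignment and
/// operation-legality checks below succeed.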
SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
assert(N->getOpcode() == ISD::BUILD_PAIR);
LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
LD1->getAddressSpace() != LD2->getAddressSpace())
return SDValue();
EVT LD1VT = LD1->getValueType(0);
if (ISD::isNON_EXTLoad(LD2) &&
LD2->hasOneUse() &&
// If both are volatile this would reduce the number of volatile loads.
// If one is volatile it might be ok, but be conservative and bail out.
!LD1->isVolatile() &&
!LD2->isVolatile() &&
DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
unsigned Align = LD1->getAlignment();
unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
VT.getTypeForEVT(*DAG.getContext()));
if (NewAlign <= Align &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
return DAG.getLoad(VT, SDLoc(N), LD1->getChain(),
LD1->getBasePtr(), LD1->getPointerInfo(),
false, false, false, Align);
}
return SDValue();
}
SDValue DAGCombiner::visitBITCAST(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// If the input is a BUILD_VECTOR with all constant elements, fold this now.
// Only do this before legalize, since afterward the target may be depending
// on the bitconvert.
// First check to see if this is all constant.
if (!LegalTypes &&
N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
VT.isVector()) {
bool isSimple = cast<BuildVectorSDNode>(N0)->isConstant();
EVT DestEltVT = N->getValueType(0).getVectorElementType();
assert(!DestEltVT.isVector() &&
"Element type of vector ValueType must not be vector!");
if (isSimple)
return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
}
// If the input is a constant, let getNode fold it.
if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
// If we can't allow illegal operations, we need to check that this is just
// an fp -> int or int -> fp conversion and that the resulting operation
// will be legal.
if (!LegalOperations ||
(isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
(isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
TLI.isOperationLegal(ISD::Constant, VT)))
return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, N0);
}
// (conv (conv x, t1), t2) -> (conv x, t2)
if (N0.getOpcode() == ISD::BITCAST)
return DAG.getNode(ISD::BITCAST, SDLoc(N), VT,
N0.getOperand(0));
// fold (conv (load x)) -> (load (conv*)x)
// If the resultant load doesn't need a higher alignment than the original!
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
// Do not change the width of a volatile load.
!cast<LoadSDNode>(N0)->isVolatile() &&
// Do not remove the cast if the types differ in endian layout.
TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
unsigned Align = DAG.getDataLayout().getABITypeAlignment(
VT.getTypeForEVT(*DAG.getContext()));
unsigned OrigAlign = LN0->getAlignment();
if (Align <= OrigAlign) {
SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(),
LN0->getBasePtr(), LN0->getPointerInfo(),
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->isInvariant(), OrigAlign,
LN0->getAAInfo());
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
return Load;
}
}
// fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
// fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
// This often reduces constant pool loads.
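// For example, for f32 this produces (xor (bitconvert x), 0x80000000) for
// fneg and (and (bitconvert x), 0x7FFFFFFF) for fabs.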
if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
(N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
N0.getNode()->hasOneUse() && VT.isInteger() &&
!VT.isVector() && !N0.getValueType().isVector()) {
SDValue NewConv = DAG.getNode(ISD::BITCAST, SDLoc(N0), VT,
N0.getOperand(0));
AddToWorklist(NewConv.getNode());
SDLoc DL(N);
APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
if (N0.getOpcode() == ISD::FNEG)
return DAG.getNode(ISD::XOR, DL, VT,
NewConv, DAG.getConstant(SignBit, DL, VT));
assert(N0.getOpcode() == ISD::FABS);
return DAG.getNode(ISD::AND, DL, VT,
NewConv, DAG.getConstant(~SignBit, DL, VT));
}
// fold (bitconvert (fcopysign cst, x)) ->
// (or (and (bitconvert x), sign), (and cst, (not sign)))
// Note that we don't handle (copysign x, cst) because this can always be
// folded to an fneg or fabs.
if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
isa<ConstantFPSDNode>(N0.getOperand(0)) &&
VT.isInteger() && !VT.isVector()) {
unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
if (isTypeLegal(IntXVT)) {
SDValue X = DAG.getNode(ISD::BITCAST, SDLoc(N0),
IntXVT, N0.getOperand(1));
AddToWorklist(X.getNode());
// If X has a different width than the result/lhs, sext it or truncate it.
unsigned VTWidth = VT.getSizeInBits();
if (OrigXWidth < VTWidth) {
X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X);
AddToWorklist(X.getNode());
} else if (OrigXWidth > VTWidth) {
// To get the sign bit in the right place, we have to shift it right
// before truncating.
SDLoc DL(X);
X = DAG.getNode(ISD::SRL, DL,
X.getValueType(), X,
DAG.getConstant(OrigXWidth-VTWidth, DL,
X.getValueType()));
AddToWorklist(X.getNode());
X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
AddToWorklist(X.getNode());
}
APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
X = DAG.getNode(ISD::AND, SDLoc(X), VT,
X, DAG.getConstant(SignBit, SDLoc(X), VT));
AddToWorklist(X.getNode());
SDValue Cst = DAG.getNode(ISD::BITCAST, SDLoc(N0),
VT, N0.getOperand(0));
Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT));
AddToWorklist(Cst.getNode());
return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst);
}
}
// bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
if (N0.getOpcode() == ISD::BUILD_PAIR) {
SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
if (CombineLD.getNode())
return CombineLD;
}
// Remove double bitcasts from shuffles - this is often a legacy of
// XformToShuffleWithZero being used to combine bitmaskings (of
// float vectors bitcast to integer vectors) into shuffles.
// bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1)
if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() &&
N0->getOpcode() == ISD::VECTOR_SHUFFLE &&
VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() &&
!(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) {
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0);
// If an operand is a bitcast, peek through it if it casts from the
// original VT. If an operand is a UNDEF or constant build_vector, just
// bitcast it back to the original VT.
auto PeekThroughBitcast = [&](SDValue Op) {
if (Op.getOpcode() == ISD::BITCAST &&
Op.getOperand(0)->getValueType(0) == VT)
return Op.getOperand(0);
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
return SDValue();
};
SDValue SV0 = PeekThroughBitcast(N0->getOperand(0));
SDValue SV1 = PeekThroughBitcast(N0->getOperand(1));
if (!(SV0 && SV1))
return SDValue();
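// Scale the shuffle mask to the narrower element count. For example,
// bitcasting the result of a v2i64 shuffle with mask <1,0> to v4i32 gives
// MaskScale = 2 and the widened mask <2,3,0,1>; undef entries (-1) stay
// undef.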
int MaskScale =
VT.getVectorNumElements() / N0.getValueType().getVectorNumElements();
SmallVector<int, 8> NewMask;
for (int M : SVN->getMask())
for (int i = 0; i != MaskScale; ++i)
NewMask.push_back(M < 0 ? -1 : M * MaskScale + i);
bool LegalMask = TLI.isShuffleMaskLegal(NewMask, VT);
if (!LegalMask) {
std::swap(SV0, SV1);
ShuffleVectorSDNode::commuteMask(NewMask);
LegalMask = TLI.isShuffleMaskLegal(NewMask, VT);
}
if (LegalMask)
return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, NewMask);
}
return SDValue();
}
SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
EVT VT = N->getValueType(0);
return CombineConsecutiveLoads(N, VT);
}
/// We know that BV is a build_vector node with Constant, ConstantFP or Undef
/// operands. DstEltVT indicates the destination element value type.
SDValue DAGCombiner::
ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
// If this is already the right type, we're done.
if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
unsigned SrcBitSize = SrcEltVT.getSizeInBits();
unsigned DstBitSize = DstEltVT.getSizeInBits();
// If this is a conversion of N elements of one type to N elements of another
// type, convert each element. This handles FP<->INT cases.
if (SrcBitSize == DstBitSize) {
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
BV->getValueType(0).getVectorNumElements());
// Due to the FP element handling below calling this routine recursively,
// we can end up with a scalar-to-vector node here.
if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
DAG.getNode(ISD::BITCAST, SDLoc(BV),
DstEltVT, BV->getOperand(0)));
SmallVector<SDValue, 8> Ops;
for (SDValue Op : BV->op_values()) {
// If the vector element type is not legal, the BUILD_VECTOR operands
// are promoted and implicitly truncated. Make that explicit here.
if (Op.getValueType() != SrcEltVT)
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
Ops.push_back(DAG.getNode(ISD::BITCAST, SDLoc(BV),
DstEltVT, Op));
AddToWorklist(Ops.back().getNode());
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
}
// Otherwise, we're growing or shrinking the elements. To avoid having to
// handle annoying details of growing/shrinking FP values, we convert them to
// int first.
if (SrcEltVT.isFloatingPoint()) {
// Convert the input float vector to an int vector where the elements are
// the same size.
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
SrcEltVT = IntVT;
}
// Now we know the input is an integer vector. If the output is an FP
// type, convert to integer elements of the destination size first, then
// to FP of the right size.
if (DstEltVT.isFloatingPoint()) {
EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
// Next, convert to FP elements of the same size.
return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
}
SDLoc DL(BV);
// Okay, we know the src/dst types are both integers of differing sizes.
// Handle growing first.
assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
if (SrcBitSize < DstBitSize) {
unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
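// Pack NumInputsPerOutput source constants into each destination
// constant. For example, v4i16 -> v2i32 on a little-endian target puts
// element 0 in the low half and element 1 in the high half of the first
// i32.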
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = BV->getNumOperands(); i != e;
i += NumInputsPerOutput) {
bool isLE = DAG.getDataLayout().isLittleEndian();
APInt NewBits = APInt(DstBitSize, 0);
bool EltIsUndef = true;
for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
// Shift the previously computed bits over.
NewBits <<= SrcBitSize;
SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
if (Op.getOpcode() == ISD::UNDEF) continue;
EltIsUndef = false;
NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
zextOrTrunc(SrcBitSize).zext(DstBitSize);
}
if (EltIsUndef)
Ops.push_back(DAG.getUNDEF(DstEltVT));
else
Ops.push_back(DAG.getConstant(NewBits, DL, DstEltVT));
}
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
// Finally, this must be the case where we are shrinking elements: each input
// turns into multiple outputs.
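// For example, splitting an i32 constant 0xAABBCCDD into v4i8 yields
// 0xDD,0xCC,0xBB,0xAA in element order; the reverse() below flips each
// group on big-endian targets.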
unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
NumOutputsPerInput*BV->getNumOperands());
SmallVector<SDValue, 8> Ops;
for (const SDValue &Op : BV->op_values()) {
if (Op.getOpcode() == ISD::UNDEF) {
Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT));
continue;
}
APInt OpVal = cast<ConstantSDNode>(Op)->
getAPIntValue().zextOrTrunc(SrcBitSize);
for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
APInt ThisVal = OpVal.trunc(DstBitSize);
Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT));
OpVal = OpVal.lshr(DstBitSize);
}
// For big endian targets, swap the order of the pieces of each element.
if (DAG.getDataLayout().isBigEndian())
std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
}
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
/// Try to perform FMA combining on a given FADD node.
SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc SL(N);
const TargetOptions &Options = DAG.getTarget().Options;
bool UnsafeFPMath = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
Options.UnsafeFPMath);
// Floating-point multiply-add with intermediate rounding.
bool HasFMAD = (LegalOperations &&
TLI.isOperationLegal(ISD::FMAD, VT));
// Floating-point multiply-add without intermediate rounding.
bool HasFMA = ((!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::FMA, VT)) &&
TLI.isFMAFasterThanFMulAndFAdd(VT) &&
UnsafeFPMath);
// No valid opcode, do not combine.
if (!HasFMAD && !HasFMA)
return SDValue();
// Always prefer FMAD to FMA for precision.
unsigned int PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
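// (FMAD keeps the intermediate rounding step, so it matches the numerics
// of the separate fmul+fadd exactly, whereas FMA does not.)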
bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
bool LookThroughFPExt = TLI.isFPExtFree(VT);
// fold (fadd (fmul x, y), z) -> (fma x, y, z)
if (N0.getOpcode() == ISD::FMUL &&
(Aggressive || N0->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1), N1);
}
// fold (fadd x, (fmul y, z)) -> (fma y, z, x)
// Note: Commutes FADD operands.
if (N1.getOpcode() == ISD::FMUL &&
(Aggressive || N1->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N1.getOperand(0), N1.getOperand(1), N0);
}
// Look through FP_EXTEND nodes to do more combining.
if (UnsafeFPMath && LookThroughFPExt) {
// fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == ISD::FMUL)
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(1)), N1);
}
// fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
// Note: Commutes FADD operands.
if (N1.getOpcode() == ISD::FP_EXTEND) {
SDValue N10 = N1.getOperand(0);
if (N10.getOpcode() == ISD::FMUL)
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(1)), N0);
}
}
// More folding opportunities when target permits.
if ((UnsafeFPMath || HasFMAD) && Aggressive) {
// fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
if (N0.getOpcode() == PreferredFusedOpcode &&
N0.getOperand(2).getOpcode() == ISD::FMUL) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(2).getOperand(0),
N0.getOperand(2).getOperand(1),
N1));
}
// fold (fadd x, (fma y, z, (fmul u, v))) -> (fma y, z, (fma u, v, x))
if (N1->getOpcode() == PreferredFusedOpcode &&
N1.getOperand(2).getOpcode() == ISD::FMUL) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N1.getOperand(0), N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
N1.getOperand(2).getOperand(0),
N1.getOperand(2).getOperand(1),
N0));
}
if (UnsafeFPMath && LookThroughFPExt) {
// fold (fadd (fma x, y, (fpext (fmul u, v))), z)
// -> (fma x, y, (fma (fpext u), (fpext v), z))
auto FoldFAddFMAFPExtFMul = [&] (
SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z) {
return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y,
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
Z));
};
if (N0.getOpcode() == PreferredFusedOpcode) {
SDValue N02 = N0.getOperand(2);
if (N02.getOpcode() == ISD::FP_EXTEND) {
SDValue N020 = N02.getOperand(0);
if (N020.getOpcode() == ISD::FMUL)
return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
N020.getOperand(0), N020.getOperand(1),
N1);
}
}
// fold (fadd (fpext (fma x, y, (fmul u, v))), z)
// -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
auto FoldFAddFPExtFMAFMul = [&] (
SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT, X),
DAG.getNode(ISD::FP_EXTEND, SL, VT, Y),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
Z));
};
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == PreferredFusedOpcode) {
SDValue N002 = N00.getOperand(2);
if (N002.getOpcode() == ISD::FMUL)
return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1),
N002.getOperand(0), N002.getOperand(1),
N1);
}
}
// fold (fadd x, (fma y, z, (fpext (fmul u, v))))
// -> (fma y, z, (fma (fpext u), (fpext v), x))
if (N1.getOpcode() == PreferredFusedOpcode) {
SDValue N12 = N1.getOperand(2);
if (N12.getOpcode() == ISD::FP_EXTEND) {
SDValue N120 = N12.getOperand(0);
if (N120.getOpcode() == ISD::FMUL)
return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1),
N120.getOperand(0), N120.getOperand(1),
N0);
}
}
// fold (fadd x, (fpext (fma y, z, (fmul u, v))))
// -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
if (N1.getOpcode() == ISD::FP_EXTEND) {
SDValue N10 = N1.getOperand(0);
if (N10.getOpcode() == PreferredFusedOpcode) {
SDValue N102 = N10.getOperand(2);
if (N102.getOpcode() == ISD::FMUL)
return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1),
N102.getOperand(0), N102.getOperand(1),
N0);
}
}
}
}
return SDValue();
}
/// Try to perform FMA combining on a given FSUB node.
SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc SL(N);
const TargetOptions &Options = DAG.getTarget().Options;
bool UnsafeFPMath = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
Options.UnsafeFPMath);
// Floating-point multiply-add with intermediate rounding.
bool HasFMAD = (LegalOperations &&
TLI.isOperationLegal(ISD::FMAD, VT));
// Floating-point multiply-add without intermediate rounding.
bool HasFMA = ((!LegalOperations ||
TLI.isOperationLegalOrCustom(ISD::FMA, VT)) &&
TLI.isFMAFasterThanFMulAndFAdd(VT) &&
UnsafeFPMath);
// No valid opcode, do not combine.
if (!HasFMAD && !HasFMA)
return SDValue();
// Always prefer FMAD to FMA for precision.
unsigned int PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
bool LookThroughFPExt = TLI.isFPExtFree(VT);
// fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
if (N0.getOpcode() == ISD::FMUL &&
(Aggressive || N0->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(ISD::FNEG, SL, VT, N1));
}
// fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
// Note: Commutes FSUB operands.
if (N1.getOpcode() == ISD::FMUL &&
(Aggressive || N1->hasOneUse()))
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
N1.getOperand(0)),
N1.getOperand(1), N0);
// fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
if (N0.getOpcode() == ISD::FNEG &&
N0.getOperand(0).getOpcode() == ISD::FMUL &&
(Aggressive || (N0->hasOneUse() && N0.getOperand(0).hasOneUse()))) {
SDValue N00 = N0.getOperand(0).getOperand(0);
SDValue N01 = N0.getOperand(0).getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N00), N01,
DAG.getNode(ISD::FNEG, SL, VT, N1));
}
// Look through FP_EXTEND nodes to do more combining.
if (UnsafeFPMath && LookThroughFPExt) {
// fold (fsub (fpext (fmul x, y)), z)
// -> (fma (fpext x), (fpext y), (fneg z))
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == ISD::FMUL)
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT, N1));
}
// fold (fsub x, (fpext (fmul y, z)))
// -> (fma (fneg (fpext y)), (fpext z), x)
// Note: Commutes FSUB operands.
if (N1.getOpcode() == ISD::FP_EXTEND) {
SDValue N10 = N1.getOperand(0);
if (N10.getOpcode() == ISD::FMUL)
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(0))),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(1)),
N0);
}
// fold (fsub (fpext (fneg (fmul x, y))), z)
//   -> (fneg (fma (fpext x), (fpext y), z))
// Note: This could be removed with appropriate canonicalization of the
// input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
// orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
// us from implementing the canonicalization in visitFSUB.
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == ISD::FNEG) {
SDValue N000 = N00.getOperand(0);
if (N000.getOpcode() == ISD::FMUL) {
return DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N000.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N000.getOperand(1)),
N1));
}
}
}
// fold (fsub (fneg (fpext (fmul x, y))), z)
//   -> (fneg (fma (fpext x), (fpext y), z))
// Note: This could be removed with appropriate canonicalization of the
// input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
// orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
// us from implementing the canonicalization in visitFSUB.
if (N0.getOpcode() == ISD::FNEG) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == ISD::FP_EXTEND) {
SDValue N000 = N00.getOperand(0);
if (N000.getOpcode() == ISD::FMUL) {
return DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N000.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N000.getOperand(1)),
N1));
}
}
}
}
// More folding opportunities when target permits.
if ((UnsafeFPMath || HasFMAD) && Aggressive) {
// fold (fsub (fma x, y, (fmul u, v)), z)
// -> (fma x, y, (fma u, v, (fneg z)))
if (N0.getOpcode() == PreferredFusedOpcode &&
N0.getOperand(2).getOpcode() == ISD::FMUL) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(2).getOperand(0),
N0.getOperand(2).getOperand(1),
DAG.getNode(ISD::FNEG, SL, VT,
N1)));
}
// fold (fsub x, (fma y, z, (fmul u, v)))
// -> (fma (fneg y), z, (fma (fneg u), v, x))
if (N1.getOpcode() == PreferredFusedOpcode &&
N1.getOperand(2).getOpcode() == ISD::FMUL) {
SDValue N20 = N1.getOperand(2).getOperand(0);
SDValue N21 = N1.getOperand(2).getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
N1.getOperand(0)),
N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N20),
N21, N0));
}
if (UnsafeFPMath && LookThroughFPExt) {
// fold (fsub (fma x, y, (fpext (fmul u, v))), z)
// -> (fma x, y, (fma (fpext u), (fpext v), (fneg z)))
if (N0.getOpcode() == PreferredFusedOpcode) {
SDValue N02 = N0.getOperand(2);
if (N02.getOpcode() == ISD::FP_EXTEND) {
SDValue N020 = N02.getOperand(0);
if (N020.getOpcode() == ISD::FMUL)
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N020.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N020.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT,
N1)));
}
}
// fold (fsub (fpext (fma x, y, (fmul u, v))), z)
// -> (fma (fpext x), (fpext y),
// (fma (fpext u), (fpext v), (fneg z)))
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == PreferredFusedOpcode) {
SDValue N002 = N00.getOperand(2);
if (N002.getOpcode() == ISD::FMUL)
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N00.getOperand(1)),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N002.getOperand(0)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N002.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT,
N1)));
}
}
// fold (fsub x, (fma y, z, (fpext (fmul u, v))))
// -> (fma (fneg y), z, (fma (fneg (fpext u)), (fpext v), x))
if (N1.getOpcode() == PreferredFusedOpcode &&
N1.getOperand(2).getOpcode() == ISD::FP_EXTEND) {
SDValue N120 = N1.getOperand(2).getOperand(0);
if (N120.getOpcode() == ISD::FMUL) {
SDValue N1200 = N120.getOperand(0);
SDValue N1201 = N120.getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)),
N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL,
VT, N1200)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N1201),
N0));
}
}
// fold (fsub x, (fpext (fma y, z, (fmul u, v))))
// -> (fma (fneg (fpext y)), (fpext z),
// (fma (fneg (fpext u)), (fpext v), x))
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
if (N1.getOpcode() == ISD::FP_EXTEND &&
N1.getOperand(0).getOpcode() == PreferredFusedOpcode) {
SDValue N100 = N1.getOperand(0).getOperand(0);
SDValue N101 = N1.getOperand(0).getOperand(1);
SDValue N102 = N1.getOperand(0).getOperand(2);
if (N102.getOpcode() == ISD::FMUL) {
SDValue N1020 = N102.getOperand(0);
SDValue N1021 = N102.getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N100)),
DAG.getNode(ISD::FP_EXTEND, SL, VT, N101),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL,
VT, N1020)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N1021),
N0));
}
}
}
}
return SDValue();
}
SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (fadd c1, c2) -> c1 + c2
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FADD, DL, VT, N0, N1);
// canonicalize constant to RHS
if (N0CFP && !N1CFP)
return DAG.getNode(ISD::FADD, DL, VT, N1, N0);
// fold (fadd A, (fneg B)) -> (fsub A, B)
if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
isNegatibleForFree(N1, LegalOperations, TLI, &Options) == 2)
return DAG.getNode(ISD::FSUB, DL, VT, N0,
GetNegatedExpression(N1, DAG, LegalOperations));
// fold (fadd (fneg A), B) -> (fsub B, A)
if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
isNegatibleForFree(N0, LegalOperations, TLI, &Options) == 2)
return DAG.getNode(ISD::FSUB, DL, VT, N1,
GetNegatedExpression(N0, DAG, LegalOperations));
// If 'unsafe math' is enabled, fold lots of things.
if (Options.UnsafeFPMath) {
// No FP constant should be created after legalization as the Instruction
// Selection pass has a hard time dealing with FP constants.
bool AllowNewConst = (Level < AfterLegalizeDAG);
// fold (fadd A, 0) -> A
if (N1CFP && N1CFP->isZero())
return N0;
// fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
if (N1CFP && N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() &&
isa<ConstantFPSDNode>(N0.getOperand(1)))
return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0),
DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1));
// If allowed, fold (fadd (fneg x), x) -> 0.0
if (AllowNewConst && N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1)
return DAG.getConstantFP(0.0, DL, VT);
// If allowed, fold (fadd x, (fneg x)) -> 0.0
if (AllowNewConst && N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0)
return DAG.getConstantFP(0.0, DL, VT);
// We can fold chains of FADD's of the same value into multiplications.
// This transform is not safe in general because we are reducing the number
// of rounding steps.
if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) {
if (N0.getOpcode() == ISD::FMUL) {
ConstantFPSDNode *CFP00 = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
ConstantFPSDNode *CFP01 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
// (fadd (fmul x, c), x) -> (fmul x, c+1)
if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, SDValue(CFP01, 0),
DAG.getConstantFP(1.0, DL, VT));
return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP);
}
// (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
N1.getOperand(0) == N1.getOperand(1) &&
N0.getOperand(0) == N1.getOperand(0)) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, SDValue(CFP01, 0),
DAG.getConstantFP(2.0, DL, VT));
return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP);
}
}
if (N1.getOpcode() == ISD::FMUL) {
ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
ConstantFPSDNode *CFP11 = dyn_cast<ConstantFPSDNode>(N1.getOperand(1));
// (fadd x, (fmul x, c)) -> (fmul x, c+1)
if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, SDValue(CFP11, 0),
DAG.getConstantFP(1.0, DL, VT));
return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP);
}
// (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD &&
N0.getOperand(0) == N0.getOperand(1) &&
N1.getOperand(0) == N0.getOperand(0)) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, SDValue(CFP11, 0),
DAG.getConstantFP(2.0, DL, VT));
return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP);
}
}
if (N0.getOpcode() == ISD::FADD && AllowNewConst) {
ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
// (fadd (fadd x, x), x) -> (fmul x, 3.0)
if (!CFP && N0.getOperand(0) == N0.getOperand(1) &&
(N0.getOperand(0) == N1)) {
return DAG.getNode(ISD::FMUL, DL, VT,
N1, DAG.getConstantFP(3.0, DL, VT));
}
}
if (N1.getOpcode() == ISD::FADD && AllowNewConst) {
ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
// (fadd x, (fadd x, x)) -> (fmul x, 3.0)
if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
N1.getOperand(0) == N0) {
return DAG.getNode(ISD::FMUL, DL, VT,
N0, DAG.getConstantFP(3.0, DL, VT));
}
}
// (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0)
if (AllowNewConst &&
N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
N0.getOperand(0) == N0.getOperand(1) &&
N1.getOperand(0) == N1.getOperand(1) &&
N0.getOperand(0) == N1.getOperand(0)) {
return DAG.getNode(ISD::FMUL, DL, VT,
N0.getOperand(0), DAG.getConstantFP(4.0, DL, VT));
}
}
} // enable-unsafe-fp-math
// FADD -> FMA combines:
SDValue Fused = visitFADDForFMACombine(N);
if (Fused) {
AddToWorklist(Fused.getNode());
return Fused;
}
return SDValue();
}
SDValue DAGCombiner::visitFSUB(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
EVT VT = N->getValueType(0);
SDLoc dl(N);
const TargetOptions &Options = DAG.getTarget().Options;
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (fsub c1, c2) -> c1-c2
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FSUB, dl, VT, N0, N1);
// fold (fsub A, (fneg B)) -> (fadd A, B)
if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
return DAG.getNode(ISD::FADD, dl, VT, N0,
GetNegatedExpression(N1, DAG, LegalOperations));
// If 'unsafe math' is enabled, fold lots of things.
if (Options.UnsafeFPMath) {
// (fsub A, 0) -> A
if (N1CFP && N1CFP->isZero())
return N0;
// (fsub 0, B) -> -B
if (N0CFP && N0CFP->isZero()) {
if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
return GetNegatedExpression(N1, DAG, LegalOperations);
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, dl, VT, N1);
}
// (fsub x, x) -> 0.0
if (N0 == N1)
return DAG.getConstantFP(0.0f, dl, VT);
// (fsub x, (fadd x, y)) -> (fneg y)
// (fsub x, (fadd y, x)) -> (fneg y)
if (N1.getOpcode() == ISD::FADD) {
SDValue N10 = N1->getOperand(0);
SDValue N11 = N1->getOperand(1);
if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI, &Options))
return GetNegatedExpression(N11, DAG, LegalOperations);
if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI, &Options))
return GetNegatedExpression(N10, DAG, LegalOperations);
}
}
// FSUB -> FMA combines:
SDValue Fused = visitFSUBForFMACombine(N);
if (Fused) {
AddToWorklist(Fused.getNode());
return Fused;
}
return SDValue();
}
SDValue DAGCombiner::visitFMUL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
// fold vector ops
if (VT.isVector()) {
// This just handles C1 * C2 for vectors. Other vector folds are below.
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
}
// fold (fmul c1, c2) -> c1*c2
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FMUL, DL, VT, N0, N1);
// canonicalize constant to RHS
if (isConstantFPBuildVectorOrConstantFP(N0) &&
!isConstantFPBuildVectorOrConstantFP(N1))
return DAG.getNode(ISD::FMUL, DL, VT, N1, N0);
// fold (fmul A, 1.0) -> A
if (N1CFP && N1CFP->isExactlyValue(1.0))
return N0;
if (Options.UnsafeFPMath) {
// fold (fmul A, 0) -> 0
if (N1CFP && N1CFP->isZero())
return N1;
// fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
if (N0.getOpcode() == ISD::FMUL) {
// Fold scalars or any vector constants (not just splats).
// This fold is done in general by InstCombine, but extra fmul insts
// may have been generated during lowering.
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
auto *BV00 = dyn_cast<BuildVectorSDNode>(N00);
auto *BV01 = dyn_cast<BuildVectorSDNode>(N01);
// Check 1: Make sure that the first operand of the inner multiply is NOT
// a constant. Otherwise, we may induce infinite looping.
if (!(isConstOrConstSplatFP(N00) || (BV00 && BV00->isConstant()))) {
// Check 2: Make sure that the second operand of the inner multiply and
// the second operand of the outer multiply are constants.
if ((N1CFP && isConstOrConstSplatFP(N01)) ||
(BV1 && BV01 && BV1->isConstant() && BV01->isConstant())) {
SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1);
return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts);
}
}
}
// fold (fmul (fadd x, x), c) -> (fmul x, (fmul 2.0, c))
// Undo the fmul 2.0, x -> fadd x, x transformation, since if it occurs
// during an early run of DAGCombiner can prevent folding with fmuls
// inserted during lowering.
if (N0.getOpcode() == ISD::FADD && N0.getOperand(0) == N0.getOperand(1)) {
const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1);
return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts);
}
}
// fold (fmul X, 2.0) -> (fadd X, X)
if (N1CFP && N1CFP->isExactlyValue(+2.0))
return DAG.getNode(ISD::FADD, DL, VT, N0, N0);
// fold (fmul X, -1.0) -> (fneg X)
if (N1CFP && N1CFP->isExactlyValue(-1.0))
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, DL, VT, N0);
// fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) {
if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
if (LHSNeg == 2 || RHSNeg == 2)
return DAG.getNode(ISD::FMUL, DL, VT,
GetNegatedExpression(N0, DAG, LegalOperations),
GetNegatedExpression(N1, DAG, LegalOperations));
}
}
return SDValue();
}
SDValue DAGCombiner::visitFMA(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
SDLoc dl(N);
const TargetOptions &Options = DAG.getTarget().Options;
// Constant fold FMA.
if (isa<ConstantFPSDNode>(N0) &&
isa<ConstantFPSDNode>(N1) &&
isa<ConstantFPSDNode>(N2)) {
return DAG.getNode(ISD::FMA, dl, VT, N0, N1, N2);
}
if (Options.UnsafeFPMath) {
if (N0CFP && N0CFP->isZero())
return N2;
if (N1CFP && N1CFP->isZero())
return N2;
}
if (N0CFP && N0CFP->isExactlyValue(1.0))
return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
if (N1CFP && N1CFP->isExactlyValue(1.0))
return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);
// Canonicalize (fma c, x, y) -> (fma x, c, y)
if (N0CFP && !N1CFP)
return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);
// (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
if (Options.UnsafeFPMath && N1CFP &&
N2.getOpcode() == ISD::FMUL &&
N0 == N2.getOperand(0) &&
N2.getOperand(1).getOpcode() == ISD::ConstantFP) {
return DAG.getNode(ISD::FMUL, dl, VT, N0,
DAG.getNode(ISD::FADD, dl, VT, N1, N2.getOperand(1)));
}
// (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
if (Options.UnsafeFPMath &&
N0.getOpcode() == ISD::FMUL && N1CFP &&
N0.getOperand(1).getOpcode() == ISD::ConstantFP) {
return DAG.getNode(ISD::FMA, dl, VT,
N0.getOperand(0),
DAG.getNode(ISD::FMUL, dl, VT, N1, N0.getOperand(1)),
N2);
}
// (fma x, 1, y) -> (fadd x, y)
// (fma x, -1, y) -> (fadd (fneg x), y)
if (N1CFP) {
if (N1CFP->isExactlyValue(1.0))
return DAG.getNode(ISD::FADD, dl, VT, N0, N2);
if (N1CFP->isExactlyValue(-1.0) &&
(!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
SDValue RHSNeg = DAG.getNode(ISD::FNEG, dl, VT, N0);
AddToWorklist(RHSNeg.getNode());
return DAG.getNode(ISD::FADD, dl, VT, N2, RHSNeg);
}
}
// (fma x, c, x) -> (fmul x, (c+1))
if (Options.UnsafeFPMath && N1CFP && N0 == N2)
return DAG.getNode(ISD::FMUL, dl, VT, N0,
DAG.getNode(ISD::FADD, dl, VT,
N1, DAG.getConstantFP(1.0, dl, VT)));
// (fma x, c, (fneg x)) -> (fmul x, (c-1))
if (Options.UnsafeFPMath && N1CFP &&
N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0)
return DAG.getNode(ISD::FMUL, dl, VT, N0,
DAG.getNode(ISD::FADD, dl, VT,
N1, DAG.getConstantFP(-1.0, dl, VT)));
return SDValue();
}
SDValue DAGCombiner::visitFDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
// fold vector ops
if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
// fold (fdiv c1, c2) -> c1/c2
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1);
if (Options.UnsafeFPMath) {
// fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
if (N1CFP) {
// Compute the reciprocal 1.0 / c2.
APFloat N1APF = N1CFP->getValueAPF();
APFloat Recip(N1APF.getSemantics(), 1); // 1.0
APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
// Only do the transform if the reciprocal is a legal fp immediate that
// isn't too nasty (e.g. NaN, denormal, ...).
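// For example, X / 2.0 becomes X * 0.5 exactly (opOK), while X / 3.0
// produces an inexact but still acceptable reciprocal (opInexact).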
if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty
(!LegalOperations ||
// FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
// backend)... we should handle this gracefully after Legalize.
// TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) ||
TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) ||
TLI.isFPImmLegal(Recip, VT)))
return DAG.getNode(ISD::FMUL, DL, VT, N0,
DAG.getConstantFP(Recip, DL, VT));
}
// If this FDIV is part of a reciprocal square root, it may be folded
// into a target-specific square root estimate instruction.
if (N1.getOpcode() == ISD::FSQRT) {
if (SDValue RV = BuildRsqrtEstimate(N1.getOperand(0))) {
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
}
} else if (N1.getOpcode() == ISD::FP_EXTEND &&
N1.getOperand(0).getOpcode() == ISD::FSQRT) {
if (SDValue RV = BuildRsqrtEstimate(N1.getOperand(0).getOperand(0))) {
RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV);
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
}
} else if (N1.getOpcode() == ISD::FP_ROUND &&
N1.getOperand(0).getOpcode() == ISD::FSQRT) {
if (SDValue RV = BuildRsqrtEstimate(N1.getOperand(0).getOperand(0))) {
RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1));
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
}
} else if (N1.getOpcode() == ISD::FMUL) {
// Look through an FMUL. Even though this won't remove the FDIV directly,
// it's still worthwhile to get rid of the FSQRT if possible.
SDValue SqrtOp;
SDValue OtherOp;
if (N1.getOperand(0).getOpcode() == ISD::FSQRT) {
SqrtOp = N1.getOperand(0);
OtherOp = N1.getOperand(1);
} else if (N1.getOperand(1).getOpcode() == ISD::FSQRT) {
SqrtOp = N1.getOperand(1);
OtherOp = N1.getOperand(0);
}
if (SqrtOp.getNode()) {
// We found a FSQRT, so try to make this fold:
// x / (y * sqrt(z)) -> x * (rsqrt(z) / y)
if (SDValue RV = BuildRsqrtEstimate(SqrtOp.getOperand(0))) {
RV = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, RV, OtherOp);
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
}
}
}
// Fold into a reciprocal estimate and multiply instead of a real divide.
if (SDValue RV = BuildReciprocalEstimate(N1)) {
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
}
}
// (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) {
if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
if (LHSNeg == 2 || RHSNeg == 2)
return DAG.getNode(ISD::FDIV, SDLoc(N), VT,
GetNegatedExpression(N0, DAG, LegalOperations),
GetNegatedExpression(N1, DAG, LegalOperations));
}
}
// Combine multiple FDIVs with the same divisor into multiple FMULs by the
// reciprocal.
// E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip)
// Notice that this is not always beneficial. One reason is that different
// targets may have different costs for FDIV and FMUL, so sometimes the cost
// of two FDIVs may be lower than the cost of one FDIV and two FMULs. Another
// reason is that the critical path is increased from "one FDIV" to "one FDIV
// + one FMUL".
if (Options.UnsafeFPMath) {
// Skip if current node is a reciprocal.
if (N0CFP && N0CFP->isExactlyValue(1.0))
return SDValue();
// Find all FDIV users of the same divisor.
// Use a set because duplicates may be present in the user list.
SetVector<SDNode *> Users;
for (auto *U : N1->uses())
if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1)
Users.insert(U);
if (TLI.combineRepeatedFPDivisors(Users.size())) {
SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
// FIXME: This optimization requires some level of fast-math, so the
// created reciprocal node should at least have the 'allowReciprocal'
// fast-math-flag set.
SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1);
// Dividend / Divisor -> Dividend * Reciprocal
for (auto *U : Users) {
SDValue Dividend = U->getOperand(0);
if (Dividend != FPOne) {
SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend,
Reciprocal);
CombineTo(U, NewNode);
} else if (U != Reciprocal.getNode()) {
// In the absence of fast-math-flags, this user node is always the
// same node as Reciprocal, but with FMF they may be different nodes.
CombineTo(U, Reciprocal);
}
}
return SDValue(N, 0); // N was replaced.
}
}
return SDValue();
}
SDValue DAGCombiner::visitFREM(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
// fold (frem c1, c2) -> fmod(c1,c2)
if (N0CFP && N1CFP)
return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1);
return SDValue();
}
SDValue DAGCombiner::visitFSQRT(SDNode *N) {
if (!DAG.getTarget().Options.UnsafeFPMath || TLI.isFsqrtCheap())
return SDValue();
// Compute this as X * (1/sqrt(X)) = X * (X ** -0.5)
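// (X * X**-0.5 == X**0.5 == sqrt(X) for X > 0; the X == 0 case is fixed up
// by the select below.)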
SDValue RV = BuildRsqrtEstimate(N->getOperand(0));
if (!RV)
return SDValue();
EVT VT = RV.getValueType();
SDLoc DL(N);
RV = DAG.getNode(ISD::FMUL, DL, VT, N->getOperand(0), RV);
AddToWorklist(RV.getNode());
// Unfortunately, RV is now NaN if the input was exactly 0.
// Select out this case and force the answer to 0.
SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
EVT CCVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
SDValue ZeroCmp = DAG.getSetCC(DL, CCVT, N->getOperand(0), Zero, ISD::SETEQ);
AddToWorklist(ZeroCmp.getNode());
AddToWorklist(RV.getNode());
return DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
ZeroCmp, Zero, RV);
}
SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
if (N0CFP && N1CFP) // Constant fold
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1);
if (N1CFP) {
const APFloat& V = N1CFP->getValueAPF();
// copysign(x, c1) -> fabs(x) iff ispos(c1)
// copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
if (!V.isNegative()) {
if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
} else {
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, SDLoc(N), VT,
DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0));
}
}
// copysign(fabs(x), y) -> copysign(x, y)
// copysign(fneg(x), y) -> copysign(x, y)
// copysign(copysign(x,z), y) -> copysign(x, y)
if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
N0.getOpcode() == ISD::FCOPYSIGN)
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
N0.getOperand(0), N1);
// copysign(x, abs(y)) -> abs(x)
if (N1.getOpcode() == ISD::FABS)
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
// copysign(x, copysign(y,z)) -> copysign(x, z)
if (N1.getOpcode() == ISD::FCOPYSIGN)
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
N0, N1.getOperand(1));
// copysign(x, fp_extend(y)) -> copysign(x, y)
// copysign(x, fp_round(y)) -> copysign(x, y)
if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND)
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
N0, N1.getOperand(0));
return SDValue();
}
SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT OpVT = N0.getValueType();
// fold (sint_to_fp c1) -> c1fp
if (isConstantIntBuildVectorOrConstantInt(N0) &&
// ...but only if the target supports immediate floating-point values
(!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
// If the input is a legal type, and SINT_TO_FP is not legal on this target,
// but UINT_TO_FP is legal on this target, try to convert.
if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
// If the sign bit is known to be zero, we can change this to UINT_TO_FP.
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
}
// The next optimizations are desirable only if SELECT_CC can be lowered.
if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
// fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
!VT.isVector() &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
SDLoc DL(N);
SDValue Ops[] =
{ N0.getOperand(0), N0.getOperand(1),
DAG.getConstantFP(-1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
N0.getOperand(2) };
return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
}
// fold (sint_to_fp (zext (setcc x, y, cc))) ->
//      (select_cc x, y, 1.0, 0.0, cc)
if (N0.getOpcode() == ISD::ZERO_EXTEND &&
N0.getOperand(0).getOpcode() == ISD::SETCC && !VT.isVector() &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
SDLoc DL(N);
SDValue Ops[] =
{ N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
N0.getOperand(0).getOperand(2) };
return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
}
}
return SDValue();
}
SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT OpVT = N0.getValueType();
// fold (uint_to_fp c1) -> c1fp
if (isConstantIntBuildVectorOrConstantInt(N0) &&
// ...but only if the target supports immediate floating-point values
(!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
// If the input is a legal type, and UINT_TO_FP is not legal on this target,
// but SINT_TO_FP is legal on this target, try to convert.
if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
// If the sign bit is known to be zero, we can change this to SINT_TO_FP.
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
}
// The next optimizations are desirable only if SELECT_CC can be lowered.
if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
// fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)
if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
(!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
SDLoc DL(N);
SDValue Ops[] =
{ N0.getOperand(0), N0.getOperand(1),
DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
N0.getOperand(2) };
return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
}
}
return SDValue();
}
// Fold (fp_to_{s/u}int ({s/u}int_to_fp x)) -> zext x, sext x, trunc x, or x
static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.getOpcode() != ISD::UINT_TO_FP && N0.getOpcode() != ISD::SINT_TO_FP)
return SDValue();
SDValue Src = N0.getOperand(0);
EVT SrcVT = Src.getValueType();
bool IsInputSigned = N0.getOpcode() == ISD::SINT_TO_FP;
bool IsOutputSigned = N->getOpcode() == ISD::FP_TO_SINT;
// We can safely assume the conversion won't overflow the output range,
// because (for example) (uint8_t)18293.f is undefined behavior.
// Since we can assume the conversion won't overflow, our decision as to
// whether the input will fit in the float should depend on the minimum
// of the input range and output range.
// This means this is also safe for a signed input and unsigned output, since
// a negative input would lead to undefined behavior.
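// For example, with a 'float' intermediate (24-bit significand), an i16
// round trip needs at most 16 value bits, which fits in 24, so it folds to
// x; an i32 round trip needs at least 31 value bits, so it does not.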
unsigned InputSize = (int)SrcVT.getScalarSizeInBits() - IsInputSigned;
unsigned OutputSize = (int)VT.getScalarSizeInBits() - IsOutputSigned;
unsigned ActualSize = std::min(InputSize, OutputSize);
const fltSemantics &sem = DAG.EVTToAPFloatSemantics(N0.getValueType());
// We can only fold away the float conversion if the input range can be
// represented exactly in the float range.
if (APFloat::semanticsPrecision(sem) >= ActualSize) {
if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits()) {
unsigned ExtOp = IsInputSigned && IsOutputSigned ? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND;
return DAG.getNode(ExtOp, SDLoc(N), VT, Src);
}
if (VT.getScalarSizeInBits() < SrcVT.getScalarSizeInBits())
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Src);
if (SrcVT == VT)
return Src;
return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Src);
}
return SDValue();
}
SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (fp_to_sint c1fp) -> c1
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);
return FoldIntToFPToInt(N, DAG);
}
SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (fp_to_uint c1fp) -> c1
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);
return FoldIntToFPToInt(N, DAG);
}
SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
EVT VT = N->getValueType(0);
// fold (fp_round c1fp) -> c1fp
if (N0CFP)
return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1);
// fold (fp_round (fp_extend x)) -> x
if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
return N0.getOperand(0);
// fold (fp_round (fp_round x)) -> (fp_round x)
if (N0.getOpcode() == ISD::FP_ROUND) {
const bool NIsTrunc = N->getConstantOperandVal(1) == 1;
const bool N0IsTrunc = N0.getNode()->getConstantOperandVal(1) == 1;
// If the first fp_round isn't a value preserving truncation, it might
// introduce a tie in the second fp_round that wouldn't occur in the
// single-step fp_round we want to fold to.
// In other words, double rounding isn't the same as rounding.
// Also, this is a value preserving truncation iff both fp_round's are.
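// (Decimal analogy: 1.49 rounds directly to 1, but rounding first to one
// digit gives 1.5, which then rounds to 2.)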
if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) {
SDLoc DL(N);
return DAG.getNode(ISD::FP_ROUND, DL, VT, N0.getOperand(0),
DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL));
}
}
// fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
N0.getOperand(0), N1);
AddToWorklist(Tmp.getNode());
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
Tmp, N0.getOperand(1));
}
return SDValue();
}
SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
// fold (fp_round_inreg c1fp) -> c1fp
if (N0CFP && isTypeLegal(EVT)) {
SDLoc DL(N);
SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), DL, EVT);
return DAG.getNode(ISD::FP_EXTEND, DL, VT, Round);
}
return SDValue();
}
SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// If this is fp_round(fp_extend(x)), don't fold it; allow ourselves to be folded.
if (N->hasOneUse() &&
N->use_begin()->getOpcode() == ISD::FP_ROUND)
return SDValue();
// fold (fp_extend c1fp) -> c1fp
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);
// fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op)
if (N0.getOpcode() == ISD::FP16_TO_FP &&
TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal)
return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0));
// Turn fp_extend(fp_round(X, 1)) -> X since the fp_round doesn't affect the
// value of X.
if (N0.getOpcode() == ISD::FP_ROUND
&& N0.getNode()->getConstantOperandVal(1) == 1) {
SDValue In = N0.getOperand(0);
if (In.getValueType() == VT) return In;
if (VT.bitsLT(In.getValueType()))
return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT,
In, N0.getOperand(1));
return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In);
}
// fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(),
DAG.getNode(ISD::FP_ROUND, SDLoc(N0),
N0.getValueType(), ExtLoad,
DAG.getIntPtrConstant(1, SDLoc(N0))),
ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
return SDValue();
}
SDValue DAGCombiner::visitFCEIL(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (fceil c1) -> fceil(c1)
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ftrunc c1) -> ftrunc(c1)
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);
return SDValue();
}
SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ffloor c1) -> ffloor(c1)
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);
return SDValue();
}
// FIXME: FNEG and FABS have a lot in common; refactor.
SDValue DAGCombiner::visitFNEG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// Constant fold FNEG.
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0);
if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(),
&DAG.getTarget().Options))
return GetNegatedExpression(N0, DAG, LegalOperations);
// Transform fneg(bitconvert(x)) -> bitconvert(x ^ sign) to avoid loading
// constant pool values.
if (!TLI.isFNegFree(VT) &&
N0.getOpcode() == ISD::BITCAST &&
N0.getNode()->hasOneUse()) {
SDValue Int = N0.getOperand(0);
EVT IntVT = Int.getValueType();
if (IntVT.isInteger() && !IntVT.isVector()) {
APInt SignMask;
if (N0.getValueType().isVector()) {
// For a vector, get a mask such as 0x80... per scalar element
// and splat it.
SignMask = APInt::getSignBit(N0.getValueType().getScalarSizeInBits());
SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
} else {
// For a scalar, just generate 0x80...
SignMask = APInt::getSignBit(IntVT.getSizeInBits());
}
SDLoc DL0(N0);
Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int,
DAG.getConstant(SignMask, DL0, IntVT));
AddToWorklist(Int.getNode());
return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Int);
}
}
// (fneg (fmul c, x)) -> (fmul -c, x)
if (N0.getOpcode() == ISD::FMUL &&
(N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) {
ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
if (CFP1) {
APFloat CVal = CFP1->getValueAPF();
CVal.changeSign();
if (Level >= AfterLegalizeDAG &&
(TLI.isFPImmLegal(CVal, N->getValueType(0)) ||
TLI.isOperationLegal(ISD::ConstantFP, N->getValueType(0))))
return DAG.getNode(
ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0.getOperand(1)));
}
}
return SDValue();
}
SDValue DAGCombiner::visitFMINNUM(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
const ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
const ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
if (N0CFP && N1CFP) {
const APFloat &C0 = N0CFP->getValueAPF();
const APFloat &C1 = N1CFP->getValueAPF();
return DAG.getConstantFP(minnum(C0, C1), SDLoc(N), N->getValueType(0));
}
if (N0CFP) {
EVT VT = N->getValueType(0);
// Canonicalize to constant on RHS.
return DAG.getNode(ISD::FMINNUM, SDLoc(N), VT, N1, N0);
}
return SDValue();
}
SDValue DAGCombiner::visitFMAXNUM(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
const ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
const ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
if (N0CFP && N1CFP) {
const APFloat &C0 = N0CFP->getValueAPF();
const APFloat &C1 = N1CFP->getValueAPF();
return DAG.getConstantFP(maxnum(C0, C1), SDLoc(N), N->getValueType(0));
}
if (N0CFP) {
EVT VT = N->getValueType(0);
// Canonicalize to constant on RHS.
return DAG.getNode(ISD::FMAXNUM, SDLoc(N), VT, N1, N0);
}
return SDValue();
}
SDValue DAGCombiner::visitFABS(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (fabs c1) -> fabs(c1)
if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
// fold (fabs (fabs x)) -> (fabs x)
if (N0.getOpcode() == ISD::FABS)
return N->getOperand(0);
// fold (fabs (fneg x)) -> (fabs x)
// fold (fabs (fcopysign x, y)) -> (fabs x)
if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));
// Transform fabs(bitconvert(x)) -> bitconvert(x & ~sign) to avoid loading
// constant pool values.
if (!TLI.isFAbsFree(VT) &&
N0.getOpcode() == ISD::BITCAST &&
N0.getNode()->hasOneUse()) {
SDValue Int = N0.getOperand(0);
EVT IntVT = Int.getValueType();
if (IntVT.isInteger() && !IntVT.isVector()) {
APInt SignMask;
if (N0.getValueType().isVector()) {
// For a vector, get a mask such as 0x7f... per scalar element
// and splat it.
SignMask = ~APInt::getSignBit(N0.getValueType().getScalarSizeInBits());
SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
} else {
// For a scalar, just generate 0x7f...
SignMask = ~APInt::getSignBit(IntVT.getSizeInBits());
}
SDLoc DL(N0);
Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
DAG.getConstant(SignMask, DL, IntVT));
AddToWorklist(Int.getNode());
return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Int);
}
}
return SDValue();
}
SDValue DAGCombiner::visitBRCOND(SDNode *N) {
SDValue Chain = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
// If N is a constant we could fold this into a fallthrough or unconditional
// branch. However that doesn't happen very often in normal code, because
// Instcombine/SimplifyCFG should have handled the available opportunities.
// If we did this folding here, it would be necessary to update the
// MachineBasicBlock CFG, which is awkward.
// fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
// on the target.
if (N1.getOpcode() == ISD::SETCC &&
TLI.isOperationLegalOrCustom(ISD::BR_CC,
N1.getOperand(0).getValueType())) {
return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
Chain, N1.getOperand(2),
N1.getOperand(0), N1.getOperand(1), N2);
}
if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
(N1.getOperand(0).hasOneUse() &&
N1.getOperand(0).getOpcode() == ISD::SRL))) {
SDNode *Trunc = nullptr;
if (N1.getOpcode() == ISD::TRUNCATE) {
// Look past the truncate.
Trunc = N1.getNode();
N1 = N1.getOperand(0);
}
// Match this pattern so that we can generate simpler code:
//
// %a = ...
// %b = and i32 %a, 2
// %c = srl i32 %b, 1
// brcond i32 %c ...
//
// into
//
// %a = ...
// %b = and i32 %a, 2
// %c = setcc eq %b, 0
// brcond %c ...
//
// This applies only when the AND constant value has one bit set and the
// SRL constant is equal to the log2 of the AND constant. The back-end is
// smart enough to convert the result into a TEST/JMP sequence.
SDValue Op0 = N1.getOperand(0);
SDValue Op1 = N1.getOperand(1);
if (Op0.getOpcode() == ISD::AND &&
Op1.getOpcode() == ISD::Constant) {
SDValue AndOp1 = Op0.getOperand(1);
if (AndOp1.getOpcode() == ISD::Constant) {
const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
if (AndConst.isPowerOf2() &&
cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) {
SDLoc DL(N);
SDValue SetCC =
DAG.getSetCC(DL,
getSetCCResultType(Op0.getValueType()),
Op0, DAG.getConstant(0, DL, Op0.getValueType()),
ISD::SETNE);
SDValue NewBRCond = DAG.getNode(ISD::BRCOND, DL,
MVT::Other, Chain, SetCC, N2);
// Don't add the new BRCond into the worklist or else SimplifySelectCC
// will convert it back to (X & C1) >> C2.
CombineTo(N, NewBRCond, false);
// Truncate is dead.
if (Trunc)
deleteAndRecombine(Trunc);
// Replace the uses of SRL with SETCC
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
deleteAndRecombine(N1.getNode());
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
}
if (Trunc)
// Restore N1 if the above transformation doesn't match.
N1 = N->getOperand(1);
}
// Transform br(xor(x, y)) -> br(x != y)
// Transform br(xor(xor(x,y), 1)) -> br (x == y)
if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
SDNode *TheXor = N1.getNode();
SDValue Op0 = TheXor->getOperand(0);
SDValue Op1 = TheXor->getOperand(1);
if (Op0.getOpcode() == Op1.getOpcode()) {
// Avoid missing important xor optimizations.
SDValue Tmp = visitXOR(TheXor);
if (Tmp.getNode()) {
if (Tmp.getNode() != TheXor) {
DEBUG(dbgs() << "\nReplacing.8 ";
TheXor->dump(&DAG);
dbgs() << "\nWith: ";
Tmp.getNode()->dump(&DAG);
dbgs() << '\n');
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N1, Tmp);
deleteAndRecombine(TheXor);
return DAG.getNode(ISD::BRCOND, SDLoc(N),
MVT::Other, Chain, Tmp, N2);
}
// visitXOR has changed XOR's operands or replaced the XOR completely,
// bail out.
return SDValue(N, 0);
}
}
if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
bool Equal = false;
if (isOneConstant(Op0) && Op0.hasOneUse() &&
Op0.getOpcode() == ISD::XOR) {
TheXor = Op0.getNode();
Equal = true;
}
EVT SetCCVT = N1.getValueType();
if (LegalTypes)
SetCCVT = getSetCCResultType(SetCCVT);
SDValue SetCC = DAG.getSetCC(SDLoc(TheXor),
SetCCVT,
Op0, Op1,
Equal ? ISD::SETEQ : ISD::SETNE);
// Replace the uses of XOR with SETCC
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
deleteAndRecombine(N1.getNode());
return DAG.getNode(ISD::BRCOND, SDLoc(N),
MVT::Other, Chain, SetCC, N2);
}
}
return SDValue();
}
// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
//
SDValue DAGCombiner::visitBR_CC(SDNode *N) {
CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
// If N is a constant we could fold this into a fallthrough or unconditional
// branch. However that doesn't happen very often in normal code, because
// Instcombine/SimplifyCFG should have handled the available opportunities.
// If we did this folding here, it would be necessary to update the
// MachineBasicBlock CFG, which is awkward.
// Use SimplifySetCC to simplify SETCC's.
SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()),
CondLHS, CondRHS, CC->get(), SDLoc(N),
false);
if (Simp.getNode()) AddToWorklist(Simp.getNode());
// fold to a simpler setcc
if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
N->getOperand(0), Simp.getOperand(2),
Simp.getOperand(0), Simp.getOperand(1),
N->getOperand(4));
return SDValue();
}
/// Return true if 'Use' is a load or a store that uses N as its base pointer
/// and that N may be folded in the load / store addressing mode.
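/// For example, an (add x, 4) base pointer may fold into a [reg + imm]
/// addressing mode if the target reports that mode as legal.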
static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
SelectionDAG &DAG,
const TargetLowering &TLI) {
EVT VT;
unsigned AS;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
return false;
VT = LD->getMemoryVT();
AS = LD->getAddressSpace();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
return false;
VT = ST->getMemoryVT();
AS = ST->getAddressSpace();
} else
return false;
TargetLowering::AddrMode AM;
if (N->getOpcode() == ISD::ADD) {
ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (Offset)
// [reg +/- imm]
AM.BaseOffs = Offset->getSExtValue();
else
// [reg +/- reg]
AM.Scale = 1;
} else if (N->getOpcode() == ISD::SUB) {
ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (Offset)
// [reg +/- imm]
AM.BaseOffs = -Offset->getSExtValue();
else
// [reg +/- reg]
AM.Scale = 1;
} else
return false;
return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
VT.getTypeForEVT(*DAG.getContext()), AS);
}
/// Try turning a load/store into a pre-indexed load/store when the base
/// pointer is an add or subtract and it has other uses besides the load/store.
/// After the transformation, the new indexed load/store has effectively folded
/// the add/subtract in and all of its other uses are redirected to the
/// new load/store.
bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
if (Level < AfterLegalizeDAG)
return false;
bool isLoad = true;
SDValue Ptr;
EVT VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
if (LD->isIndexed())
return false;
VT = LD->getMemoryVT();
if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
!TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
return false;
Ptr = LD->getBasePtr();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
if (ST->isIndexed())
return false;
VT = ST->getMemoryVT();
if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
!TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
return false;
Ptr = ST->getBasePtr();
isLoad = false;
} else {
return false;
}
// If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
// out. There is no reason to make this a preinc/predec.
if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
Ptr.getNode()->hasOneUse())
return false;
// Ask the target to do addressing mode selection.
SDValue BasePtr;
SDValue Offset;
ISD::MemIndexedMode AM = ISD::UNINDEXED;
if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
return false;
// Backends without true r+i pre-indexed forms may need to pass a
// constant base with a variable offset so that constant coercion
// will work with the patterns in canonical form.
bool Swapped = false;
if (isa<ConstantSDNode>(BasePtr)) {
std::swap(BasePtr, Offset);
Swapped = true;
}
// Don't create an indexed load / store with zero offset.
if (isNullConstant(Offset))
return false;
// Try turning it into a pre-indexed load / store except when:
// 1) The new base ptr is a frame index.
// 2) If N is a store and the new base ptr is either the same as or is a
// predecessor of the value being stored.
// 3) Another use of old base ptr is a predecessor of N. If ptr is folded
// that would create a cycle.
// 4) All uses are load / store ops that use it as old base ptr.
// Check #1. Preinc'ing a frame index would require copying the stack pointer
// (plus the implicit offset) to a register to preinc anyway.
if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
return false;
// Check #2.
if (!isLoad) {
SDValue Val = cast<StoreSDNode>(N)->getValue();
if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
return false;
}
// If the offset is a constant, there may be other adds of constants that
// can be folded with this one. We should do this to avoid having to keep
// a copy of the original base pointer.
SmallVector<SDNode *, 16> OtherUses;
if (isa<ConstantSDNode>(Offset))
for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(),
UE = BasePtr.getNode()->use_end();
UI != UE; ++UI) {
SDUse &Use = UI.getUse();
// Skip the use that is Ptr and uses of other results from BasePtr's
// node (important for nodes that return multiple results).
if (Use.getUser() == Ptr.getNode() || Use != BasePtr)
continue;
if (Use.getUser()->isPredecessorOf(N))
continue;
if (Use.getUser()->getOpcode() != ISD::ADD &&
Use.getUser()->getOpcode() != ISD::SUB) {
OtherUses.clear();
break;
}
SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1);
if (!isa<ConstantSDNode>(Op1)) {
OtherUses.clear();
break;
}
// FIXME: In some cases, we can be smarter about this.
if (Op1.getValueType() != Offset.getValueType()) {
OtherUses.clear();
break;
}
OtherUses.push_back(Use.getUser());
}
if (Swapped)
std::swap(BasePtr, Offset);
// Now check for #3 and #4.
bool RealUse = false;
// Caches for hasPredecessorHelper
SmallPtrSet<const SDNode *, 32> Visited;
SmallVector<const SDNode *, 16> Worklist;
for (SDNode *Use : Ptr.getNode()->uses()) {
if (Use == N)
continue;
if (N->hasPredecessorHelper(Use, Visited, Worklist))
return false;
// If Ptr may be folded into the addressing mode of another use, then it's
// not profitable to do this transformation.
if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
RealUse = true;
}
if (!RealUse)
return false;
SDValue Result;
if (isLoad)
Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
BasePtr, Offset, AM);
else
Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
BasePtr, Offset, AM);
++PreIndexedNodes;
++NodesCombined;
DEBUG(dbgs() << "\nReplacing.4 ";
N->dump(&DAG);
dbgs() << "\nWith: ";
Result.getNode()->dump(&DAG);
dbgs() << '\n');
WorklistRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
} else {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
}
// Finally, since the node is now dead, remove it from the graph.
deleteAndRecombine(N);
if (Swapped)
std::swap(BasePtr, Offset);
// Replace other uses of BasePtr that can be updated to use Ptr
for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
unsigned OffsetIdx = 1;
if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
OffsetIdx = 0;
assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
BasePtr.getNode() && "Expected BasePtr operand");
// We need to replace ptr0 in the following expression:
// x0 * offset0 + y0 * ptr0 = t0
// knowing that
// x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
//
// where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
// indexed load/store and the expression that needs to be re-written.
//
// Therefore, we have:
// t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
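//
// For example, for a pre-increment t1 = ptr0 + 8 (x1 = y0 = y1 = 1) and
// another use t0 = ptr0 + 12 (x0 = 1), this yields t0 = (12 - 8) + t1,
// i.e. t1 + 4.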
ConstantSDNode *CN =
cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
int X0, X1, Y0, Y1;
APInt Offset0 = CN->getAPIntValue();
APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
APInt CNV = Offset0;
if (X0 < 0) CNV = -CNV;
if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
else CNV = CNV - Offset1;
SDLoc DL(OtherUses[i]);
// We can now generate the new expression.
SDValue NewOp1 = DAG.getConstant(CNV, DL, CN->getValueType(0));
SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);
SDValue NewUse = DAG.getNode(Opcode,
DL,
OtherUses[i]->getValueType(0), NewOp1, NewOp2);
DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
deleteAndRecombine(OtherUses[i]);
}
// Replace the uses of Ptr with uses of the updated base value.
DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
deleteAndRecombine(Ptr.getNode());
return true;
}
/// Try to combine a load/store with an add/sub of the base pointer node into a
/// post-indexed load/store. The transformation effectively folds the
/// add/subtract into the new indexed load/store, and all of its uses are
/// redirected to the new load/store.
bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
if (Level < AfterLegalizeDAG)
return false;
bool isLoad = true;
SDValue Ptr;
EVT VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
if (LD->isIndexed())
return false;
VT = LD->getMemoryVT();
if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
!TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
return false;
Ptr = LD->getBasePtr();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
if (ST->isIndexed())
return false;
VT = ST->getMemoryVT();
if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
!TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
return false;
Ptr = ST->getBasePtr();
isLoad = false;
} else {
return false;
}
if (Ptr.getNode()->hasOneUse())
return false;
for (SDNode *Op : Ptr.getNode()->uses()) {
if (Op == N ||
(Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
continue;
SDValue BasePtr;
SDValue Offset;
ISD::MemIndexedMode AM = ISD::UNINDEXED;
if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
// Don't create an indexed load / store with zero offset.
if (isNullConstant(Offset))
continue;
// Try turning it into a post-indexed load / store except when
// 1) All uses are load / store ops that use it as base ptr (and
// it may be folded as addressing mode).
// 2) Op must be independent of N, i.e. Op is neither a predecessor
// nor a successor of N. Otherwise, if Op is folded that would
// create a cycle.
if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
continue;
// Check for #1.
bool TryNext = false;
for (SDNode *Use : BasePtr.getNode()->uses()) {
if (Use == Ptr.getNode())
continue;
// If all the uses are load / store addresses, then don't do the
// transformation.
if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
bool RealUse = false;
for (SDNode *UseUse : Use->uses()) {
if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
RealUse = true;
}
if (!RealUse) {
TryNext = true;
break;
}
}
}
if (TryNext)
continue;
// Check for #2
if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
SDValue Result = isLoad
? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
BasePtr, Offset, AM)
: DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
BasePtr, Offset, AM);
++PostIndexedNodes;
++NodesCombined;
DEBUG(dbgs() << "\nReplacing.5 ";
N->dump(&DAG);
dbgs() << "\nWith: ";
Result.getNode()->dump(&DAG);
dbgs() << '\n');
WorklistRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
} else {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
}
// Finally, since the node is now dead, remove it from the graph.
deleteAndRecombine(N);
// Replace the uses of Use with uses of the updated base value.
DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
Result.getValue(isLoad ? 1 : 0));
deleteAndRecombine(Op);
return true;
}
}
}
return false;
}
/// \brief Return the base-pointer arithmetic from an indexed \p LD.
SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {
ISD::MemIndexedMode AM = LD->getAddressingMode();
assert(AM != ISD::UNINDEXED);
SDValue BP = LD->getOperand(1);
SDValue Inc = LD->getOperand(2);
// Some backends use TargetConstants for load offsets, but don't expect
// TargetConstants in general ADD nodes. We can convert these constants into
// regular Constants (if the constant is not opaque).
assert((Inc.getOpcode() != ISD::TargetConstant ||
!cast<ConstantSDNode>(Inc)->isOpaque()) &&
"Cannot split out indexing using opaque target constants");
if (Inc.getOpcode() == ISD::TargetConstant) {
ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc);
Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc),
ConstInc->getValueType(0));
}
unsigned Opc =
(AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB);
return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc);
}
SDValue DAGCombiner::visitLOAD(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
// If load is not volatile and there are no uses of the loaded value (and
// the updated indexed value in case of indexed loads), change uses of the
// chain value into uses of the chain input (i.e. delete the dead load).
if (!LD->isVolatile()) {
if (N->getValueType(1) == MVT::Other) {
// Unindexed loads.
if (!N->hasAnyUseOfValue(0)) {
// It's not safe to use the two value CombineTo variant here. e.g.
// v1, chain2 = load chain1, loc
// v2, chain3 = load chain2, loc
// v3 = add v2, c
// Now we replace use of chain2 with chain1. This makes the second load
// isomorphic to the one we are deleting, and thus makes this load live.
DEBUG(dbgs() << "\nReplacing.6 ";
N->dump(&DAG);
dbgs() << "\nWith chain: ";
Chain.getNode()->dump(&DAG);
dbgs() << "\n");
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
if (N->use_empty())
deleteAndRecombine(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
} else {
// Indexed loads.
assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
// If this load has an opaque TargetConstant offset, then we cannot split
// the indexing into an add/sub directly (that TargetConstant may not be
// valid for a different type of node, and we cannot convert an opaque
// target constant into a regular constant).
bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant &&
cast<ConstantSDNode>(LD->getOperand(2))->isOpaque();
if (!N->hasAnyUseOfValue(0) &&
((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) {
SDValue Undef = DAG.getUNDEF(N->getValueType(0));
SDValue Index;
if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) {
Index = SplitIndexingFromLoad(LD);
// Try to fold the base pointer arithmetic into subsequent loads and
// stores.
AddUsersToWorklist(N);
} else
Index = DAG.getUNDEF(N->getValueType(1));
DEBUG(dbgs() << "\nReplacing.7 ";
N->dump(&DAG);
dbgs() << "\nWith: ";
Undef.getNode()->dump(&DAG);
dbgs() << " and 2 other values\n");
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
deleteAndRecombine(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
}
// If this load is directly stored, replace the load value with the stored
// value.
// TODO: Handle store large -> read small portion.
// TODO: Handle TRUNCSTORE/LOADEXT
if (ISD::isNormalLoad(N) && !LD->isVolatile()) {
if (ISD::isNON_TRUNCStore(Chain.getNode())) {
StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
if (PrevST->getBasePtr() == Ptr &&
PrevST->getValue().getValueType() == N->getValueType(0))
return CombineTo(N, Chain.getOperand(1), Chain);
}
}
// Try to infer better alignment information than the load already has.
if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
if (Align > LD->getMemOperand()->getBaseAlignment()) {
SDValue NewLoad =
DAG.getExtLoad(LD->getExtensionType(), SDLoc(N),
LD->getValueType(0),
Chain, Ptr, LD->getPointerInfo(),
LD->getMemoryVT(),
LD->isVolatile(), LD->isNonTemporal(),
LD->isInvariant(), Align, LD->getAAInfo());
if (NewLoad.getNode() != N)
return CombineTo(N, NewLoad, SDValue(NewLoad.getNode(), 1), true);
}
}
}
bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
: DAG.getSubtarget().useAA();
#ifndef NDEBUG
if (CombinerAAOnlyFunc.getNumOccurrences() &&
CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
UseAA = false;
#endif
if (UseAA && LD->isUnindexed()) {
// Walk up chain skipping non-aliasing memory nodes.
SDValue BetterChain = FindBetterChain(N, Chain);
// If there is a better chain.
if (Chain != BetterChain) {
SDValue ReplLoad;
// Replace the chain to avoid dependency.
if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
BetterChain, Ptr, LD->getMemOperand());
} else {
ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
LD->getValueType(0),
BetterChain, Ptr, LD->getMemoryVT(),
LD->getMemOperand());
}
// Create token factor to keep old chain connected.
SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
MVT::Other, Chain, ReplLoad.getValue(1));
// Make sure the new and old chains are cleaned up.
AddToWorklist(Token.getNode());
// Replace uses with load result and token factor. Don't add users
// to work list.
return CombineTo(N, ReplLoad.getValue(0), Token, false);
}
}
// Try transforming N to an indexed load.
if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
return SDValue(N, 0);
// Try to slice up N to more direct loads if the slices are mapped to
// different register banks or pairing can take place.
if (SliceUpLoad(N))
return SDValue(N, 0);
return SDValue();
}
namespace {
/// \brief Helper structure used to slice a load into smaller loads.
/// Basically a slice is obtained from the following sequence:
/// Origin = load Ty1, Base
/// Shift = srl Ty1 Origin, CstTy Amount
/// Inst = trunc Shift to Ty2
///
/// Then, it will be rewritten into:
/// Slice = load SliceTy, Base + SliceOffset
/// [Inst = zext Slice to Ty2], only if SliceTy != Ty2
///
/// SliceTy is deduced from the number of bits that are actually used to
/// build Inst.
struct LoadedSlice {
/// \brief Helper structure used to compute the cost of a slice.
struct Cost {
/// Are we optimizing for code size?
bool ForCodeSize;
/// Various costs.
unsigned Loads;
unsigned Truncates;
unsigned CrossRegisterBanksCopies;
unsigned ZExts;
unsigned Shift;
Cost(bool ForCodeSize = false)
: ForCodeSize(ForCodeSize), Loads(0), Truncates(0),
CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {}
/// \brief Get the cost of one isolated slice.
Cost(const LoadedSlice &LS, bool ForCodeSize = false)
: ForCodeSize(ForCodeSize), Loads(1), Truncates(0),
CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {
EVT TruncType = LS.Inst->getValueType(0);
EVT LoadedType = LS.getLoadedType();
if (TruncType != LoadedType &&
!LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
ZExts = 1;
}
/// \brief Account for slicing gain in the current cost.
/// Slicing provides a few gains, like removing a shift or a
/// truncate. This method grows the cost of the original
/// load with the gain from this slice.
void addSliceGain(const LoadedSlice &LS) {
// Each slice saves a truncate.
const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
if (!TLI.isTruncateFree(LS.Inst->getValueType(0),
LS.Inst->getOperand(0).getValueType()))
++Truncates;
// If there is a shift amount, this slice gets rid of it.
if (LS.Shift)
++Shift;
// If this slice can merge a cross register bank copy, account for it.
if (LS.canMergeExpensiveCrossRegisterBankCopy())
++CrossRegisterBanksCopies;
}
Cost &operator+=(const Cost &RHS) {
Loads += RHS.Loads;
Truncates += RHS.Truncates;
CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
ZExts += RHS.ZExts;
Shift += RHS.Shift;
return *this;
}
bool operator==(const Cost &RHS) const {
return Loads == RHS.Loads && Truncates == RHS.Truncates &&
CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
ZExts == RHS.ZExts && Shift == RHS.Shift;
}
bool operator!=(const Cost &RHS) const { return !(*this == RHS); }
bool operator<(const Cost &RHS) const {
// Assume cross-register-bank copies are as expensive as loads.
// FIXME: Do we want some more target hooks?
unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
// Unless we are optimizing for code size, consider the
// expensive operation first.
if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
return ExpensiveOpsLHS < ExpensiveOpsRHS;
return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
(RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
}
bool operator>(const Cost &RHS) const { return RHS < *this; }
bool operator<=(const Cost &RHS) const { return !(RHS < *this); }
bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
};
// The last instruction that represents the slice. This should be a
// truncate instruction.
SDNode *Inst;
// The original load instruction.
LoadSDNode *Origin;
// The right shift amount in bits from the original load.
unsigned Shift;
// The DAG from which Origin came.
// This is used to get some contextual information about legal types, etc.
SelectionDAG *DAG;
LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
unsigned Shift = 0, SelectionDAG *DAG = nullptr)
: Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
/// \brief Get the bits used in a chunk of bits \p BitWidth large.
/// \return Result is \p BitWidth bits wide, with used bits set to 1 and
/// unused bits set to 0.
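/// For example, for trunc(lshr(load i32, 16)) to i8, the result is 32 bits
/// wide with bits [23:16] set, i.e. 0x00FF0000.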
APInt getUsedBits() const {
// Reproduce the trunc(lshr) sequence:
// - Start from the truncated value.
// - Zero extend to the desired bit width.
// - Shift left.
assert(Origin && "No original load to compare against.");
unsigned BitWidth = Origin->getValueSizeInBits(0);
assert(Inst && "This slice is not bound to an instruction");
assert(Inst->getValueSizeInBits(0) <= BitWidth &&
"Extracted slice is bigger than the whole type!");
APInt UsedBits(Inst->getValueSizeInBits(0), 0);
UsedBits.setAllBits();
UsedBits = UsedBits.zext(BitWidth);
UsedBits <<= Shift;
return UsedBits;
}
/// \brief Get the size of the slice to be loaded in bytes.
unsigned getLoadedSize() const {
unsigned SliceSize = getUsedBits().countPopulation();
assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
return SliceSize / 8;
}
/// \brief Get the type that will be loaded for this slice.
/// Note: This may not be the final type for the slice.
EVT getLoadedType() const {
assert(DAG && "Missing context");
LLVMContext &Ctxt = *DAG->getContext();
return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8);
}
/// \brief Get the alignment of the load used for this slice.
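/// For example, a base alignment of 8 with an offset of 2 gives
/// MinAlign(8, 8 + 2) == 2.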
unsigned getAlignment() const {
unsigned Alignment = Origin->getAlignment();
unsigned Offset = getOffsetFromBase();
if (Offset != 0)
Alignment = MinAlign(Alignment, Alignment + Offset);
return Alignment;
}
/// \brief Check if this slice can be rewritten with legal operations.
bool isLegal() const {
// An invalid slice is not legal.
if (!Origin || !Inst || !DAG)
return false;
// Offsets are for indexed loads only; we do not handle that.
if (Origin->getOffset().getOpcode() != ISD::UNDEF)
return false;
const TargetLowering &TLI = DAG->getTargetLoweringInfo();
// Check that the type is legal.
EVT SliceType = getLoadedType();
if (!TLI.isTypeLegal(SliceType))
return false;
// Check that the load is legal for this type.
if (!TLI.isOperationLegal(ISD::LOAD, SliceType))
return false;
// Check that the offset can be computed.
// 1. Check its type.
EVT PtrType = Origin->getBasePtr().getValueType();
if (PtrType == MVT::Untyped || PtrType.isExtended())
return false;
// 2. Check that it fits in the immediate.
if (!TLI.isLegalAddImmediate(getOffsetFromBase()))
return false;
// 3. Check that the computation is legal.
if (!TLI.isOperationLegal(ISD::ADD, PtrType))
return false;
// Check that the zext is legal if it needs one.
EVT TruncateType = Inst->getValueType(0);
if (TruncateType != SliceType &&
!TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType))
return false;
return true;
}
/// \brief Get the offset in bytes of this slice in the original chunk of
/// bits.
/// \pre DAG != nullptr.
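/// For example, an i8 slice of an i32 load with Shift == 16 is at byte
/// offset 2 on little-endian and at 4 - 2 - 1 == 1 on big-endian.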
uint64_t getOffsetFromBase() const {
assert(DAG && "Missing context.");
bool IsBigEndian = DAG->getDataLayout().isBigEndian();
assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
uint64_t Offset = Shift / 8;
unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;
assert(!(Origin->getValueSizeInBits(0) & 0x7) &&
"The size of the original loaded type is not a multiple of a"
" byte.");
// If Offset is bigger than TySizeInBytes, it means we are loading all
// zeros. This should have been optimized before in the process.
assert(TySizeInBytes > Offset &&
"Invalid shift amount for given loaded size");
if (IsBigEndian)
Offset = TySizeInBytes - Offset - getLoadedSize();
return Offset;
}
/// \brief Generate the sequence of instructions to load the slice
/// represented by this object and redirect the uses of this slice to
/// this new sequence of instructions.
/// \pre this->Inst && this->Origin are valid Instructions and this
/// object passed the legal check: LoadedSlice::isLegal returned true.
/// \return The last instruction of the sequence used to load the slice.
SDValue loadSlice() const {
assert(Inst && Origin && "Unable to replace a non-existing slice.");
const SDValue &OldBaseAddr = Origin->getBasePtr();
SDValue BaseAddr = OldBaseAddr;
// Get the offset in that chunk of bytes w.r.t. the endianness.
int64_t Offset = static_cast<int64_t>(getOffsetFromBase());
assert(Offset >= 0 && "Offset too big to fit in int64_t!");
if (Offset) {
// BaseAddr = BaseAddr + Offset.
EVT ArithType = BaseAddr.getValueType();
SDLoc DL(Origin);
BaseAddr = DAG->getNode(ISD::ADD, DL, ArithType, BaseAddr,
DAG->getConstant(Offset, DL, ArithType));
}
// Create the type of the loaded slice according to its size.
EVT SliceType = getLoadedType();
// Create the load for the slice.
SDValue LastInst = DAG->getLoad(
SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr,
Origin->getPointerInfo().getWithOffset(Offset), Origin->isVolatile(),
Origin->isNonTemporal(), Origin->isInvariant(), getAlignment());
// If the final type is not the same as the loaded type, this means that
// we have to pad with zero. Create a zero extend for that.
EVT FinalType = Inst->getValueType(0);
if (SliceType != FinalType)
LastInst =
DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst);
return LastInst;
}
/// \brief Check if this slice can be merged with an expensive cross register
/// bank copy. E.g.,
/// i = load i32
/// f = bitcast i32 i to float
bool canMergeExpensiveCrossRegisterBankCopy() const {
if (!Inst || !Inst->hasOneUse())
return false;
SDNode *Use = *Inst->use_begin();
if (Use->getOpcode() != ISD::BITCAST)
return false;
assert(DAG && "Missing context");
const TargetLowering &TLI = DAG->getTargetLoweringInfo();
EVT ResVT = Use->getValueType(0);
const TargetRegisterClass *ResRC = TLI.getRegClassFor(ResVT.getSimpleVT());
const TargetRegisterClass *ArgRC =
TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT());
if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT))
return false;
// At this point, we know that we perform a cross-register-bank copy.
// Check if it is expensive.
const TargetRegisterInfo *TRI = DAG->getSubtarget().getRegisterInfo();
    // Assume bitcasts are cheap unless the two register classes have no
    // common subclass.
if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC))
return false;
// Check if it will be merged with the load.
// 1. Check the alignment constraint.
unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment(
ResVT.getTypeForEVT(*DAG->getContext()));
if (RequiredAlignment > getAlignment())
return false;
// 2. Check that the load is a legal operation for that type.
if (!TLI.isOperationLegal(ISD::LOAD, ResVT))
return false;
// 3. Check that we do not have a zext in the way.
if (Inst->getValueType(0) != getLoadedType())
return false;
return true;
}
};
} // end anonymous namespace
/// \brief Check that all bits set in \p UsedBits form a dense region, i.e.,
/// \p UsedBits looks like 0..0 1..1 0..0.
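/// For example (illustrative), 0x00FFFF00 is dense, while 0x00FF00FF is
/// not, because its set bits are separated by a zero byte.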
static bool areUsedBitsDense(const APInt &UsedBits) {
// If all the bits are one, this is dense!
if (UsedBits.isAllOnesValue())
return true;
// Get rid of the unused bits on the right.
APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
// Get rid of the unused bits on the left.
if (NarrowedUsedBits.countLeadingZeros())
NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
// Check that the chunk of bits is completely used.
return NarrowedUsedBits.isAllOnesValue();
}
/// \brief Check whether or not \p First and \p Second are next to each other
/// in memory. This means that there is no hole between the bits loaded
/// by \p First and the bits loaded by \p Second.
static bool areSlicesNextToEachOther(const LoadedSlice &First,
const LoadedSlice &Second) {
assert(First.Origin == Second.Origin && First.Origin &&
"Unable to match different memory origins.");
APInt UsedBits = First.getUsedBits();
assert((UsedBits & Second.getUsedBits()) == 0 &&
"Slices are not supposed to overlap.");
UsedBits |= Second.getUsedBits();
return areUsedBitsDense(UsedBits);
}
/// \brief Adjust the \p GlobalLSCost according to the target's
/// pairing capabilities and the layout of the slices.
/// \pre \p GlobalLSCost should account for at least as many loads as
/// there are in the slices in \p LoadedSlices.
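/// For example (illustrative), if the target can pair two adjacent i16
/// loads into a single instruction, merging two such slices removes one
/// load from \p GlobalLSCost.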
static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
LoadedSlice::Cost &GlobalLSCost) {
unsigned NumberOfSlices = LoadedSlices.size();
  // If there are fewer than 2 elements, no pairing is possible.
if (NumberOfSlices < 2)
return;
// Sort the slices so that elements that are likely to be next to each
// other in memory are next to each other in the list.
std::sort(LoadedSlices.begin(), LoadedSlices.end(),
[](const LoadedSlice &LHS, const LoadedSlice &RHS) {
assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
});
const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
  // First (resp. Second) is the first (resp. second) potential candidate
  // to be placed in a paired load.
const LoadedSlice *First = nullptr;
const LoadedSlice *Second = nullptr;
for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
// Set the beginning of the pair.
First = Second) {
Second = &LoadedSlices[CurrSlice];
// If First is NULL, it means we start a new pair.
// Get to the next slice.
if (!First)
continue;
EVT LoadedType = First->getLoadedType();
// If the types of the slices are different, we cannot pair them.
if (LoadedType != Second->getLoadedType())
continue;
// Check if the target supplies paired loads for this type.
unsigned RequiredAlignment = 0;
if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
// move to the next pair, this type is hopeless.
Second = nullptr;
continue;
}
// Check if we meet the alignment requirement.
if (RequiredAlignment > First->getAlignment())
continue;
// Check that both loads are next to each other in memory.
if (!areSlicesNextToEachOther(*First, *Second))
continue;
assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
--GlobalLSCost.Loads;
// Move to the next pair.
Second = nullptr;
}
}
/// \brief Check the profitability of all involved LoadedSlice.
/// Currently, it is considered profitable if there are exactly two
/// involved slices (1) which are (2) next to each other in memory, and
/// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
///
/// Note: The order of the elements in \p LoadedSlices may be modified, but not
/// the elements themselves.
///
/// FIXME: When the cost model is mature enough, we can relax
/// constraints (1) and (2).
static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
const APInt &UsedBits, bool ForCodeSize) {
unsigned NumberOfSlices = LoadedSlices.size();
if (StressLoadSlicing)
return NumberOfSlices > 1;
// Check (1).
if (NumberOfSlices != 2)
return false;
// Check (2).
if (!areUsedBitsDense(UsedBits))
return false;
// Check (3).
LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
// The original code has one big load.
OrigCost.Loads = 1;
for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
const LoadedSlice &LS = LoadedSlices[CurrSlice];
// Accumulate the cost of all the slices.
LoadedSlice::Cost SliceCost(LS, ForCodeSize);
GlobalSlicingCost += SliceCost;
// Account as cost in the original configuration the gain obtained
// with the current slices.
OrigCost.addSliceGain(LS);
}
// If the target supports paired load, adjust the cost accordingly.
adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
return OrigCost > GlobalSlicingCost;
}
/// \brief If the given load, \p N, is used only by trunc or trunc(lshr)
/// operations, split it into the various pieces being extracted.
///
/// This sort of thing is introduced by SROA.
/// This slicing takes care not to insert overlapping loads.
/// \pre \p N is a simple load (i.e., not an atomic or volatile load).
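/// For example (illustrative), on a little-endian target:
///   i32 %wide = load %p
///   i16 %lo   = trunc %wide
///   i16 %hi   = trunc (lshr %wide, 16)
/// may be rewritten as two independent i16 loads from %p and %p + 2.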
bool DAGCombiner::SliceUpLoad(SDNode *N) {
if (Level < AfterLegalizeDAG)
return false;
LoadSDNode *LD = cast<LoadSDNode>(N);
if (LD->isVolatile() || !ISD::isNormalLoad(LD) ||
!LD->getValueType(0).isInteger())
return false;
// Keep track of already used bits to detect overlapping values.
// In that case, we will just abort the transformation.
APInt UsedBits(LD->getValueSizeInBits(0), 0);
SmallVector<LoadedSlice, 4> LoadedSlices;
// Check if this load is used as several smaller chunks of bits.
// Basically, look for uses in trunc or trunc(lshr) and record a new chain
// of computation for each trunc.
for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
UI != UIEnd; ++UI) {
// Skip the uses of the chain.
if (UI.getUse().getResNo() != 0)
continue;
SDNode *User = *UI;
unsigned Shift = 0;
// Check if this is a trunc(lshr).
if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
isa<ConstantSDNode>(User->getOperand(1))) {
Shift = cast<ConstantSDNode>(User->getOperand(1))->getZExtValue();
User = *User->use_begin();
}
    // At this point, User is a truncate iff we encountered trunc or
    // trunc(lshr).
if (User->getOpcode() != ISD::TRUNCATE)
return false;
    // The width of the type must be a power of 2 and at least 8 bits.
// Otherwise the load cannot be represented in LLVM IR.
// Moreover, if we shifted with a non-8-bits multiple, the slice
// will be across several bytes. We do not support that.
unsigned Width = User->getValueSizeInBits(0);
if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
      return false;
// Build the slice for this chain of computations.
LoadedSlice LS(User, LD, Shift, &DAG);
APInt CurrentUsedBits = LS.getUsedBits();
// Check if this slice overlaps with another.
if ((CurrentUsedBits & UsedBits) != 0)
return false;
// Update the bits used globally.
UsedBits |= CurrentUsedBits;
// Check if the new slice would be legal.
if (!LS.isLegal())
return false;
// Record the slice.
LoadedSlices.push_back(LS);
}
// Abort slicing if it does not seem to be profitable.
if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
return false;
++SlicedLoads;
// Rewrite each chain to use an independent load.
// By construction, each chain can be represented by a unique load.
// Prepare the argument for the new token factor for all the slices.
SmallVector<SDValue, 8> ArgChains;
for (SmallVectorImpl<LoadedSlice>::const_iterator
LSIt = LoadedSlices.begin(),
LSItEnd = LoadedSlices.end();
LSIt != LSItEnd; ++LSIt) {
SDValue SliceInst = LSIt->loadSlice();
CombineTo(LSIt->Inst, SliceInst, true);
if (SliceInst.getNode()->getOpcode() != ISD::LOAD)
SliceInst = SliceInst.getOperand(0);
assert(SliceInst->getOpcode() == ISD::LOAD &&
"It takes more than a zext to get to the loaded slice!!");
ArgChains.push_back(SliceInst.getValue(1));
}
SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
ArgChains);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
return true;
}
/// Check to see if V is (and (load ptr), imm), where the load has
/// specific bytes cleared out. If so, return the byte size being masked out
/// and the shift amount.
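/// For example (illustrative), on i32, (and (load Ptr), 0xFFFF0000)
/// clears the low two bytes, so this returns {2, 0}: two bytes masked
/// out starting at byte offset 0.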
static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
std::pair<unsigned, unsigned> Result(0, 0);
// Check for the structure we're looking for.
if (V->getOpcode() != ISD::AND ||
!isa<ConstantSDNode>(V->getOperand(1)) ||
!ISD::isNormalLoad(V->getOperand(0).getNode()))
return Result;
// Check the chain and pointer.
LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
// The store should be chained directly to the load or be an operand of a
// tokenfactor.
if (LD == Chain.getNode())
; // ok.
else if (Chain->getOpcode() != ISD::TokenFactor)
return Result; // Fail.
else {
bool isOk = false;
for (const SDValue &ChainOp : Chain->op_values())
if (ChainOp.getNode() == LD) {
isOk = true;
break;
}
if (!isOk) return Result;
}
// This only handles simple types.
if (V.getValueType() != MVT::i16 &&
V.getValueType() != MVT::i32 &&
V.getValueType() != MVT::i64)
return Result;
// Check the constant mask. Invert it so that the bits being masked out are
// 0 and the bits being kept are 1. Use getSExtValue so that leading bits
// follow the sign bit for uniformity.
uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
unsigned NotMaskLZ = countLeadingZeros(NotMask);
if (NotMaskLZ & 7) return Result; // Must be multiple of a byte.
unsigned NotMaskTZ = countTrailingZeros(NotMask);
if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
if (NotMaskLZ == 64) return Result; // All zero mask.
  // See if we have a contiguous run of bits. If so, we have 0*1+0*
if (countTrailingOnes(NotMask >> NotMaskTZ) + NotMaskTZ + NotMaskLZ != 64)
return Result;
// Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
if (V.getValueType() != MVT::i64 && NotMaskLZ)
NotMaskLZ -= 64-V.getValueSizeInBits();
unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
switch (MaskedBytes) {
case 1:
case 2:
case 4: break;
default: return Result; // All one mask, or 5-byte mask.
}
  // Verify that the masked region starts at a multiple of the mask width so
  // that the access is aligned the same as the access width.
if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
Result.first = MaskedBytes;
Result.second = NotMaskTZ/8;
return Result;
}
/// Check to see if IVal is something that provides a value as specified by
/// MaskInfo. If so, replace the specified store with a narrower store of
/// truncated IVal.
static SDNode *
ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
SDValue IVal, StoreSDNode *St,
DAGCombiner *DC) {
unsigned NumBytes = MaskInfo.first;
unsigned ByteShift = MaskInfo.second;
SelectionDAG &DAG = DC->getDAG();
// Check to see if IVal is all zeros in the part being masked in by the 'or'
// that uses this. If not, this is not a replacement.
APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
ByteShift*8, (ByteShift+NumBytes)*8);
if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr;
// Check that it is legal on the target to do this. It is legal if the new
// VT we're shrinking to (i8/i16/i32) is legal or we're still before type
// legalization.
MVT VT = MVT::getIntegerVT(NumBytes*8);
if (!DC->isTypeLegal(VT))
return nullptr;
// Okay, we can do this! Replace the 'St' store with a store of IVal that is
// shifted by ByteShift and truncated down to NumBytes.
if (ByteShift) {
SDLoc DL(IVal);
IVal = DAG.getNode(ISD::SRL, DL, IVal.getValueType(), IVal,
DAG.getConstant(ByteShift*8, DL,
DC->getShiftAmountTy(IVal.getValueType())));
}
// Figure out the offset for the store and the alignment of the access.
unsigned StOffset;
unsigned NewAlign = St->getAlignment();
if (DAG.getDataLayout().isLittleEndian())
StOffset = ByteShift;
else
StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
SDValue Ptr = St->getBasePtr();
if (StOffset) {
SDLoc DL(IVal);
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(),
Ptr, DAG.getConstant(StOffset, DL, Ptr.getValueType()));
NewAlign = MinAlign(NewAlign, StOffset);
}
// Truncate down to the new size.
IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal);
++OpsNarrowed;
return DAG.getStore(St->getChain(), SDLoc(St), IVal, Ptr,
St->getPointerInfo().getWithOffset(StOffset),
false, false, NewAlign).getNode();
}
/// Look for sequence of load / op / store where op is one of 'or', 'xor', and
/// 'and' of immediates. If 'op' is only touching some of the loaded bits, try
/// narrowing the load and store if it would end up being a win for performance
/// or code size.
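/// For example (illustrative), on a little-endian target where i8 is
/// legal, "store (and (load p), 0xFFFF00FF), p" only changes byte 1 of
/// the stored value, so it can be narrowed to "store i8 0, p+1".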
SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
StoreSDNode *ST = cast<StoreSDNode>(N);
if (ST->isVolatile())
return SDValue();
SDValue Chain = ST->getChain();
SDValue Value = ST->getValue();
SDValue Ptr = ST->getBasePtr();
EVT VT = Value.getValueType();
if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
return SDValue();
unsigned Opc = Value.getOpcode();
// If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
// is a byte mask indicating a consecutive number of bytes, check to see if
// Y is known to provide just those bytes. If so, we try to replace the
// load + replace + store sequence with a single (narrower) store, which makes
// the load dead.
if (Opc == ISD::OR) {
std::pair<unsigned, unsigned> MaskedLoad;
MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
if (MaskedLoad.first)
if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
Value.getOperand(1), ST,this))
return SDValue(NewST, 0);
// Or is commutative, so try swapping X and Y.
MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
if (MaskedLoad.first)
if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
Value.getOperand(0), ST,this))
return SDValue(NewST, 0);
}
if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
Value.getOperand(1).getOpcode() != ISD::Constant)
return SDValue();
SDValue N0 = Value.getOperand(0);
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
Chain == SDValue(N0.getNode(), 1)) {
LoadSDNode *LD = cast<LoadSDNode>(N0);
if (LD->getBasePtr() != Ptr ||
LD->getPointerInfo().getAddrSpace() !=
ST->getPointerInfo().getAddrSpace())
return SDValue();
    // Find the type to narrow the load / op / store to.
SDValue N1 = Value.getOperand(1);
unsigned BitWidth = N1.getValueSizeInBits();
APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
if (Opc == ISD::AND)
Imm ^= APInt::getAllOnesValue(BitWidth);
if (Imm == 0 || Imm.isAllOnesValue())
return SDValue();
unsigned ShAmt = Imm.countTrailingZeros();
unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
unsigned NewBW = NextPowerOf2(MSB - ShAmt);
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
// The narrowing should be profitable, the load/store operation should be
// legal (or custom) and the store size should be equal to the NewVT width.
while (NewBW < BitWidth &&
(NewVT.getStoreSizeInBits() != NewBW ||
!TLI.isOperationLegalOrCustom(Opc, NewVT) ||
!TLI.isNarrowingProfitable(VT, NewVT))) {
NewBW = NextPowerOf2(NewBW);
NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
}
if (NewBW >= BitWidth)
return SDValue();
    // If the lowest changed bit does not start at a NewBW-bit boundary,
    // start at the previous boundary.
if (ShAmt % NewBW)
ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
APInt Mask = APInt::getBitsSet(BitWidth, ShAmt,
std::min(BitWidth, ShAmt + NewBW));
if ((Imm & Mask) == Imm) {
APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
if (Opc == ISD::AND)
NewImm ^= APInt::getAllOnesValue(NewBW);
uint64_t PtrOff = ShAmt / 8;
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
if (DAG.getDataLayout().isBigEndian())
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy))
return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),
Ptr.getValueType(), Ptr,
DAG.getConstant(PtrOff, SDLoc(LD),
Ptr.getValueType()));
SDValue NewLD = DAG.getLoad(NewVT, SDLoc(N0),
LD->getChain(), NewPtr,
LD->getPointerInfo().getWithOffset(PtrOff),
LD->isVolatile(), LD->isNonTemporal(),
LD->isInvariant(), NewAlign,
LD->getAAInfo());
SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD,
DAG.getConstant(NewImm, SDLoc(Value),
NewVT));
SDValue NewST = DAG.getStore(Chain, SDLoc(N),
NewVal, NewPtr,
ST->getPointerInfo().getWithOffset(PtrOff),
false, false, NewAlign);
AddToWorklist(NewPtr.getNode());
AddToWorklist(NewLD.getNode());
AddToWorklist(NewVal.getNode());
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
++OpsNarrowed;
return NewST;
}
}
return SDValue();
}
/// For a given floating point load / store pair, if the load value isn't used
/// by any other operations, then consider transforming the pair to integer
/// load / store operations if the target deems the transformation profitable.
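/// For example (illustrative), "store (f32 load %p), %q" may become
/// "store (i32 load %p), %q" when the target finds the integer forms
/// desirable and the alignment allows it.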
SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Chain = ST->getChain();
SDValue Value = ST->getValue();
if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
Value.hasOneUse() &&
Chain == SDValue(Value.getNode(), 1)) {
LoadSDNode *LD = cast<LoadSDNode>(Value);
EVT VT = LD->getMemoryVT();
if (!VT.isFloatingPoint() ||
VT != ST->getMemoryVT() ||
LD->isNonTemporal() ||
ST->isNonTemporal() ||
LD->getPointerInfo().getAddrSpace() != 0 ||
ST->getPointerInfo().getAddrSpace() != 0)
return SDValue();
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
!TLI.isOperationLegal(ISD::STORE, IntVT) ||
!TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
!TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
return SDValue();
unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment();
Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue();
SDValue NewLD = DAG.getLoad(IntVT, SDLoc(Value),
LD->getChain(), LD->getBasePtr(),
LD->getPointerInfo(),
false, false, false, LDAlign);
SDValue NewST = DAG.getStore(NewLD.getValue(1), SDLoc(N),
NewLD, ST->getBasePtr(),
ST->getPointerInfo(),
false, false, STAlign);
AddToWorklist(NewLD.getNode());
AddToWorklist(NewST.getNode());
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
++LdStFP2Int;
return NewST;
}
return SDValue();
}
namespace {
/// Helper struct to parse and store a memory address as base + index + offset.
/// We ignore sign extensions when it is safe to do so.
/// The following two expressions are not equivalent. To differentiate we need
/// to store whether there was a sign extension involved in the index
/// computation.
/// (load (i64 add (i64 copyfromreg %c)
/// (i64 signextend (add (i8 load %index)
/// (i8 1))))
/// vs
///
/// (load (i64 add (i64 copyfromreg %c)
/// (i64 signextend (i32 add (i32 signextend (i8 load %index))
/// (i32 1)))))
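/// For example (illustrative), (add %base, (add (sext %idx), 16))
/// matches as Base = %base, Index = %idx, Offset = 16 with
/// IsIndexSignExt = true.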
struct BaseIndexOffset {
SDValue Base;
SDValue Index;
int64_t Offset;
bool IsIndexSignExt;
BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {}
BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
bool IsIndexSignExt) :
Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {}
bool equalBaseIndex(const BaseIndexOffset &Other) {
return Other.Base == Base && Other.Index == Index &&
Other.IsIndexSignExt == IsIndexSignExt;
}
  /// Parses the tree rooted at Ptr into base, index, and offset addresses.
static BaseIndexOffset match(SDValue Ptr) {
bool IsIndexSignExt = false;
// We only can pattern match BASE + INDEX + OFFSET. If Ptr is not an ADD
// instruction, then it could be just the BASE or everything else we don't
// know how to handle. Just use Ptr as BASE and give up.
if (Ptr->getOpcode() != ISD::ADD)
return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
// We know that we have at least an ADD instruction. Try to pattern match
// the simple case of BASE + OFFSET.
if (isa<ConstantSDNode>(Ptr->getOperand(1))) {
int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue();
return BaseIndexOffset(Ptr->getOperand(0), SDValue(), Offset,
IsIndexSignExt);
}
// Inside a loop the current BASE pointer is calculated using an ADD and a
// MUL instruction. In this case Ptr is the actual BASE pointer.
// (i64 add (i64 %array_ptr)
// (i64 mul (i64 %induction_var)
// (i64 %element_size)))
if (Ptr->getOperand(1)->getOpcode() == ISD::MUL)
return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
// Look at Base + Index + Offset cases.
SDValue Base = Ptr->getOperand(0);
SDValue IndexOffset = Ptr->getOperand(1);
// Skip signextends.
if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) {
IndexOffset = IndexOffset->getOperand(0);
IsIndexSignExt = true;
}
// Either the case of Base + Index (no offset) or something else.
if (IndexOffset->getOpcode() != ISD::ADD)
return BaseIndexOffset(Base, IndexOffset, 0, IsIndexSignExt);
// Now we have the case of Base + Index + offset.
SDValue Index = IndexOffset->getOperand(0);
SDValue Offset = IndexOffset->getOperand(1);
if (!isa<ConstantSDNode>(Offset))
return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
// Ignore signextends.
if (Index->getOpcode() == ISD::SIGN_EXTEND) {
Index = Index->getOperand(0);
IsIndexSignExt = true;
} else IsIndexSignExt = false;
int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue();
return BaseIndexOffset(Base, Index, Off, IsIndexSignExt);
}
};
} // namespace
SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG,
SDLoc SL,
ArrayRef<MemOpLink> Stores,
EVT Ty) const {
SmallVector<SDValue, 8> BuildVector;
for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I)
BuildVector.push_back(cast<StoreSDNode>(Stores[I].MemNode)->getValue());
return DAG.getNode(ISD::BUILD_VECTOR, SL, Ty, BuildVector);
}
bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT,
unsigned NumElem, bool IsConstantSrc, bool UseVector) {
// Make sure we have something to merge.
if (NumElem < 2)
return false;
int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned LatestNodeUsed = 0;
for (unsigned i=0; i < NumElem; ++i) {
// Find a chain for the new wide-store operand. Notice that some
// of the store nodes that we found may not be selected for inclusion
// in the wide store. The chain we use needs to be the chain of the
// latest store node which is *used* and replaced by the wide store.
if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
LatestNodeUsed = i;
}
// The latest Node in the DAG.
LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;
SDLoc DL(StoreNodes[0].MemNode);
SDValue StoredVal;
if (UseVector) {
// Find a legal type for the vector store.
EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
if (IsConstantSrc) {
StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Ty);
} else {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElem ; ++i) {
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
SDValue Val = St->getValue();
// All of the operands of a BUILD_VECTOR must have the same type.
if (Val.getValueType() != MemVT)
return false;
Ops.push_back(Val);
}
// Build the extracted vector elements back into a vector.
StoredVal = DAG.getNode(ISD::BUILD_VECTOR, DL, Ty, Ops);
}
} else {
// We should always use a vector store when merging extracted vector
// elements, so this path implies a store of constants.
assert(IsConstantSrc && "Merged vector elements should use vector store");
unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
APInt StoreInt(SizeInBits, 0);
// Construct a single integer constant which is made of the smaller
// constant inputs.
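    // Illustrative example (little endian): merging two i16 stores of A
    // (offset 0) and B (offset 2) walks the elements in reverse, giving
    // StoreInt == (B << 16) | A, whose in-memory byte order is A's bytes
    // followed by B's, matching the original layout.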
bool IsLE = DAG.getDataLayout().isLittleEndian();
for (unsigned i = 0; i < NumElem ; ++i) {
unsigned Idx = IsLE ? (NumElem - 1 - i) : i;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
SDValue Val = St->getValue();
StoreInt <<= ElementSizeBytes * 8;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
StoreInt |= C->getAPIntValue().zext(SizeInBits);
} else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
StoreInt |= C->getValueAPF().bitcastToAPInt().zext(SizeInBits);
} else {
llvm_unreachable("Invalid constant element type");
}
}
// Create the new Load and Store operations.
EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
}
SDValue NewStore = DAG.getStore(LatestOp->getChain(), DL, StoredVal,
FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(),
false, false,
FirstInChain->getAlignment());
// Replace the last store with the new store
CombineTo(LatestOp, NewStore);
// Erase all other stores.
for (unsigned i = 0; i < NumElem ; ++i) {
if (StoreNodes[i].MemNode == LatestOp)
continue;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
// ReplaceAllUsesWith will replace all uses that existed when it was
// called, but graph optimizations may cause new ones to appear. For
// example, the case in pr14333 looks like
//
// St's chain -> St -> another store -> X
//
// And the only difference from St to the other store is the chain.
    // When we change its chain to be St's chain they become identical,
// get CSEed and the net result is that X is now a use of St.
// Since we know that St is redundant, just iterate.
while (!St->use_empty())
DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
deleteAndRecombine(St);
}
return true;
}
static bool allowableAlignment(const SelectionDAG &DAG,
const TargetLowering &TLI, EVT EVTTy,
unsigned AS, unsigned Align) {
if (TLI.allowsMisalignedMemoryAccesses(EVTTy, AS, Align))
return true;
Type *Ty = EVTTy.getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = DAG.getDataLayout().getPrefTypeAlignment(Ty);
return (Align >= ABIAlignment);
}
void DAGCombiner::getStoreMergeAndAliasCandidates(
StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) {
// This holds the base pointer, index, and the offset in bytes from the base
// pointer.
BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr());
// We must have a base and an offset.
if (!BasePtr.Base.getNode())
return;
// Do not handle stores to undef base pointers.
if (BasePtr.Base.getOpcode() == ISD::UNDEF)
return;
  // Walk up the chain and look for nodes with offsets from the same
  // base pointer. Stop when reaching an instruction of a different kind
  // or an instruction that has a different base pointer.
EVT MemVT = St->getMemoryVT();
unsigned Seq = 0;
StoreSDNode *Index = St;
while (Index) {
// If the chain has more than one use, then we can't reorder the mem ops.
if (Index != St && !SDValue(Index, 0)->hasOneUse())
break;
// Find the base pointer and offset for this memory node.
BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr());
// Check that the base pointer is the same as the original one.
if (!Ptr.equalBaseIndex(BasePtr))
break;
// The memory operands must not be volatile.
if (Index->isVolatile() || Index->isIndexed())
break;
// No truncation.
if (StoreSDNode *St = dyn_cast<StoreSDNode>(Index))
if (St->isTruncatingStore())
break;
// The stored memory type must be the same.
if (Index->getMemoryVT() != MemVT)
break;
// We found a potential memory operand to merge.
StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++));
// Find the next memory operand in the chain. If the next operand in the
// chain is a store then move up and continue the scan with the next
    // memory operand. If the next operand is a load, save it and use alias
// information to check if it interferes with anything.
SDNode *NextInChain = Index->getChain().getNode();
while (1) {
if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
// We found a store node. Use it for the next iteration.
Index = STn;
break;
} else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
if (Ldn->isVolatile()) {
Index = nullptr;
break;
}
// Save the load node for later. Continue the scan.
AliasLoadNodes.push_back(Ldn);
NextInChain = Ldn->getChain().getNode();
continue;
} else {
Index = nullptr;
break;
}
}
}
}
bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
if (OptLevel == CodeGenOpt::None)
return false;
EVT MemVT = St->getMemoryVT();
int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
Attribute::NoImplicitFloat);
// This function cannot currently deal with non-byte-sized memory sizes.
if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
return false;
// Don't merge vectors into wider inputs.
if (MemVT.isVector() || !MemVT.isSimple())
return false;
// Perform an early exit check. Do not bother looking at stored values that
// are not constants, loads, or extracted vector elements.
SDValue StoredVal = St->getValue();
bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
isa<ConstantFPSDNode>(StoredVal);
bool IsExtractVecEltSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT);
if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecEltSrc)
return false;
// Only look at ends of store sequences.
SDValue Chain = SDValue(St, 0);
if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
return false;
// Save the LoadSDNodes that we find in the chain.
// We need to make sure that these nodes do not interfere with
// any of the store nodes.
SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
// Save the StoreSDNodes that we find in the chain.
SmallVector<MemOpLink, 8> StoreNodes;
getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes);
// Check if there is anything to merge.
if (StoreNodes.size() < 2)
return false;
// Sort the memory operands according to their distance from the base pointer.
std::sort(StoreNodes.begin(), StoreNodes.end(),
[](MemOpLink LHS, MemOpLink RHS) {
return LHS.OffsetFromBase < RHS.OffsetFromBase ||
(LHS.OffsetFromBase == RHS.OffsetFromBase &&
LHS.SequenceNum > RHS.SequenceNum);
});
// Scan the memory operations on the chain and find the first non-consecutive
// store memory address.
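  // For example (illustrative), offsets {0, 4, 8, 20} with 4-byte
  // elements stop the scan at index 2, since 20 != 12.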
unsigned LastConsecutiveStore = 0;
int64_t StartAddress = StoreNodes[0].OffsetFromBase;
for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) {
// Check that the addresses are consecutive starting from the second
// element in the list of stores.
if (i > 0) {
int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
if (CurrAddress - StartAddress != (ElementSizeBytes * i))
break;
}
bool Alias = false;
// Check if this store interferes with any of the loads that we found.
for (unsigned ld = 0, lde = AliasLoadNodes.size(); ld < lde; ++ld)
if (isAlias(AliasLoadNodes[ld], StoreNodes[i].MemNode)) {
Alias = true;
break;
}
    // We found a load that aliases with this store. Stop the sequence.
if (Alias)
break;
// Mark this node as useful.
LastConsecutiveStore = i;
}
// The node with the lowest store address.
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
unsigned FirstStoreAlign = FirstInChain->getAlignment();
// Store the constants into memory as one consecutive store.
if (IsConstantSrc) {
unsigned LastLegalType = 0;
unsigned LastLegalVectorType = 0;
bool NonZero = false;
for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
SDValue StoredVal = St->getValue();
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) {
NonZero |= !C->isNullValue();
} else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) {
NonZero |= !C->getConstantFPValue()->isNullValue();
} else {
// Non-constant.
break;
}
// Find a legal type for the constant store.
unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS,
FirstStoreAlign)) {
LastLegalType = i+1;
// Or check whether a truncstore is legal.
} else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy =
TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
allowableAlignment(DAG, TLI, LegalizedStoredValueTy, FirstStoreAS,
FirstStoreAlign)) {
LastLegalType = i + 1;
}
}
// Find a legal type for the vector store.
EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
if (TLI.isTypeLegal(Ty) &&
allowableAlignment(DAG, TLI, Ty, FirstStoreAS, FirstStoreAlign)) {
LastLegalVectorType = i + 1;
}
}
    // We only use vectors if the constants are known to be zero or the
    // target says such a vector store is cheap, and the function is not
    // marked with the noimplicitfloat attribute.
if (NoVectors) {
LastLegalVectorType = 0;
} else if (NonZero && !TLI.storeOfVectorConstantIsCheap(MemVT,
LastLegalVectorType,
FirstStoreAS)) {
LastLegalVectorType = 0;
}
// Check if we found a legal integer type to store.
if (LastLegalType == 0 && LastLegalVectorType == 0)
return false;
bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors;
unsigned NumElem = UseVector ? LastLegalVectorType : LastLegalType;
return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem,
true, UseVector);
}
// When extracting multiple vector elements, try to store them
// in one vector store rather than a sequence of scalar stores.
if (IsExtractVecEltSrc) {
unsigned NumElem = 0;
for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) {
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
SDValue StoredVal = St->getValue();
// This restriction could be loosened.
// Bail out if any stored values are not elements extracted from a vector.
// It should be possible to handle mixed sources, but load sources need
// more careful handling (see the block of code below that handles
// consecutive loads).
if (StoredVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return false;
// Find a legal type for the vector store.
EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
if (TLI.isTypeLegal(Ty) &&
allowableAlignment(DAG, TLI, Ty, FirstStoreAS, FirstStoreAlign))
NumElem = i + 1;
}
return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem,
false, true);
}
// Below we handle the case of multiple consecutive stores that
// come from multiple consecutive loads. We merge them into a single
// wide load and a single wide store.
// Look for load nodes which are used by the stored values.
SmallVector<MemOpLink, 8> LoadNodes;
  // Find acceptable loads. Loads must have the same chain (token factor),
  // must not be extending, volatile, or indexed, and must be consecutive.
BaseIndexOffset LdBasePtr;
for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue());
if (!Ld) break;
// Loads must only have one use.
if (!Ld->hasNUsesOfValue(1, 0))
break;
// The memory operands must not be volatile.
if (Ld->isVolatile() || Ld->isIndexed())
break;
// We do not accept ext loads.
if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
break;
// The stored memory type must be the same.
if (Ld->getMemoryVT() != MemVT)
break;
BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr());
    // If this is not the first pointer that we have checked.
if (LdBasePtr.Base.getNode()) {
// The base ptr must be the same.
if (!LdPtr.equalBaseIndex(LdBasePtr))
break;
} else {
      // Remember this base pointer to check the remaining loads against.
LdBasePtr = LdPtr;
}
// We found a potential memory operand to merge.
LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0));
}
if (LoadNodes.size() < 2)
return false;
// If we have load/store pair instructions and we only have two values,
// don't bother.
unsigned RequiredAlignment;
if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
St->getAlignment() >= RequiredAlignment)
return false;
LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
unsigned FirstLoadAS = FirstLoad->getAddressSpace();
unsigned FirstLoadAlign = FirstLoad->getAlignment();
// Scan the memory operations on the chain and find the first non-consecutive
  // load memory address. This variable holds the index in the load node
  // array.
unsigned LastConsecutiveLoad = 0;
  // These variables refer to sizes (element counts), not indices into the
  // array.
unsigned LastLegalVectorType = 0;
unsigned LastLegalIntegerType = 0;
StartAddress = LoadNodes[0].OffsetFromBase;
SDValue FirstChain = FirstLoad->getChain();
for (unsigned i = 1; i < LoadNodes.size(); ++i) {
    // All loads must share the same chain.
if (LoadNodes[i].MemNode->getChain() != FirstChain)
break;
int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
if (CurrAddress - StartAddress != (ElementSizeBytes * i))
break;
LastConsecutiveLoad = i;
// Find a legal type for the vector store.
EVT StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
if (TLI.isTypeLegal(StoreTy) &&
allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS, FirstStoreAlign) &&
allowableAlignment(DAG, TLI, StoreTy, FirstLoadAS, FirstLoadAlign)) {
LastLegalVectorType = i + 1;
}
// Find a legal type for the integer store.
unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS, FirstStoreAlign) &&
allowableAlignment(DAG, TLI, StoreTy, FirstLoadAS, FirstLoadAlign))
LastLegalIntegerType = i + 1;
// Or check whether a truncstore and extload is legal.
else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy =
TLI.getTypeToTransformTo(*DAG.getContext(), StoreTy);
if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy) &&
allowableAlignment(DAG, TLI, LegalizedStoredValueTy, FirstStoreAS,
FirstStoreAlign) &&
allowableAlignment(DAG, TLI, LegalizedStoredValueTy, FirstLoadAS,
FirstLoadAlign))
LastLegalIntegerType = i+1;
}
}
// Only use vector types if the vector type is larger than the integer type.
// If they are the same, use integers.
bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors;
unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType);
  // We add +1 here because the LastXXX variables hold indices while
  // NumElem is a count.
unsigned NumElem = std::min(LastConsecutiveStore, LastConsecutiveLoad) + 1;
NumElem = std::min(LastLegalType, NumElem);
if (NumElem < 2)
return false;
// The latest Node in the DAG.
unsigned LatestNodeUsed = 0;
for (unsigned i=1; i<NumElem; ++i) {
// Find a chain for the new wide-store operand. Notice that some
// of the store nodes that we found may not be selected for inclusion
// in the wide store. The chain we use needs to be the chain of the
// latest store node which is *used* and replaced by the wide store.
if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
LatestNodeUsed = i;
}
LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;
// Find if it is better to use vectors or integers to load and store
// to memory.
EVT JointMemOpVT;
if (UseVectorTy) {
JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
} else {
unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
}
SDLoc LoadDL(LoadNodes[0].MemNode);
SDLoc StoreDL(StoreNodes[0].MemNode);
SDValue NewLoad = DAG.getLoad(
JointMemOpVT, LoadDL, FirstLoad->getChain(), FirstLoad->getBasePtr(),
FirstLoad->getPointerInfo(), false, false, false, FirstLoadAlign);
SDValue NewStore = DAG.getStore(
LatestOp->getChain(), StoreDL, NewLoad, FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(), false, false, FirstStoreAlign);
// Replace one of the loads with the new load.
LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[0].MemNode);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
SDValue(NewLoad.getNode(), 1));
// Remove the rest of the load chains.
for (unsigned i = 1; i < NumElem ; ++i) {
// Replace all chain users of the old load nodes with the chain of the new
// load node.
LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain());
}
// Replace the last store with the new store.
CombineTo(LatestOp, NewStore);
// Erase all other stores.
for (unsigned i = 0; i < NumElem ; ++i) {
// Remove all Store nodes.
if (StoreNodes[i].MemNode == LatestOp)
continue;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
deleteAndRecombine(St);
}
return true;
}
SDValue DAGCombiner::visitSTORE(SDNode *N) {
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Chain = ST->getChain();
SDValue Value = ST->getValue();
SDValue Ptr = ST->getBasePtr();
// If this is a store of a bit convert, store the input value if the
// resultant store does not need a higher alignment than the original.
if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
ST->isUnindexed()) {
unsigned OrigAlign = ST->getAlignment();
EVT SVT = Value.getOperand(0).getValueType();
unsigned Align = DAG.getDataLayout().getABITypeAlignment(
SVT.getTypeForEVT(*DAG.getContext()));
if (Align <= OrigAlign &&
((!LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0),
Ptr, ST->getPointerInfo(), ST->isVolatile(),
ST->isNonTemporal(), OrigAlign,
ST->getAAInfo());
}
// Turn 'store undef, Ptr' -> nothing.
if (Value.getOpcode() == ISD::UNDEF && ST->isUnindexed())
return Chain;
// Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
// NOTE: If the original store is volatile, this transform must not increase
// the number of stores. For example, on x86-32 an f64 can be stored in one
// processor operation but an i64 (which is not legal) requires two. So the
// transform should not be done in this case.
if (Value.getOpcode() != ISD::TargetConstantFP) {
SDValue Tmp;
switch (CFP->getSimpleValueType(0).SimpleTy) {
default: llvm_unreachable("Unknown FP type");
case MVT::f16: // We don't do this for these yet.
case MVT::f80:
case MVT::f128:
case MVT::ppcf128:
break;
case MVT::f32:
if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
bitcastToAPInt().getZExtValue(), SDLoc(CFP),
MVT::i32);
return DAG.getStore(Chain, SDLoc(N), Tmp,
Ptr, ST->getMemOperand());
}
break;
case MVT::f64:
if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
!ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
getZExtValue(), SDLoc(CFP), MVT::i64);
return DAG.getStore(Chain, SDLoc(N), Tmp,
Ptr, ST->getMemOperand());
}
if (!ST->isVolatile() &&
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
// Many FP stores are not made apparent until after legalize, e.g. for
// argument passing. Since this is so common, custom legalize the
// 64-bit integer store into two 32-bit stores.
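        // Illustrative example: storing f64 1.0 (bit pattern
        // 0x3FF0000000000000) yields Lo == 0x00000000 and
        // Hi == 0x3FF00000 before any endian swap.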
uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
AAMDNodes AAInfo = ST->getAAInfo();
SDLoc DL(N);
SDValue St0 = DAG.getStore(Chain, SDLoc(ST), Lo,
Ptr, ST->getPointerInfo(),
isVolatile, isNonTemporal,
ST->getAlignment(), AAInfo);
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(4, DL, Ptr.getValueType()));
Alignment = MinAlign(Alignment, 4U);
SDValue St1 = DAG.getStore(Chain, SDLoc(ST), Hi,
Ptr, ST->getPointerInfo().getWithOffset(4),
isVolatile, isNonTemporal,
Alignment, AAInfo);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
St0, St1);
}
break;
}
}
}
// Try to infer better alignment information than the store already has.
if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
if (Align > ST->getAlignment()) {
SDValue NewStore =
DAG.getTruncStore(Chain, SDLoc(N), Value,
Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
ST->isVolatile(), ST->isNonTemporal(), Align,
ST->getAAInfo());
if (NewStore.getNode() != N)
return CombineTo(ST, NewStore, true);
}
}
}
  // Try transforming a pair of floating-point load / store ops to integer
  // load / store ops.
SDValue NewST = TransformFPLoadStorePair(N);
if (NewST.getNode())
return NewST;
bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
: DAG.getSubtarget().useAA();
#ifndef NDEBUG
if (CombinerAAOnlyFunc.getNumOccurrences() &&
CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
UseAA = false;
#endif
if (UseAA && ST->isUnindexed()) {
// Walk up chain skipping non-aliasing memory nodes.
SDValue BetterChain = FindBetterChain(N, Chain);
// If there is a better chain.
if (Chain != BetterChain) {
SDValue ReplStore;
// Replace the chain to avoid dependency.
if (ST->isTruncatingStore()) {
ReplStore = DAG.getTruncStore(BetterChain, SDLoc(N), Value, Ptr,
ST->getMemoryVT(), ST->getMemOperand());
} else {
ReplStore = DAG.getStore(BetterChain, SDLoc(N), Value, Ptr,
ST->getMemOperand());
}
// Create token to keep both nodes around.
SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
MVT::Other, Chain, ReplStore);
// Make sure the new and old chains are cleaned up.
AddToWorklist(Token.getNode());
// Don't add users to work list.
return CombineTo(N, Token, false);
}
}
// Try transforming N to an indexed store.
if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
return SDValue(N, 0);
// FIXME: is there such a thing as a truncating indexed store?
if (ST->isTruncatingStore() && ST->isUnindexed() &&
Value.getValueType().isInteger()) {
// See if we can simplify the input to this truncstore with knowledge that
// only the low bits are being used. For example:
// "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
SDValue Shorter =
GetDemandedBits(Value,
APInt::getLowBitsSet(
Value.getValueType().getScalarType().getSizeInBits(),
ST->getMemoryVT().getScalarType().getSizeInBits()));
AddToWorklist(Value.getNode());
if (Shorter.getNode())
return DAG.getTruncStore(Chain, SDLoc(N), Shorter,
Ptr, ST->getMemoryVT(), ST->getMemOperand());
// Otherwise, see if we can simplify the operation with
// SimplifyDemandedBits, which only works if the value has a single use.
if (SimplifyDemandedBits(Value,
APInt::getLowBitsSet(
Value.getValueType().getScalarType().getSizeInBits(),
ST->getMemoryVT().getScalarType().getSizeInBits())))
return SDValue(N, 0);
}
// If this is a load followed by a store to the same location, then the store
// is dead/noop.
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
ST->isUnindexed() && !ST->isVolatile() &&
// There can't be any side effects between the load and store, such as
// a call or store.
Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
// The store is dead, remove it.
return Chain;
}
}
// If this is a store followed by a store with the same value to the same
// location, then the store is dead/noop.
if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
if (ST1->getBasePtr() == Ptr && ST->getMemoryVT() == ST1->getMemoryVT() &&
ST1->getValue() == Value && ST->isUnindexed() && !ST->isVolatile() &&
ST1->isUnindexed() && !ST1->isVolatile()) {
// The store is dead, remove it.
return Chain;
}
}
// If this is an FP_ROUND or TRUNC followed by a store, fold this into a
// truncating store. We can do this even if this is already a truncstore.
if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
&& Value.getNode()->hasOneUse() && ST->isUnindexed() &&
TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
ST->getMemoryVT())) {
return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
Ptr, ST->getMemoryVT(), ST->getMemOperand());
}
// Only perform this optimization before the types are legal, because we
// don't want to perform this optimization on every DAGCombine invocation.
if (!LegalTypes) {
bool EverChanged = false;
do {
// There can be multiple store sequences on the same chain.
// Keep trying to merge store sequences until we are unable to do so
// or until we merge the last store on the chain.
bool Changed = MergeConsecutiveStores(ST);
EverChanged |= Changed;
if (!Changed) break;
} while (ST->getOpcode() != ISD::DELETED_NODE);
if (EverChanged)
return SDValue(N, 0);
}
return ReduceLoadOpStoreWidth(N);
}
SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
SDValue InVec = N->getOperand(0);
SDValue InVal = N->getOperand(1);
SDValue EltNo = N->getOperand(2);
SDLoc dl(N);
// If the inserted element is an UNDEF, just use the input vector.
if (InVal.getOpcode() == ISD::UNDEF)
return InVec;
EVT VT = InVec.getValueType();
// If we can't generate a legal BUILD_VECTOR, exit
if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
return SDValue();
// Check that we know which element is being inserted
if (!isa<ConstantSDNode>(EltNo))
return SDValue();
unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
// Canonicalize insert_vector_elt dag nodes.
// Example:
// (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
// -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
//
// Do this only if the child insert_vector node has one use; also
// do this only if indices are both constants and Idx1 < Idx0.
if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
&& isa<ConstantSDNode>(InVec.getOperand(2))) {
unsigned OtherElt =
cast<ConstantSDNode>(InVec.getOperand(2))->getZExtValue();
if (Elt < OtherElt) {
// Swap nodes.
SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VT,
InVec.getOperand(0), InVal, EltNo);
AddToWorklist(NewOp.getNode());
return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
}
}
// Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
// vector elements.
SmallVector<SDValue, 8> Ops;
// Do not combine these two vectors if the output vector will not replace
// the input vector.
if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) {
Ops.append(InVec.getNode()->op_begin(),
InVec.getNode()->op_end());
} else if (InVec.getOpcode() == ISD::UNDEF) {
unsigned NElts = VT.getVectorNumElements();
Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
} else {
return SDValue();
}
// Insert the element
if (Elt < Ops.size()) {
// All the operands of BUILD_VECTOR must have the same type;
// we enforce that here.
EVT OpVT = Ops[0].getValueType();
if (InVal.getValueType() != OpVT)
InVal = OpVT.bitsGT(InVal.getValueType()) ?
DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) :
DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal);
Ops[Elt] = InVal;
}
// Return the new vector
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
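/// Replace an extract_vector_elt of a load with a narrow load of just the
/// requested element. For example (illustrative),
///   (i32 extract_vector_elt (v4i32 load %p), 2)
/// becomes (i32 load %p + 8).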
SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad) {
EVT ResultVT = EVE->getValueType(0);
EVT VecEltVT = InVecVT.getVectorElementType();
unsigned Align = OriginalLoad->getAlignment();
unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
VecEltVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
return SDValue();
Align = NewAlign;
SDValue NewPtr = OriginalLoad->getBasePtr();
SDValue Offset;
EVT PtrType = NewPtr.getValueType();
MachinePointerInfo MPI;
SDLoc DL(EVE);
if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
int Elt = ConstEltNo->getZExtValue();
unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
Offset = DAG.getConstant(PtrOff, DL, PtrType);
MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
} else {
Offset = DAG.getZExtOrTrunc(EltNo, DL, PtrType);
Offset = DAG.getNode(
ISD::MUL, DL, PtrType, Offset,
DAG.getConstant(VecEltVT.getStoreSize(), DL, PtrType));
MPI = OriginalLoad->getPointerInfo();
}
NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, NewPtr, Offset);
// The replacement we need to do here is a little tricky: we need to
// replace an extractelement of a load with a load.
// Use ReplaceAllUsesOfValuesWith to do the replacement.
  // Note that this replacement assumes that the extract_vector_elt is the
  // only use of the load; that's okay because we don't want to perform this
  // transformation in other cases anyway.
SDValue Load;
SDValue Chain;
if (ResultVT.bitsGT(VecEltVT)) {
// If the result type of vextract is wider than the load, then issue an
// extending load instead.
ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT,
VecEltVT)
? ISD::ZEXTLOAD
: ISD::EXTLOAD;
Load = DAG.getExtLoad(
ExtType, SDLoc(EVE), ResultVT, OriginalLoad->getChain(), NewPtr, MPI,
VecEltVT, OriginalLoad->isVolatile(), OriginalLoad->isNonTemporal(),
OriginalLoad->isInvariant(), Align, OriginalLoad->getAAInfo());
Chain = Load.getValue(1);
} else {
Load = DAG.getLoad(
VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr, MPI,
OriginalLoad->isVolatile(), OriginalLoad->isNonTemporal(),
OriginalLoad->isInvariant(), Align, OriginalLoad->getAAInfo());
Chain = Load.getValue(1);
if (ResultVT.bitsLT(VecEltVT))
Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
else
Load = DAG.getNode(ISD::BITCAST, SDLoc(EVE), ResultVT, Load);
}
WorklistRemover DeadNodes(*this);
SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
SDValue To[] = { Load, Chain };
DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
// Since we're explicitly calling ReplaceAllUses, add the new node to the
// worklist explicitly as well.
AddToWorklist(Load.getNode());
AddUsersToWorklist(Load.getNode()); // Add users too
// Make sure to revisit this node to clean it up; it will usually be dead.
AddToWorklist(EVE);
++OpsNarrowed;
return SDValue(EVE, 0);
}
SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
  // (vextract (scalar_to_vector val), 0) -> val
SDValue InVec = N->getOperand(0);
EVT VT = InVec.getValueType();
EVT NVT = N->getValueType(0);
if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
// Check if the result type doesn't match the inserted element type. A
// SCALAR_TO_VECTOR may truncate the inserted element and the
// EXTRACT_VECTOR_ELT may widen the extracted vector.
SDValue InOp = InVec.getOperand(0);
if (InOp.getValueType() != NVT) {
assert(InOp.getValueType().isInteger() && NVT.isInteger());
return DAG.getSExtOrTrunc(InOp, SDLoc(InVec), NVT);
}
return InOp;
}
SDValue EltNo = N->getOperand(1);
bool ConstEltNo = isa<ConstantSDNode>(EltNo);
// Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
// We only perform this optimization before the op legalization phase because
// we may introduce new vector instructions which are not backed by TD
// patterns. For example, on AVX, extracting an element from a wide vector
// without using extract_subvector is not supported. However, if we can find
// an underlying scalar value, then we can always use that.
if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
&& ConstEltNo) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
int NumElem = VT.getVectorNumElements();
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
// Find the new index to extract from.
int OrigElt = SVOp->getMaskElt(Elt);
// Extracting an undef index is undef.
if (OrigElt == -1)
return DAG.getUNDEF(NVT);
// Select the right vector half to extract from.
SDValue SVInVec;
if (OrigElt < NumElem) {
SVInVec = InVec->getOperand(0);
} else {
SVInVec = InVec->getOperand(1);
OrigElt -= NumElem;
}
if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
SDValue InOp = SVInVec.getOperand(OrigElt);
if (InOp.getValueType() != NVT) {
assert(InOp.getValueType().isInteger() && NVT.isInteger());
InOp = DAG.getSExtOrTrunc(InOp, SDLoc(SVInVec), NVT);
}
return InOp;
}
// FIXME: We should handle recursing on other vector shuffles and
// scalar_to_vector here as well.
if (!LegalOperations) {
EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT, SVInVec,
DAG.getConstant(OrigElt, SDLoc(SVOp), IndexTy));
}
}
bool BCNumEltsChanged = false;
EVT ExtVT = VT.getVectorElementType();
EVT LVT = ExtVT;
// If the result of the load has to be truncated, then it's not necessarily
// profitable.
if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT))
return SDValue();
if (InVec.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
if (!InVec.hasOneUse())
return SDValue();
EVT BCVT = InVec.getOperand(0).getValueType();
if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
return SDValue();
if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
BCNumEltsChanged = true;
InVec = InVec.getOperand(0);
ExtVT = BCVT.getVectorElementType();
}
// (vextract (vN[if]M load $addr), i) -> ([if]M load $addr + i * size)
if (!LegalOperations && !ConstEltNo && InVec.hasOneUse() &&
ISD::isNormalLoad(InVec.getNode()) &&
!N->getOperand(1)->hasPredecessor(InVec.getNode())) {
SDValue Index = N->getOperand(1);
if (LoadSDNode *OrigLoad = dyn_cast<LoadSDNode>(InVec))
return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, Index,
OrigLoad);
}
// Perform only after legalization to ensure build_vector / vector_shuffle
// optimizations have already been done.
if (!LegalOperations) return SDValue();
// (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
// (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
// (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr+4)
if (ConstEltNo) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
LoadSDNode *LN0 = nullptr;
const ShuffleVectorSDNode *SVN = nullptr;
if (ISD::isNormalLoad(InVec.getNode())) {
LN0 = cast<LoadSDNode>(InVec);
} else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
InVec.getOperand(0).getValueType() == ExtVT &&
ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
// Don't duplicate a load with other uses.
if (!InVec.hasOneUse())
return SDValue();
LN0 = cast<LoadSDNode>(InVec.getOperand(0));
} else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
// (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 0)
// =>
// (load $addr+1*size)
// Don't duplicate a load with other uses.
if (!InVec.hasOneUse())
return SDValue();
// If the bit convert changed the number of elements, it is unsafe
// to examine the mask.
if (BCNumEltsChanged)
return SDValue();
// Select the input vector, guarding against an out-of-range extract index.
unsigned NumElems = VT.getVectorNumElements();
int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
if (InVec.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
if (!InVec.hasOneUse())
return SDValue();
InVec = InVec.getOperand(0);
}
if (ISD::isNormalLoad(InVec.getNode())) {
LN0 = cast<LoadSDNode>(InVec);
Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
EltNo = DAG.getConstant(Elt, SDLoc(EltNo), EltNo.getValueType());
}
}
// Make sure we found a non-volatile load and the extractelement is
// the only use.
if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
return SDValue();
// If Idx was -1 above, Elt is going to be -1, so just return undef.
if (Elt == -1)
return DAG.getUNDEF(LVT);
return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, EltNo, LN0);
}
return SDValue();
}
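// Example of the shuffle case above (hypothetical operands): for
//   (extract_vector_elt (vector_shuffle (v4f32 load %p), %V2, <1,u,u,u>), 0)
// the mask maps extract index 0 to element 1 of the load, so the combine
// emits
//   (f32 load %p + 4)
// provided the load is non-volatile and the extract is its only user.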
// Simplify (build_vec (ext )) to (bitcast (build_vec ))
SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
// We perform this optimization post type-legalization because
// the type-legalizer often scalarizes integer-promoted vectors.
// Performing this optimization earlier may create bit-casts which
// will be type-legalized to complex code sequences.
// We perform this optimization only before the operation legalizer because we
// may introduce illegal operations.
if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
return SDValue();
unsigned NumInScalars = N->getNumOperands();
SDLoc dl(N);
EVT VT = N->getValueType(0);
// Check to see if this is a BUILD_VECTOR of a bunch of values
// which come from any_extend or zero_extend nodes. If so, we can create
// a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
// optimizations. We do not handle sign-extend because we can't fill the sign
// using shuffles.
EVT SourceType = MVT::Other;
bool AllAnyExt = true;
for (unsigned i = 0; i != NumInScalars; ++i) {
SDValue In = N->getOperand(i);
// Ignore undef inputs.
if (In.getOpcode() == ISD::UNDEF) continue;
bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
// Abort if the element is not an extension.
if (!ZeroExt && !AnyExt) {
SourceType = MVT::Other;
break;
}
// The input is a ZeroExt or AnyExt. Check the original type.
EVT InTy = In.getOperand(0).getValueType();
// Check that all of the widened source types are the same.
if (SourceType == MVT::Other)
// First time.
SourceType = InTy;
else if (InTy != SourceType) {
// Multiple incoming types. Abort.
SourceType = MVT::Other;
break;
}
// Check if all of the extends are ANY_EXTENDs.
AllAnyExt &= AnyExt;
}
// In order to have valid types, all of the inputs must be extended from the
// same source type and all of the inputs must be any or zero extend.
// Scalar sizes must be a power of two.
EVT OutScalarTy = VT.getScalarType();
bool ValidTypes = SourceType != MVT::Other &&
isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
isPowerOf2_32(SourceType.getSizeInBits());
// Create a new simpler BUILD_VECTOR sequence which other optimizations can
// turn into a single shuffle instruction.
if (!ValidTypes)
return SDValue();
bool isLE = DAG.getDataLayout().isLittleEndian();
unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
assert(ElemRatio > 1 && "Invalid element size ratio");
SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
DAG.getConstant(0, SDLoc(N), SourceType);
unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
// Populate the new build_vector
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDValue Cast = N->getOperand(i);
assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
Cast.getOpcode() == ISD::ZERO_EXTEND ||
Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode");
SDValue In;
if (Cast.getOpcode() == ISD::UNDEF)
In = DAG.getUNDEF(SourceType);
else
In = Cast->getOperand(0);
unsigned Index = isLE ? (i * ElemRatio) :
(i * ElemRatio + (ElemRatio - 1));
assert(Index < Ops.size() && "Invalid index");
Ops[Index] = In;
}
// The type of the new BUILD_VECTOR node.
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
"Invalid vector size");
// Check if the new vector type is legal.
if (!isTypeLegal(VecVT)) return SDValue();
// Make the new BUILD_VECTOR.
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
// The new BUILD_VECTOR node has the potential to be further optimized.
AddToWorklist(BV.getNode());
// Bitcast to the desired type.
return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}
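// Worked example (hypothetical types): on a little-endian target,
//   (v4i32 build_vector (zext i16 %a), (zext i16 %b), ...)
// has ElemRatio = 32/16 = 2, so each source value lands at index i*2 of a
// v8i16 build_vector whose remaining lanes are the zero Filler, and the
// result is bitcast back to v4i32.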
SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
EVT VT = N->getValueType(0);
unsigned NumInScalars = N->getNumOperands();
SDLoc dl(N);
EVT SrcVT = MVT::Other;
unsigned Opcode = ISD::DELETED_NODE;
unsigned NumDefs = 0;
for (unsigned i = 0; i != NumInScalars; ++i) {
SDValue In = N->getOperand(i);
unsigned Opc = In.getOpcode();
if (Opc == ISD::UNDEF)
continue;
// All scalar values must be floats converted from integers; record the
// conversion opcode the first time we see one.
if (Opcode == ISD::DELETED_NODE &&
(Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) {
Opcode = Opc;
}
if (Opc != Opcode)
return SDValue();
EVT InVT = In.getOperand(0).getValueType();
// If the scalar values have differing source types, bail out. This
// restriction is chosen to simplify BUILD_VECTOR of integer types.
if (SrcVT == MVT::Other)
SrcVT = InVT;
if (SrcVT != InVT)
return SDValue();
NumDefs++;
}
// If the vector has just one element defined, it's not worth folding it into
// a vectorized one.
if (NumDefs < 2)
return SDValue();
assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP)
&& "Should only handle conversion from integer to float.");
assert(SrcVT != MVT::Other && "Cannot determine source type!");
EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars);
if (!TLI.isOperationLegalOrCustom(Opcode, NVT))
return SDValue();
// Just because the floating-point vector type is legal does not necessarily
// mean that the corresponding integer vector type is.
if (!isTypeLegal(NVT))
return SDValue();
SmallVector<SDValue, 8> Opnds;
for (unsigned i = 0; i != NumInScalars; ++i) {
SDValue In = N->getOperand(i);
if (In.getOpcode() == ISD::UNDEF)
Opnds.push_back(DAG.getUNDEF(SrcVT));
else
Opnds.push_back(In.getOperand(0));
}
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Opnds);
AddToWorklist(BV.getNode());
return DAG.getNode(Opcode, dl, VT, BV);
}
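// Sketch of the fold above (hypothetical values): with at least two defined
// operands,
//   (v4f32 build_vector (sint_to_fp i32 %a), (sint_to_fp i32 %b), undef, ...)
// becomes
//   (v4f32 sint_to_fp (v4i32 build_vector %a, %b, undef, ...))
// assuming v4i32 is a legal type and SINT_TO_FP on it is legal or custom.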
SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
unsigned NumInScalars = N->getNumOperands();
SDLoc dl(N);
EVT VT = N->getValueType(0);
// A vector built entirely of undefs is undef.
if (ISD::allOperandsUndef(N))
return DAG.getUNDEF(VT);
if (SDValue V = reduceBuildVecExtToExtBuildVec(N))
return V;
if (SDValue V = reduceBuildVecConvertToConvertBuildVec(N))
return V;
// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
// operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
// at most two distinct vectors, turn this into a shuffle node.
// Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
if (!isTypeLegal(VT))
return SDValue();
// May only combine to shuffle after legalize if shuffle is legal.
if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
return SDValue();
SDValue VecIn1, VecIn2;
bool UsesZeroVector = false;
for (unsigned i = 0; i != NumInScalars; ++i) {
SDValue Op = N->getOperand(i);
// Ignore undef inputs.
if (Op.getOpcode() == ISD::UNDEF) continue;
// See if we can combine this build_vector into a blend with a zero vector.
if (!VecIn2.getNode() && (isNullConstant(Op) || isNullFPConstant(Op))) {
UsesZeroVector = true;
continue;
}
// If this input is something other than a EXTRACT_VECTOR_ELT with a
// constant index, bail out.
if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(Op.getOperand(1))) {
VecIn1 = VecIn2 = SDValue(nullptr, 0);
break;
}
// We allow up to two distinct input vectors.
SDValue ExtractedFromVec = Op.getOperand(0);
if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
continue;
if (!VecIn1.getNode()) {
VecIn1 = ExtractedFromVec;
} else if (!VecIn2.getNode() && !UsesZeroVector) {
VecIn2 = ExtractedFromVec;
} else {
// Too many inputs.
VecIn1 = VecIn2 = SDValue(nullptr, 0);
break;
}
}
// If everything is good, we can make a shuffle operation.
if (VecIn1.getNode()) {
unsigned InNumElements = VecIn1.getValueType().getVectorNumElements();
SmallVector<int, 8> Mask;
for (unsigned i = 0; i != NumInScalars; ++i) {
unsigned Opcode = N->getOperand(i).getOpcode();
if (Opcode == ISD::UNDEF) {
Mask.push_back(-1);
continue;
}
// Operands can also be zero.
if (Opcode != ISD::EXTRACT_VECTOR_ELT) {
assert(UsesZeroVector &&
(Opcode == ISD::Constant || Opcode == ISD::ConstantFP) &&
"Unexpected node found!");
Mask.push_back(NumInScalars+i);
continue;
}
// If extracting from the first vector, just use the index directly.
SDValue Extract = N->getOperand(i);
SDValue ExtVal = Extract.getOperand(1);
unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
if (Extract.getOperand(0) == VecIn1) {
Mask.push_back(ExtIndex);
continue;
}
// Otherwise, use InNumElements + ExtIndex.
Mask.push_back(InNumElements + ExtIndex);
}
// Avoid introducing illegal shuffles with zero.
if (UsesZeroVector && !TLI.isVectorClearMaskLegal(Mask, VT))
return SDValue();
// We can't generate a shuffle node with mismatched input and output types.
// Attempt to transform a single input vector to the correct type.
if ((VT != VecIn1.getValueType())) {
// If the input vector type has a different base type to the output
// vector type, bail out.
EVT VTElemType = VT.getVectorElementType();
if ((VecIn1.getValueType().getVectorElementType() != VTElemType) ||
(VecIn2.getNode() &&
(VecIn2.getValueType().getVectorElementType() != VTElemType)))
return SDValue();
// If the input vector is too small, widen it.
// We only support widening of vectors which are half the size of the
// output registers. For example, XMM->YMM widening on X86 with AVX.
EVT VecInT = VecIn1.getValueType();
if (VecInT.getSizeInBits() * 2 == VT.getSizeInBits()) {
// If we only have one small input, widen it by adding undef values.
if (!VecIn2.getNode())
VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, VecIn1,
DAG.getUNDEF(VecIn1.getValueType()));
else if (VecIn1.getValueType() == VecIn2.getValueType()) {
// If we have two small inputs of the same type, try to concat them.
VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, VecIn1, VecIn2);
VecIn2 = SDValue(nullptr, 0);
} else
return SDValue();
} else if (VecInT.getSizeInBits() == VT.getSizeInBits() * 2) {
// If the input vector is too large, try to split it.
// We don't support having two input vectors that are too large.
// If the zero vector was used, we can not split the vector,
// since we'd need 3 inputs.
if (UsesZeroVector || VecIn2.getNode())
return SDValue();
if (!TLI.isExtractSubvectorCheap(VT, VT.getVectorNumElements()))
return SDValue();
// Try to replace VecIn1 with two extract_subvectors.
// No need to update the mask; it should still be correct.
VecIn2 = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
DAG.getConstant(VT.getVectorNumElements(), dl,
TLI.getVectorIdxTy(DAG.getDataLayout())));
VecIn1 = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
} else
return SDValue();
}
if (UsesZeroVector)
VecIn2 = VT.isInteger() ? DAG.getConstant(0, dl, VT) :
DAG.getConstantFP(0.0, dl, VT);
else
// If VecIn2 is unused then change it to undef.
VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
// Check that we were able to transform all incoming values to the same
// type.
if (VecIn2.getValueType() != VecIn1.getValueType() ||
VecIn1.getValueType() != VT)
return SDValue();
// Return the new VECTOR_SHUFFLE node.
SDValue Ops[2];
Ops[0] = VecIn1;
Ops[1] = VecIn2;
return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], &Mask[0]);
}
return SDValue();
}
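// Mask construction example for the combine above (hypothetical inputs):
//   (v2f32 build_vector (extract_elt %A, 0), (extract_elt %B, 1))
// with %A and %B of the result type gives VecIn1 = %A, VecIn2 = %B and
//   (vector_shuffle %A, %B, <0, 3>)
// since lanes taken from the second input are offset by InNumElements
// (= 2 here).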
static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT OpVT = N->getOperand(0).getValueType();
// If the operands are legal vectors, leave them alone.
if (TLI.isTypeLegal(OpVT))
return SDValue();
SDLoc DL(N);
EVT VT = N->getValueType(0);
SmallVector<SDValue, 8> Ops;
EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
// Keep track of what we encounter.
bool AnyInteger = false;
bool AnyFP = false;
for (const SDValue &Op : N->ops()) {
if (ISD::BITCAST == Op.getOpcode() &&
!Op.getOperand(0).getValueType().isVector())
Ops.push_back(Op.getOperand(0));
else if (ISD::UNDEF == Op.getOpcode())
Ops.push_back(ScalarUndef);
else
return SDValue();
// Note whether we encounter an integer or floating point scalar.
// If it's neither, bail out; it could be something weird like x86mmx.
EVT LastOpVT = Ops.back().getValueType();
if (LastOpVT.isFloatingPoint())
AnyFP = true;
else if (LastOpVT.isInteger())
AnyInteger = true;
else
return SDValue();
}
// If any of the operands is a floating point scalar bitcast to a vector,
// use floating point types throughout, and bitcast everything.
// Replace UNDEFs by another scalar UNDEF node, of the final desired type.
if (AnyFP) {
SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
if (AnyInteger) {
for (SDValue &Op : Ops) {
if (Op.getValueType() == SVT)
continue;
if (Op.getOpcode() == ISD::UNDEF)
Op = ScalarUndef;
else
Op = DAG.getNode(ISD::BITCAST, DL, SVT, Op);
}
}
}
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
VT.getSizeInBits() / SVT.getSizeInBits());
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(ISD::BUILD_VECTOR, DL, VecVT, Ops));
}
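// Example for the helper above (hypothetical, assuming v1i64 is an illegal
// operand type): concat_vectors (v1i64 bitcast (f64 %a)), (v1i64 bitcast
// (i64 %b)) mixes FP and integer scalars, so %b is bitcast to f64 and the
// node becomes
//   (v2i64 bitcast (v2f64 build_vector %a, (f64 bitcast %b)))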
SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
// TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
// EXTRACT_SUBVECTOR operations. If so, and if the EXTRACT_SUBVECTOR vector
// inputs come from at most two distinct vectors, turn this into a shuffle
// node.
// If we only have one input vector, we don't need to do any concatenation.
if (N->getNumOperands() == 1)
return N->getOperand(0);
// Check if all of the operands are undefs.
EVT VT = N->getValueType(0);
if (ISD::allOperandsUndef(N))
return DAG.getUNDEF(VT);
// Optimize concat_vectors where all but the first of the vectors are undef.
if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) {
return Op.getOpcode() == ISD::UNDEF;
})) {
SDValue In = N->getOperand(0);
assert(In.getValueType().isVector() && "Must concat vectors");
// Transform: concat_vectors(scalar, undef) -> scalar_to_vector(scalar).
if (In->getOpcode() == ISD::BITCAST &&
!In->getOperand(0)->getValueType(0).isVector()) {
SDValue Scalar = In->getOperand(0);
// If the bitcast type isn't legal, it might be a trunc of a legal type;
// look through the trunc so we can still do the transform:
// concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar)
if (Scalar->getOpcode() == ISD::TRUNCATE &&
!TLI.isTypeLegal(Scalar.getValueType()) &&
TLI.isTypeLegal(Scalar->getOperand(0).getValueType()))
Scalar = Scalar->getOperand(0);
EVT SclTy = Scalar->getValueType(0);
if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
return SDValue();
EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy,
VT.getSizeInBits() / SclTy.getSizeInBits());
if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
return SDValue();
SDLoc dl = SDLoc(N);
SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NVT, Scalar);
return DAG.getNode(ISD::BITCAST, dl, VT, Res);
}
}
// Fold any combination of BUILD_VECTOR or UNDEF nodes into one BUILD_VECTOR.
// We have already tested above for an UNDEF only concatenation.
// fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
// -> (BUILD_VECTOR A, B, ..., C, D, ...)
auto IsBuildVectorOrUndef = [](const SDValue &Op) {
return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
};
bool AllBuildVectorsOrUndefs =
std::all_of(N->op_begin(), N->op_end(), IsBuildVectorOrUndef);
if (AllBuildVectorsOrUndefs) {
SmallVector<SDValue, 8> Opnds;
EVT SVT = VT.getScalarType();
EVT MinVT = SVT;
if (!SVT.isFloatingPoint()) {
// If the BUILD_VECTOR nodes are built from integers, they may have
// different operand types. Get the smallest type and truncate all operands
// to it.
bool FoundMinVT = false;
for (const SDValue &Op : N->ops())
if (ISD::BUILD_VECTOR == Op.getOpcode()) {
EVT OpSVT = Op.getOperand(0)->getValueType(0);
MinVT = (!FoundMinVT || OpSVT.bitsLE(MinVT)) ? OpSVT : MinVT;
FoundMinVT = true;
}
assert(FoundMinVT && "Concat vector type mismatch");
}
for (const SDValue &Op : N->ops()) {
EVT OpVT = Op.getValueType();
unsigned NumElts = OpVT.getVectorNumElements();
if (ISD::UNDEF == Op.getOpcode())
Opnds.append(NumElts, DAG.getUNDEF(MinVT));
if (ISD::BUILD_VECTOR == Op.getOpcode()) {
if (SVT.isFloatingPoint()) {
assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
} else {
for (unsigned i = 0; i != NumElts; ++i)
Opnds.push_back(
DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinVT, Op.getOperand(i)));
}
}
}
assert(VT.getVectorNumElements() == Opnds.size() &&
"Concat vector type mismatch");
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
}
// Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
if (SDValue V = combineConcatVectorOfScalars(N, DAG))
return V;
// Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
// nodes often generate nop CONCAT_VECTOR nodes.
// Scan the CONCAT_VECTOR operands and look for CONCAT operations that
// place the incoming vectors at the exact same location.
SDValue SingleSource = SDValue();
unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDValue Op = N->getOperand(i);
if (Op.getOpcode() == ISD::UNDEF)
continue;
// Check if this is the identity extract:
if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
return SDValue();
// Find the single incoming vector for the extract_subvector.
if (SingleSource.getNode()) {
if (Op.getOperand(0) != SingleSource)
return SDValue();
} else {
SingleSource = Op.getOperand(0);
// Check that the source type is the same as the type of the result.
// If not, this concat may extend the vector, so we cannot
// optimize it away.
if (SingleSource.getValueType() != N->getValueType(0))
return SDValue();
}
unsigned IdentityIndex = i * PartNumElem;
ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
// The extract index must be constant.
if (!CS)
return SDValue();
// Check that we are reading from the identity index.
if (CS->getZExtValue() != IdentityIndex)
return SDValue();
}
if (SingleSource.getNode())
return SingleSource;
return SDValue();
}
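// Identity-concat example for the final scan above (hypothetical source):
//   concat_vectors (extract_subvector %V, 0), (extract_subvector %V, 4)
// with %V : v8i32 and v4i32 parts reads each part from its identity index
// (i * PartNumElem), so the whole concat folds to %V.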
SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
EVT NVT = N->getValueType(0);
SDValue V = N->getOperand(0);
if (V->getOpcode() == ISD::CONCAT_VECTORS) {
// Combine:
// (extract_subvec (concat V1, V2, ...), i)
// Into:
// Vi if possible
// Only operand 0 is checked as 'concat' assumes all inputs are of the same
// type.
if (V->getOperand(0).getValueType() != NVT)
return SDValue();
unsigned Idx = N->getConstantOperandVal(1);
unsigned NumElems = NVT.getVectorNumElements();
assert((Idx % NumElems) == 0 &&
"IDX in concat is not a multiple of the result vector length.");
return V->getOperand(Idx / NumElems);
}
// Skip bitcasting
if (V->getOpcode() == ISD::BITCAST)
V = V.getOperand(0);
if (V->getOpcode() == ISD::INSERT_SUBVECTOR) {
SDLoc dl(N);
// Handle only the simple case where the vector being inserted and the
// vector being extracted are of the same type, and are half the size of
// the larger vector.
EVT BigVT = V->getOperand(0).getValueType();
EVT SmallVT = V->getOperand(1).getValueType();
if (!NVT.bitsEq(SmallVT) || NVT.getSizeInBits()*2 != BigVT.getSizeInBits())
return SDValue();
// Only handle cases where both indexes are constants with the same type.
ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));
if (InsIdx && ExtIdx &&
InsIdx->getValueType(0).getSizeInBits() <= 64 &&
ExtIdx->getValueType(0).getSizeInBits() <= 64) {
// Combine:
// (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
// Into:
// indices are equal or bit offsets are equal => V1
// otherwise => (extract_subvec V1, ExtIdx)
if (InsIdx->getZExtValue() * SmallVT.getScalarType().getSizeInBits() ==
ExtIdx->getZExtValue() * NVT.getScalarType().getSizeInBits())
return DAG.getNode(ISD::BITCAST, dl, NVT, V->getOperand(1));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT,
DAG.getNode(ISD::BITCAST, dl,
N->getOperand(0).getValueType(),
V->getOperand(0)), N->getOperand(1));
}
}
return SDValue();
}
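// Bit-offset example for the insert_subvector case above (hypothetical
// types): extracting a v2i64 at index 1 from
//   (insert_subvector %Big, (v4i32 %Sub), 2)
// compares 2*32 against 1*64 bits; the offsets match, so the extract folds
// to (v2i64 bitcast %Sub).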
static SDValue simplifyShuffleOperandRecursively(SmallBitVector &UsedElements,
SDValue V, SelectionDAG &DAG) {
SDLoc DL(V);
EVT VT = V.getValueType();
switch (V.getOpcode()) {
default:
return V;
case ISD::CONCAT_VECTORS: {
EVT OpVT = V->getOperand(0).getValueType();
int OpSize = OpVT.getVectorNumElements();
SmallBitVector OpUsedElements(OpSize, false);
bool FoundSimplification = false;
SmallVector<SDValue, 4> NewOps;
NewOps.reserve(V->getNumOperands());
for (int i = 0, NumOps = V->getNumOperands(); i < NumOps; ++i) {
SDValue Op = V->getOperand(i);
bool OpUsed = false;
for (int j = 0; j < OpSize; ++j)
if (UsedElements[i * OpSize + j]) {
OpUsedElements[j] = true;
OpUsed = true;
}
NewOps.push_back(
OpUsed ? simplifyShuffleOperandRecursively(OpUsedElements, Op, DAG)
: DAG.getUNDEF(OpVT));
// Record whether any operand was actually simplified.
FoundSimplification |= Op != NewOps.back();
OpUsedElements.reset();
}
if (FoundSimplification)
V = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, NewOps);
return V;
}
case ISD::INSERT_SUBVECTOR: {
SDValue BaseV = V->getOperand(0);
SDValue SubV = V->getOperand(1);
auto *IdxN = dyn_cast<ConstantSDNode>(V->getOperand(2));
if (!IdxN)
return V;
int SubSize = SubV.getValueType().getVectorNumElements();
int Idx = IdxN->getZExtValue();
bool SubVectorUsed = false;
SmallBitVector SubUsedElements(SubSize, false);
for (int i = 0; i < SubSize; ++i)
if (UsedElements[i + Idx]) {
SubVectorUsed = true;
SubUsedElements[i] = true;
UsedElements[i + Idx] = false;
}
// Now recurse on both the base and sub vectors.
SDValue SimplifiedSubV =
SubVectorUsed
? simplifyShuffleOperandRecursively(SubUsedElements, SubV, DAG)
: DAG.getUNDEF(SubV.getValueType());
SDValue SimplifiedBaseV = simplifyShuffleOperandRecursively(UsedElements, BaseV, DAG);
if (SimplifiedSubV != SubV || SimplifiedBaseV != BaseV)
V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
SimplifiedBaseV, SimplifiedSubV, V->getOperand(2));
return V;
}
}
}
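// Sketch (hypothetical shapes): if only elements 0-3 of
//   (v8i32 concat_vectors %A, %B)
// are demanded, %B's per-operand used-element mask stays empty, so %B is
// replaced with undef and the node is rebuilt as (concat_vectors %A, undef).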
static SDValue simplifyShuffleOperands(ShuffleVectorSDNode *SVN, SDValue N0,
SDValue N1, SelectionDAG &DAG) {
EVT VT = SVN->getValueType(0);
int NumElts = VT.getVectorNumElements();
SmallBitVector N0UsedElements(NumElts, false), N1UsedElements(NumElts, false);
for (int M : SVN->getMask())
if (M >= 0 && M < NumElts)
N0UsedElements[M] = true;
else if (M >= NumElts)
N1UsedElements[M - NumElts] = true;
SDValue S0 = simplifyShuffleOperandRecursively(N0UsedElements, N0, DAG);
SDValue S1 = simplifyShuffleOperandRecursively(N1UsedElements, N1, DAG);
if (S0 == N0 && S1 == N1)
return SDValue();
return DAG.getVectorShuffle(VT, SDLoc(SVN), S0, S1, SVN->getMask());
}
// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat,
// or to turn a shuffle of a single concat into a simpler shuffle followed
// by a concat.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
SmallVector<SDValue, 4> Ops;
EVT ConcatVT = N0.getOperand(0).getValueType();
unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
unsigned NumConcats = NumElts / NumElemsPerConcat;
// Special case: shuffle(concat(A,B)) can be more efficiently represented
// as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
// half vector elements.
if (NumElemsPerConcat * 2 == NumElts && N1.getOpcode() == ISD::UNDEF &&
std::all_of(SVN->getMask().begin() + NumElemsPerConcat,
SVN->getMask().end(), [](int i) { return i == -1; })) {
N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0), N0.getOperand(1),
ArrayRef<int>(SVN->getMask().begin(), NumElemsPerConcat));
N1 = DAG.getUNDEF(ConcatVT);
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0, N1);
}
// Look at every vector that's inserted. We're looking for exact
// subvector-sized copies from a concatenated vector.
for (unsigned I = 0; I != NumConcats; ++I) {
// Make sure we're dealing with a copy.
unsigned Begin = I * NumElemsPerConcat;
bool AllUndef = true, NoUndef = true;
for (unsigned J = Begin; J != Begin + NumElemsPerConcat; ++J) {
if (SVN->getMaskElt(J) >= 0)
AllUndef = false;
else
NoUndef = false;
}
if (NoUndef) {
if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0)
return SDValue();
for (unsigned J = 1; J != NumElemsPerConcat; ++J)
if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J))
return SDValue();
unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat;
if (FirstElt < N0.getNumOperands())
Ops.push_back(N0.getOperand(FirstElt));
else
Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands()));
} else if (AllUndef) {
Ops.push_back(DAG.getUNDEF(N0.getOperand(0).getValueType()));
} else { // Mixed with general masks and undefs, can't do optimization.
return SDValue();
}
}
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
}
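// Example (hypothetical masks): for
//   shuffle (concat %A, %B), (concat %C, %D), <4,5,0,1>
// with 2 elements per concat operand, lane group 0 copies operand %C
// (FirstElt = 4/2 = 2, i.e. the first operand of the second concat) and
// group 1 copies %A, giving (concat_vectors %C, %A).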
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
EVT VT = N->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");
// Canonicalize shuffle undef, undef -> undef
if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
return DAG.getUNDEF(VT);
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
// Canonicalize shuffle v, v -> v, undef
if (N0 == N1) {
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
if (Idx >= (int)NumElts) Idx -= NumElts;
NewMask.push_back(Idx);
}
return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT),
&NewMask[0]);
}
// Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
if (N0.getOpcode() == ISD::UNDEF) {
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
if (Idx >= 0) {
if (Idx >= (int)NumElts)
Idx -= NumElts;
else
Idx = -1; // remove reference to lhs
}
NewMask.push_back(Idx);
}
return DAG.getVectorShuffle(VT, SDLoc(N), N1, DAG.getUNDEF(VT),
&NewMask[0]);
}
// Remove references to rhs if it is undef
if (N1.getOpcode() == ISD::UNDEF) {
bool Changed = false;
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
if (Idx >= (int)NumElts) {
Idx = -1;
Changed = true;
}
NewMask.push_back(Idx);
}
if (Changed)
return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, &NewMask[0]);
}
// If it is a splat, check if the argument vector is another splat or a
// build_vector.
if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
SDNode *V = N0.getNode();
// If this is a bit convert that changes the element type of the vector but
// not the number of vector elements, look through it. Be careful not to
// look through conversions that change things like v4f32 to v2f64.
if (V->getOpcode() == ISD::BITCAST) {
SDValue ConvInput = V->getOperand(0);
if (ConvInput.getValueType().isVector() &&
ConvInput.getValueType().getVectorNumElements() == NumElts)
V = ConvInput.getNode();
}
if (V->getOpcode() == ISD::BUILD_VECTOR) {
assert(V->getNumOperands() == NumElts &&
"BUILD_VECTOR has wrong number of operands");
SDValue Base;
bool AllSame = true;
for (unsigned i = 0; i != NumElts; ++i) {
if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
Base = V->getOperand(i);
break;
}
}
// Splat of <u, u, u, u>, return <u, u, u, u>
if (!Base.getNode())
return N0;
for (unsigned i = 0; i != NumElts; ++i) {
if (V->getOperand(i) != Base) {
AllSame = false;
break;
}
}
// Splat of <x, x, x, x>, return <x, x, x, x>
if (AllSame)
return N0;
// Canonicalize any other splat as a build_vector.
const SDValue &Splatted = V->getOperand(SVN->getSplatIndex());
SmallVector<SDValue, 8> Ops(NumElts, Splatted);
SDValue NewBV = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
V->getValueType(0), Ops);
// We may have jumped through bitcasts, so the type of the
// BUILD_VECTOR may not match the type of the shuffle.
if (V->getValueType(0) != VT)
NewBV = DAG.getNode(ISD::BITCAST, SDLoc(N), VT, NewBV);
return NewBV;
}
}
// There are various patterns used to build up a vector from smaller vectors,
// subvectors, or elements. Scan chains of these and replace unused insertions
// or components with undef.
if (SDValue S = simplifyShuffleOperands(SVN, N0, N1, DAG))
return S;
if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
Level < AfterLegalizeVectorOps &&
(N1.getOpcode() == ISD::UNDEF ||
(N1.getOpcode() == ISD::CONCAT_VECTORS &&
N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
SDValue V = partitionShuffleOfConcats(N, DAG);
if (V.getNode())
return V;
}
// Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
// BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT)) {
SmallVector<SDValue, 8> Ops;
for (int M : SVN->getMask()) {
SDValue Op = DAG.getUNDEF(VT.getScalarType());
if (M >= 0) {
int Idx = M % NumElts;
SDValue &S = (M < (int)NumElts ? N0 : N1);
if (S.getOpcode() == ISD::BUILD_VECTOR && S.hasOneUse()) {
Op = S.getOperand(Idx);
} else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR && S.hasOneUse()) {
if (Idx == 0)
Op = S.getOperand(0);
} else {
// Operand can't be combined - bail out.
break;
}
}
Ops.push_back(Op);
}
if (Ops.size() == VT.getVectorNumElements()) {
// BUILD_VECTOR requires all inputs to be of the same type; find the
// maximum type and extend them all.
EVT SVT = VT.getScalarType();
if (SVT.isInteger())
for (SDValue &Op : Ops)
SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
if (SVT != VT.getScalarType())
for (SDValue &Op : Ops)
Op = TLI.isZExtFree(Op.getValueType(), SVT)
? DAG.getZExtOrTrunc(Op, SDLoc(N), SVT)
: DAG.getSExtOrTrunc(Op, SDLoc(N), SVT);
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Ops);
}
}
// If this shuffle only has a single input that is a bitcasted shuffle,
// attempt to merge the 2 shuffles and suitably bitcast the inputs/output
// back to their original types.
if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
N1.getOpcode() == ISD::UNDEF && Level < AfterLegalizeVectorOps &&
TLI.isTypeLegal(VT)) {
// Peek through bitcasts, but only while each bitcast has a single user.
SDValue BC0 = N0;
while (BC0.getOpcode() == ISD::BITCAST) {
if (!BC0.hasOneUse())
break;
BC0 = BC0.getOperand(0);
}
auto ScaleShuffleMask = [](ArrayRef<int> Mask, int Scale) {
if (Scale == 1)
return SmallVector<int, 8>(Mask.begin(), Mask.end());
SmallVector<int, 8> NewMask;
for (int M : Mask)
for (int s = 0; s != Scale; ++s)
NewMask.push_back(M < 0 ? -1 : Scale * M + s);
return NewMask;
};
if (BC0.getOpcode() == ISD::VECTOR_SHUFFLE && BC0.hasOneUse()) {
EVT SVT = VT.getScalarType();
EVT InnerVT = BC0->getValueType(0);
EVT InnerSVT = InnerVT.getScalarType();
// Determine which shuffle works with the smaller scalar type.
EVT ScaleVT = SVT.bitsLT(InnerSVT) ? VT : InnerVT;
EVT ScaleSVT = ScaleVT.getScalarType();
if (TLI.isTypeLegal(ScaleVT) &&
0 == (InnerSVT.getSizeInBits() % ScaleSVT.getSizeInBits()) &&
0 == (SVT.getSizeInBits() % ScaleSVT.getSizeInBits())) {
int InnerScale = InnerSVT.getSizeInBits() / ScaleSVT.getSizeInBits();
int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits();
// Scale the shuffle masks to the smaller scalar type.
ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0);
SmallVector<int, 8> InnerMask =
ScaleShuffleMask(InnerSVN->getMask(), InnerScale);
SmallVector<int, 8> OuterMask =
ScaleShuffleMask(SVN->getMask(), OuterScale);
// Merge the shuffle masks.
SmallVector<int, 8> NewMask;
for (int M : OuterMask)
NewMask.push_back(M < 0 ? -1 : InnerMask[M]);
// Test for shuffle mask legality over both commutations.
SDValue SV0 = BC0->getOperand(0);
SDValue SV1 = BC0->getOperand(1);
bool LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
if (!LegalMask) {
std::swap(SV0, SV1);
ShuffleVectorSDNode::commuteMask(NewMask);
LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
}
if (LegalMask) {
SV0 = DAG.getNode(ISD::BITCAST, SDLoc(N), ScaleVT, SV0);
SV1 = DAG.getNode(ISD::BITCAST, SDLoc(N), ScaleVT, SV1);
return DAG.getNode(
ISD::BITCAST, SDLoc(N), VT,
DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
}
}
}
}
// Canonicalize shuffles according to rules:
// shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
// shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
// shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
if (N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
TLI.isTypeLegal(VT)) {
// The incoming shuffle must be of the same type as the result of the
// current shuffle.
assert(N1->getOperand(0).getValueType() == VT &&
"Shuffle types don't match");
SDValue SV0 = N1->getOperand(0);
SDValue SV1 = N1->getOperand(1);
bool HasSameOp0 = N0 == SV0;
bool IsSV1Undef = SV1.getOpcode() == ISD::UNDEF;
if (HasSameOp0 || IsSV1Undef || N0 == SV1)
// Commute the operands of this shuffle so that the next rule
// will trigger.
return DAG.getCommutedVectorShuffle(*SVN);
}
// Try to fold according to rules:
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
// Don't try to fold shuffles with illegal type.
// Only fold if this shuffle is the only user of the other shuffle.
if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) &&
Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
// The incoming shuffle must be of the same type as the result of the
// current shuffle.
assert(OtherSV->getOperand(0).getValueType() == VT &&
"Shuffle types don't match");
SDValue SV0, SV1;
SmallVector<int, 4> Mask;
// Compute the combined shuffle mask for a shuffle with SV0 as the first
// operand, and SV1 as the second operand.
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
if (Idx < 0) {
// Propagate Undef.
Mask.push_back(Idx);
continue;
}
SDValue CurrentVec;
if (Idx < (int)NumElts) {
// This shuffle index refers to the inner shuffle N0. Look up the inner
// shuffle mask to identify which vector is actually referenced.
Idx = OtherSV->getMaskElt(Idx);
if (Idx < 0) {
// Propagate Undef.
Mask.push_back(Idx);
continue;
}
CurrentVec = (Idx < (int) NumElts) ? OtherSV->getOperand(0)
: OtherSV->getOperand(1);
} else {
// This shuffle index references an element within N1.
CurrentVec = N1;
}
// Simple case where 'CurrentVec' is UNDEF.
if (CurrentVec.getOpcode() == ISD::UNDEF) {
Mask.push_back(-1);
continue;
}
// Canonicalize the shuffle index. We don't know yet if CurrentVec
// will be the first or second operand of the combined shuffle.
Idx = Idx % NumElts;
if (!SV0.getNode() || SV0 == CurrentVec) {
// Ok. CurrentVec is the left hand side.
// Update the mask accordingly.
SV0 = CurrentVec;
Mask.push_back(Idx);
continue;
}
// Bail out if we cannot convert the shuffle pair into a single shuffle.
if (SV1.getNode() && SV1 != CurrentVec)
return SDValue();
// Ok. CurrentVec is the right hand side.
// Update the mask accordingly.
SV1 = CurrentVec;
Mask.push_back(Idx + NumElts);
}
// Check if all indices in Mask are Undef. If so, propagate Undef.
bool isUndefMask = true;
for (unsigned i = 0; i != NumElts && isUndefMask; ++i)
isUndefMask &= Mask[i] < 0;
if (isUndefMask)
return DAG.getUNDEF(VT);
if (!SV0.getNode())
SV0 = DAG.getUNDEF(VT);
if (!SV1.getNode())
SV1 = DAG.getUNDEF(VT);
// Avoid introducing shuffles with illegal mask.
if (!TLI.isShuffleMaskLegal(Mask, VT)) {
ShuffleVectorSDNode::commuteMask(Mask);
if (!TLI.isShuffleMaskLegal(Mask, VT))
return SDValue();
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
std::swap(SV0, SV1);
}
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
// shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, &Mask[0]);
}
return SDValue();
}
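// Mask-merge example for the fold above (hypothetical operands): with
// NumElts = 4,
//   shuffle (shuffle %A, %B, <0,4,1,5>), %C, <0,2,4,6>
// resolves outer indices 0 and 2 through the inner mask to %A's lanes 0 and
// 1, and indices 4 and 6 to %C's lanes 0 and 2, producing
//   shuffle %A, %C, <0,1,4,6>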
SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
SDValue InVal = N->getOperand(0);
EVT VT = N->getValueType(0);
// Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
// with a VECTOR_SHUFFLE.
if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
SDValue InVec = InVal->getOperand(0);
SDValue EltNo = InVal->getOperand(1);
// FIXME: We could support implicit truncation if the shuffle can be
// scaled to a smaller vector scalar type.
ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo);
if (C0 && VT == InVec.getValueType() &&
VT.getScalarType() == InVal.getValueType()) {
SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1);
int Elt = C0->getZExtValue();
NewMask[0] = Elt;
if (TLI.isShuffleMaskLegal(NewMask, VT))
return DAG.getVectorShuffle(VT, SDLoc(N), InVec, DAG.getUNDEF(VT),
NewMask);
}
}
return SDValue();
}
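// Example (hypothetical types): for
//   (v4i32 scalar_to_vector (extract_vector_elt (v4i32 %V), 2))
// the pattern above builds mask <2,-1,-1,-1> and, if the mask is legal,
// emits (vector_shuffle %V, undef, <2,u,u,u>).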
SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N2 = N->getOperand(2);
// If the input vector is a concatenation, and the insert replaces
// one of the halves, we can optimize into a single concat_vectors.
if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
N0->getNumOperands() == 2 && N2.getOpcode() == ISD::Constant) {
APInt InsIdx = cast<ConstantSDNode>(N2)->getAPIntValue();
EVT VT = N->getValueType(0);
// Lower half: fold (insert_subvector (concat_vectors X, Y), Z) ->
// (concat_vectors Z, Y)
if (InsIdx == 0)
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
N->getOperand(1), N0.getOperand(1));
// Upper half: fold (insert_subvector (concat_vectors X, Y), Z) ->
// (concat_vectors X, Z)
if (InsIdx == VT.getVectorNumElements()/2)
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
N0.getOperand(0), N->getOperand(1));
}
return SDValue();
}
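// Example (hypothetical halves): inserting %Z at index 0 of
//   (v8i32 concat_vectors %X, %Y)
// yields (concat_vectors %Z, %Y); inserting at index 4 (= NumElts/2) yields
// (concat_vectors %X, %Z).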
SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
SDValue N0 = N->getOperand(0);
// fold (fp_to_fp16 (fp16_to_fp op)) -> op
if (N0->getOpcode() == ISD::FP16_TO_FP)
return N0->getOperand(0);
return SDValue();
}
/// Returns a vector_shuffle if it is able to transform an AND to a
/// vector_shuffle with the destination vector and a zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0> ==>
/// vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
EVT VT = N->getValueType(0);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDLoc dl(N);
// Make sure we're not running after operation legalization where it
// may have custom lowered the vector shuffles.
if (LegalOperations)
return SDValue();
if (N->getOpcode() != ISD::AND)
return SDValue();
if (RHS.getOpcode() == ISD::BITCAST)
RHS = RHS.getOperand(0);
if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
SmallVector<int, 8> Indices;
unsigned NumElts = RHS.getNumOperands();
for (unsigned i = 0; i != NumElts; ++i) {
SDValue Elt = RHS.getOperand(i);
if (isAllOnesConstant(Elt))
Indices.push_back(i);
else if (isNullConstant(Elt))
Indices.push_back(NumElts+i);
else
return SDValue();
}
// Let's see if the target supports this vector_shuffle.
EVT RVT = RHS.getValueType();
if (!TLI.isVectorClearMaskLegal(Indices, RVT))
return SDValue();
// Return the new VECTOR_SHUFFLE node.
EVT EltVT = RVT.getVectorElementType();
SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(),
DAG.getConstant(0, dl, EltVT));
SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, RVT, ZeroOps);
LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
}
return SDValue();
}
/// Visit a binary vector operation, like ADD.
SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
assert(N->getValueType(0).isVector() &&
"SimplifyVBinOp only works on vectors!");
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (SDValue Shuffle = XformToShuffleWithZero(N))
return Shuffle;
// If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold
// this operation.
if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
RHS.getOpcode() == ISD::BUILD_VECTOR) {
// Check if both vectors are constants. If not bail out.
if (!(cast<BuildVectorSDNode>(LHS)->isConstant() &&
cast<BuildVectorSDNode>(RHS)->isConstant()))
return SDValue();
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
SDValue LHSOp = LHS.getOperand(i);
SDValue RHSOp = RHS.getOperand(i);
// Can't fold divide by zero.
if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
N->getOpcode() == ISD::FDIV) {
if (isNullConstant(RHSOp) || (RHSOp.getOpcode() == ISD::ConstantFP &&
cast<ConstantFPSDNode>(RHSOp.getNode())->isZero()))
break;
}
EVT VT = LHSOp.getValueType();
EVT RVT = RHSOp.getValueType();
if (RVT != VT) {
// Integer BUILD_VECTOR operands may have types larger than the element
// size (e.g., when the element type is not legal). Prior to type
// legalization, the types may not match between the two BUILD_VECTORS.
// Truncate one of the operands to make them match.
if (RVT.getSizeInBits() > VT.getSizeInBits()) {
RHSOp = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, RHSOp);
} else {
LHSOp = DAG.getNode(ISD::TRUNCATE, SDLoc(N), RVT, LHSOp);
VT = RVT;
}
}
SDValue FoldOp = DAG.getNode(N->getOpcode(), SDLoc(LHS), VT,
LHSOp, RHSOp);
if (FoldOp.getOpcode() != ISD::UNDEF &&
FoldOp.getOpcode() != ISD::Constant &&
FoldOp.getOpcode() != ISD::ConstantFP)
break;
Ops.push_back(FoldOp);
AddToWorklist(FoldOp.getNode());
}
if (Ops.size() == LHS.getNumOperands())
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), LHS.getValueType(), Ops);
}
// Type legalization might introduce new shuffles in the DAG.
// Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask)))
// -> (shuffle (VBinOp (A, B)), Undef, Mask).
if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) &&
isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() &&
LHS.getOperand(1).getOpcode() == ISD::UNDEF &&
RHS.getOperand(1).getOpcode() == ISD::UNDEF) {
ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS);
ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS);
if (SVN0->getMask().equals(SVN1->getMask())) {
EVT VT = N->getValueType(0);
SDValue UndefVector = LHS.getOperand(1);
SDValue NewBinOp = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
LHS.getOperand(0), RHS.getOperand(0));
AddUsersToWorklist(N);
return DAG.getVectorShuffle(VT, SDLoc(N), NewBinOp, UndefVector,
&SVN0->getMask()[0]);
}
}
return SDValue();
}
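// Constant-folding example for SimplifyVBinOp (hypothetical constants):
//   (v2i32 add (build_vector 1, 2), (build_vector 10, 20))
// folds element-wise to (build_vector 11, 22), while the shuffle rule turns
//   (add (shuffle %A, undef, M), (shuffle %B, undef, M))
// into (shuffle (add %A, %B), undef, M) once types have been legalized.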
SDValue DAGCombiner::SimplifySelect(SDLoc DL, SDValue N0,
SDValue N1, SDValue N2){
assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");
SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
cast<CondCodeSDNode>(N0.getOperand(2))->get());
// If we got a simplified select_cc node back from SimplifySelectCC, then
// break it down into a new SETCC node, and a new SELECT node, and then return
// the SELECT node, since we were called with a SELECT node.
if (SCC.getNode()) {
// Check to see if we got a select_cc back (to turn into setcc/select).
// Otherwise, just return whatever node we got back, like fabs.
if (SCC.getOpcode() == ISD::SELECT_CC) {
SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
N0.getValueType(),
SCC.getOperand(0), SCC.getOperand(1),
SCC.getOperand(4));
AddToWorklist(SETCC.getNode());
return DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
SCC.getOperand(2), SCC.getOperand(3));
}
return SCC;
}
return SDValue();
}
/// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values
/// being selected between, see if we can simplify the select. Callers of this
/// should assume that TheSelect is deleted if this returns true. As such, they
/// should return the appropriate thing (e.g. the node) back to the top-level of
/// the DAG combiner loop to avoid it being looked at.
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
SDValue RHS) {
// fold (select (setcc x, -0.0, *lt), NaN, (fsqrt x))
// The select + setcc is redundant, because fsqrt returns NaN for X < -0.
if (const ConstantFPSDNode *NaN = isConstOrConstSplatFP(LHS)) {
if (NaN->isNaN() && RHS.getOpcode() == ISD::FSQRT) {
// We have: (select (setcc ?, ?, ?), NaN, (fsqrt ?))
SDValue Sqrt = RHS;
ISD::CondCode CC;
SDValue CmpLHS;
const ConstantFPSDNode *NegZero = nullptr;
if (TheSelect->getOpcode() == ISD::SELECT_CC) {
CC = cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
CmpLHS = TheSelect->getOperand(0);
NegZero = isConstOrConstSplatFP(TheSelect->getOperand(1));
} else {
// SELECT or VSELECT
SDValue Cmp = TheSelect->getOperand(0);
if (Cmp.getOpcode() == ISD::SETCC) {
CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
CmpLHS = Cmp.getOperand(0);
NegZero = isConstOrConstSplatFP(Cmp.getOperand(1));
}
}
if (NegZero && NegZero->isNegative() && NegZero->isZero() &&
Sqrt.getOperand(0) == CmpLHS && (CC == ISD::SETOLT ||
CC == ISD::SETULT || CC == ISD::SETLT)) {
// We have: (select (setcc x, -0.0, *lt), NaN, (fsqrt x))
CombineTo(TheSelect, Sqrt);
return true;
}
}
}
// Cannot simplify select with vector condition
if (TheSelect->getOperand(0).getValueType().isVector()) return false;
// If this is a select from two identical things, try to pull the operation
// through the select.
if (LHS.getOpcode() != RHS.getOpcode() ||
!LHS.hasOneUse() || !RHS.hasOneUse())
return false;
// If this is a load and the token chain is identical, replace the select
// of two loads with a load through a select of the address to load from.
// This triggers in things like "select bool X, 10.0, 123.0" after the FP
// constants have been dropped into the constant pool.
if (LHS.getOpcode() == ISD::LOAD) {
LoadSDNode *LLD = cast<LoadSDNode>(LHS);
LoadSDNode *RLD = cast<LoadSDNode>(RHS);
// Token chains must be identical.
if (LHS.getOperand(0) != RHS.getOperand(0) ||
// Do not let this transformation reduce the number of volatile loads.
LLD->isVolatile() || RLD->isVolatile() ||
// FIXME: If either is a pre/post inc/dec load,
// we'd need to split out the address adjustment.
LLD->isIndexed() || RLD->isIndexed() ||
// If this is an EXTLOAD, the VT's must match.
LLD->getMemoryVT() != RLD->getMemoryVT() ||
// If this is an EXTLOAD, the kind of extension must match.
(LLD->getExtensionType() != RLD->getExtensionType() &&
// The only exception is if one of the extensions is anyext.
LLD->getExtensionType() != ISD::EXTLOAD &&
RLD->getExtensionType() != ISD::EXTLOAD) ||
// FIXME: this discards src value information. This is
// over-conservative. It would be beneficial to be able to remember
// both potential memory locations. Since we are discarding
// src value info, don't do the transformation if the memory
// locations are not in the default address space.
LLD->getPointerInfo().getAddrSpace() != 0 ||
RLD->getPointerInfo().getAddrSpace() != 0 ||
!TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
LLD->getBasePtr().getValueType()))
return false;
// Check that the select condition doesn't reach either load. If so,
// folding this will induce a cycle into the DAG. If not, this is safe to
// xform, so create a select of the addresses.
SDValue Addr;
if (TheSelect->getOpcode() == ISD::SELECT) {
SDNode *CondNode = TheSelect->getOperand(0).getNode();
if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
(RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
return false;
// The loads must not depend on one another.
if (LLD->isPredecessorOf(RLD) ||
RLD->isPredecessorOf(LLD))
return false;
Addr = DAG.getSelect(SDLoc(TheSelect),
LLD->getBasePtr().getValueType(),
TheSelect->getOperand(0), LLD->getBasePtr(),
RLD->getBasePtr());
} else { // Otherwise SELECT_CC
SDNode *CondLHS = TheSelect->getOperand(0).getNode();
SDNode *CondRHS = TheSelect->getOperand(1).getNode();
if ((LLD->hasAnyUseOfValue(1) &&
(LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
(RLD->hasAnyUseOfValue(1) &&
(RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
return false;
Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect),
LLD->getBasePtr().getValueType(),
TheSelect->getOperand(0),
TheSelect->getOperand(1),
LLD->getBasePtr(), RLD->getBasePtr(),
TheSelect->getOperand(4));
}
SDValue Load;
// It is safe to replace the two loads if they have different alignments,
// but the new load must use the minimum (most restrictive) alignment of
// the inputs.
bool isInvariant = LLD->isInvariant() && RLD->isInvariant();
unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment());
if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
Load = DAG.getLoad(TheSelect->getValueType(0),
SDLoc(TheSelect),
// FIXME: Discards pointer and AA info.
LLD->getChain(), Addr, MachinePointerInfo(),
LLD->isVolatile(), LLD->isNonTemporal(),
isInvariant, Alignment);
} else {
Load = DAG.getExtLoad(LLD->getExtensionType() == ISD::EXTLOAD ?
RLD->getExtensionType() : LLD->getExtensionType(),
SDLoc(TheSelect),
TheSelect->getValueType(0),
// FIXME: Discards pointer and AA info.
LLD->getChain(), Addr, MachinePointerInfo(),
LLD->getMemoryVT(), LLD->isVolatile(),
LLD->isNonTemporal(), isInvariant, Alignment);
}
// Users of the select now use the result of the load.
CombineTo(TheSelect, Load);
// Users of the old loads now use the new load's chain. We know the
// old-load value is dead now.
CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
return true;
}
return false;
}
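// Load-select example for the transformation above (hypothetical pointers):
//   select %c, (load %p1), (load %p2)
// with identical chains becomes a single
//   load (select %c, %p1, %p2)
// using the minimum alignment of the two original loads.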
/// Simplify an expression of the form (N0 cond N1) ? N2 : N3
/// where 'cond' is the comparison specified by CC.
SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
SDValue N2, SDValue N3,
ISD::CondCode CC, bool NotExtCompare) {
// (x ? y : y) -> y.
if (N2 == N3) return N2;
EVT VT = N2.getValueType();
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
// Determine if the condition we're dealing with is constant
SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
N0, N1, CC, DL, false);
if (SCC.getNode()) AddToWorklist(SCC.getNode());
if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
// fold select_cc true, x, y -> x
// fold select_cc false, x, y -> y
return !SCCC->isNullValue() ? N2 : N3;
}
// Check to see if we can simplify the select into an fabs node
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
// Allow either -0.0 or 0.0
if (CFP->isZero()) {
// select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
N0 == N2 && N3.getOpcode() == ISD::FNEG &&
N2 == N3.getOperand(0))
return DAG.getNode(ISD::FABS, DL, VT, N0);
// select (setl[te] X, +/-0.0), fneg(X), X -> fabs
if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
N0 == N3 && N2.getOpcode() == ISD::FNEG &&
N2.getOperand(0) == N3)
return DAG.getNode(ISD::FABS, DL, VT, N3);
}
}
// Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)"
// where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
// in it. This is a win when the constant is not otherwise available because
// it replaces two constant pool loads with one. We only do this if the FP
// type is known to be legal, because if it isn't, then we are before legalize
// types and we want the other legalization to happen first (e.g. to avoid
// messing with soft float), and only if the ConstantFP is not legal, because
// if it is legal, we may not need to store the FP constant in a constant pool.
if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
if (TLI.isTypeLegal(N2.getValueType()) &&
(TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
TargetLowering::Legal &&
!TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0)) &&
!TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0))) &&
// If both constants have multiple uses, then we won't need to do an
// extra load, they are likely around in registers for other users.
(TV->hasOneUse() || FV->hasOneUse())) {
Constant *Elts[] = {
const_cast<ConstantFP*>(FV->getConstantFPValue()),
const_cast<ConstantFP*>(TV->getConstantFPValue())
};
Type *FPTy = Elts[0]->getType();
const DataLayout &TD = DAG.getDataLayout();
// Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
SDValue CPIdx =
DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
TD.getPrefTypeAlignment(FPTy));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
// Get the offsets to the 0 and 1 element of the array so that we can
// select between them.
SDValue Zero = DAG.getIntPtrConstant(0, DL);
unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
SDValue One = DAG.getIntPtrConstant(EltSize, SDLoc(FV));
SDValue Cond = DAG.getSetCC(DL,
getSetCCResultType(N0.getValueType()),
N0, N1, CC);
AddToWorklist(Cond.getNode());
SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(),
Cond, One, Zero);
AddToWorklist(CstOffset.getNode());
CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx,
CstOffset);
AddToWorklist(CPIdx.getNode());
return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(), false,
false, false, Alignment);
}
}
// Check to see if we can perform the "gzip trick", transforming
// (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A)
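// For example (illustrative), with i32 X:
//   (select_cc setlt X, 0, A, 0) -> (and (sra X, 31), A)
// The arithmetic shift smears the sign bit into all-ones when X < 0 and
// zero otherwise, so the AND yields A or 0.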
if (isNullConstant(N3) && CC == ISD::SETLT &&
(isNullConstant(N1) || // (a < 0) ? b : 0
(isOneConstant(N1) && N0 == N2))) { // (a < 1) ? a : 0
EVT XType = N0.getValueType();
EVT AType = N2.getValueType();
if (XType.bitsGE(AType)) {
// (and (sra X, size(X)-1), A) -> "and (srl X, C2), A" iff A is a
// single-bit constant.
if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue() - 1)) == 0)) {
unsigned ShCtV = N2C->getAPIntValue().logBase2();
ShCtV = XType.getSizeInBits() - ShCtV - 1;
SDValue ShCt = DAG.getConstant(ShCtV, SDLoc(N0),
getShiftAmountTy(N0.getValueType()));
SDValue Shift = DAG.getNode(ISD::SRL, SDLoc(N0),
XType, N0, ShCt);
AddToWorklist(Shift.getNode());
if (XType.bitsGT(AType)) {
Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
AddToWorklist(Shift.getNode());
}
return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
}
SDValue Shift = DAG.getNode(ISD::SRA, SDLoc(N0),
XType, N0,
DAG.getConstant(XType.getSizeInBits() - 1,
SDLoc(N0),
getShiftAmountTy(N0.getValueType())));
AddToWorklist(Shift.getNode());
if (XType.bitsGT(AType)) {
Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
AddToWorklist(Shift.getNode());
}
return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
}
}
// fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x)) A)
// where y has a single bit set.
// A plaintext description would be: we can turn the SELECT_CC into an AND
// when the condition can be materialized as an all-ones register. Any
// single bit-test can be materialized as an all-ones register with
// shift-left and shift-right-arith.
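// For example (illustrative), testing bit 5 (mask y == 32) of an i32 x:
//   ((x & 32) == 0) ? 0 : A -> (and (sra (shl x, 26), 31), A)
// The shl moves the tested bit into the sign position, and the sra smears
// it into all-ones (bit set) or all-zeros (bit clear).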
if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) {
SDValue AndLHS = N0->getOperand(0);
ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
// Shift the tested bit over the sign bit.
APInt AndMask = ConstAndRHS->getAPIntValue();
SDValue ShlAmt =
DAG.getConstant(AndMask.countLeadingZeros(), SDLoc(AndLHS),
getShiftAmountTy(AndLHS.getValueType()));
SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);
// Now arithmetic right shift it all the way over, so the result is either
// all-ones, or zero.
SDValue ShrAmt =
DAG.getConstant(AndMask.getBitWidth() - 1, SDLoc(Shl),
getShiftAmountTy(Shl.getValueType()));
SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);
return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
}
}
// fold select C, 16, 0 -> shl C, 4
if (N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2() &&
TLI.getBooleanContents(N0.getValueType()) ==
TargetLowering::ZeroOrOneBooleanContent) {
// If the caller doesn't want us to simplify this into a zext of a compare,
// don't do it.
if (NotExtCompare && N2C->isOne())
return SDValue();
// Get a SetCC of the condition
// NOTE: Don't create a SETCC if it's not legal on this target.
if (!LegalOperations ||
TLI.isOperationLegal(ISD::SETCC,
LegalTypes ? getSetCCResultType(N0.getValueType()) : MVT::i1)) {
SDValue Temp, SCC;
// cast from setcc result type to select result type
if (LegalTypes) {
SCC = DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()),
N0, N1, CC);
if (N2.getValueType().bitsLT(SCC.getValueType()))
Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2),
N2.getValueType());
else
Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
N2.getValueType(), SCC);
} else {
SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
N2.getValueType(), SCC);
}
AddToWorklist(SCC.getNode());
AddToWorklist(Temp.getNode());
if (N2C->isOne())
return Temp;
// shl setcc result by log2 n2c
return DAG.getNode(
ISD::SHL, DL, N2.getValueType(), Temp,
DAG.getConstant(N2C->getAPIntValue().logBase2(), SDLoc(Temp),
getShiftAmountTy(Temp.getValueType())));
}
}
// Check to see if this is the equivalent of setcc
// FIXME: Turn all of these into setcc if setcc is legal;
// otherwise, go ahead with the folds.
if (0 && isNullConstant(N3) && isOneConstant(N2)) {
EVT XType = N0.getValueType();
if (!LegalOperations ||
TLI.isOperationLegal(ISD::SETCC, getSetCCResultType(XType))) {
SDValue Res = DAG.getSetCC(DL, getSetCCResultType(XType), N0, N1, CC);
if (Res.getValueType() != VT)
Res = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Res);
return Res;
}
// fold (seteq X, 0) -> (srl (ctlz X, log2(size(X))))
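// (For i32, ctlz(0) == 32, whose only set bit is bit 5 == log2(32); any
// nonzero X has ctlz(X) <= 31, so the srl yields 1 iff X == 0.)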
if (isNullConstant(N1) && CC == ISD::SETEQ &&
(!LegalOperations ||
TLI.isOperationLegal(ISD::CTLZ, XType))) {
SDValue Ctlz = DAG.getNode(ISD::CTLZ, SDLoc(N0), XType, N0);
return DAG.getNode(ISD::SRL, DL, XType, Ctlz,
DAG.getConstant(Log2_32(XType.getSizeInBits()),
SDLoc(Ctlz),
getShiftAmountTy(Ctlz.getValueType())));
}
// fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1))
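// (The sign bit of (-X & ~X) is set iff X > 0: for X > 0 both -X and ~X
// are negative, while for X <= 0 at least one of them has a clear sign
// bit.)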
if (isNullConstant(N1) && CC == ISD::SETGT) {
SDLoc DL(N0);
SDValue NegN0 = DAG.getNode(ISD::SUB, DL,
XType, DAG.getConstant(0, DL, XType), N0);
SDValue NotN0 = DAG.getNOT(DL, N0, XType);
return DAG.getNode(ISD::SRL, DL, XType,
DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0),
DAG.getConstant(XType.getSizeInBits() - 1, DL,
getShiftAmountTy(XType)));
}
// fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1))
if (isAllOnesConstant(N1) && CC == ISD::SETGT) {
SDLoc DL(N0);
SDValue Sign = DAG.getNode(ISD::SRL, DL, XType, N0,
DAG.getConstant(XType.getSizeInBits() - 1, DL,
getShiftAmountTy(N0.getValueType())));
return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, DL,
XType));
}
}
// Check to see if this is an integer abs.
// select_cc setg[te] X, 0, X, -X ->
// select_cc setgt X, -1, X, -X ->
// select_cc setl[te] X, 0, -X, X ->
// select_cc setlt X, 1, -X, X ->
// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
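// For example (illustrative), X = -5 as i32: Y = sra(X, 31) = -1, so
// add(X, Y) = -6 and xor(-6, -1) = 5 = |X|. For X >= 0, Y = 0 and the
// add/xor pair is the identity.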
if (N1C) {
ConstantSDNode *SubC = nullptr;
if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
(N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
(N1C->isOne() && CC == ISD::SETLT)) &&
N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));
EVT XType = N0.getValueType();
if (SubC && SubC->isNullValue() && XType.isInteger()) {
SDLoc DL(N0);
SDValue Shift = DAG.getNode(ISD::SRA, DL, XType,
N0,
DAG.getConstant(XType.getSizeInBits() - 1, DL,
getShiftAmountTy(N0.getValueType())));
SDValue Add = DAG.getNode(ISD::ADD, DL,
XType, N0, Shift);
AddToWorklist(Shift.getNode());
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
}
}
return SDValue();
}
/// This is a stub for TargetLowering::SimplifySetCC.
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
SDValue N1, ISD::CondCode Cond,
SDLoc DL, bool foldBooleans) {
TargetLowering::DAGCombinerInfo
DagCombineInfo(DAG, Level, false, this);
return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
}
/// Given an ISD::SDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
if (!C)
return SDValue();
// Avoid division by zero.
if (C->isNullValue())
return SDValue();
std::vector<SDNode*> Built;
SDValue S =
TLI.BuildSDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);
for (SDNode *N : Built)
AddToWorklist(N);
return S;
}
/// Given an ISD::SDIV node expressing a divide by constant power of 2, return a
/// DAG expression that will generate the same value by right shifting.
SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {
ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
if (!C)
return SDValue();
// Avoid division by zero.
if (C->isNullValue())
return SDValue();
std::vector<SDNode *> Built;
SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, &Built);
for (SDNode *N : Built)
AddToWorklist(N);
return S;
}
/// Given an ISD::UDIV node expressing a divide by constant, return a DAG
/// expression that will generate the same value by multiplying by a magic
/// number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
if (!C)
return SDValue();
// Avoid division by zero.
if (C->isNullValue())
return SDValue();
std::vector<SDNode*> Built;
SDValue S =
TLI.BuildUDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);
for (SDNode *N : Built)
AddToWorklist(N);
return S;
}
SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op) {
if (Level >= AfterLegalizeDAG)
return SDValue();
// Expose the DAG combiner to the target combiner implementations.
TargetLowering::DAGCombinerInfo DCI(DAG, Level, false, this);
unsigned Iterations = 0;
if (SDValue Est = TLI.getRecipEstimate(Op, DCI, Iterations)) {
if (Iterations) {
// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
// For the reciprocal, we need to find the zero of the function:
// F(X) = A X - 1 [which has a zero at X = 1/A]
// =>
// X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
// does not require additional intermediate precision]
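// For example (illustrative), with A = 4 and an initial estimate of 0.2:
// 0.2 + 0.2*(1 - 4*0.2) = 0.24, then 0.24 + 0.24*(1 - 4*0.24) = 0.2496,
// converging quadratically toward 1/4 = 0.25.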
EVT VT = Op.getValueType();
SDLoc DL(Op);
SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
AddToWorklist(Est.getNode());
// Newton iterations: Est = Est + Est (1 - Arg * Est)
for (unsigned i = 0; i < Iterations; ++i) {
SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, Est);
AddToWorklist(NewEst.getNode());
NewEst = DAG.getNode(ISD::FSUB, DL, VT, FPOne, NewEst);
AddToWorklist(NewEst.getNode());
NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst);
AddToWorklist(NewEst.getNode());
Est = DAG.getNode(ISD::FADD, DL, VT, Est, NewEst);
AddToWorklist(Est.getNode());
}
}
return Est;
}
return SDValue();
}
/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
/// For the reciprocal sqrt, we need to find the zero of the function:
/// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
/// =>
/// X_{i+1} = X_i (1.5 - A X_i^2 / 2)
/// As a result, we precompute A/2 prior to the iteration loop.
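///
/// Derivation sketch: with F'(X) = -2/X^3, Newton's update is
/// X_{i+1} = X_i - (1/X_i^2 - A) / (-2/X_i^3)
/// = X_i + (X_i/2) (1 - A X_i^2) = X_i (1.5 - (A/2) X_i^2).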
SDValue DAGCombiner::BuildRsqrtNROneConst(SDValue Arg, SDValue Est,
unsigned Iterations) {
EVT VT = Arg.getValueType();
SDLoc DL(Arg);
SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT);
// We now need 0.5 * Arg which we can write as (1.5 * Arg - Arg) so that
// this entire sequence requires only one FP constant.
SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, ThreeHalves, Arg);
AddToWorklist(HalfArg.getNode());
HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Arg);
AddToWorklist(HalfArg.getNode());
// Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
for (unsigned i = 0; i < Iterations; ++i) {
SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est);
AddToWorklist(NewEst.getNode());
NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst);
AddToWorklist(NewEst.getNode());
NewEst = DAG.getNode(ISD::FSUB, DL, VT, ThreeHalves, NewEst);
AddToWorklist(NewEst.getNode());
Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst);
AddToWorklist(Est.getNode());
}
return Est;
}
/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
/// For the reciprocal sqrt, we need to find the zero of the function:
/// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
/// =>
/// X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0))
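///
/// This is algebraically the same update as the one-constant form, since
/// (-0.5 X_i) (A X_i^2 - 3.0) = X_i (1.5 - 0.5 A X_i^2); it trades the
/// precomputed A/2 for a second constant, keeping the iteration body
/// independent of any value computed outside the loop.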
SDValue DAGCombiner::BuildRsqrtNRTwoConst(SDValue Arg, SDValue Est,
unsigned Iterations) {
EVT VT = Arg.getValueType();
SDLoc DL(Arg);
SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT);
SDValue MinusHalf = DAG.getConstantFP(-0.5, DL, VT);
// Newton iterations: Est = -0.5 * Est * (-3.0 + Arg * Est * Est)
for (unsigned i = 0; i < Iterations; ++i) {
SDValue HalfEst = DAG.getNode(ISD::FMUL, DL, VT, Est, MinusHalf);
AddToWorklist(HalfEst.getNode());
Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Est);
AddToWorklist(Est.getNode());
Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Arg);
AddToWorklist(Est.getNode());
Est = DAG.getNode(ISD::FADD, DL, VT, Est, MinusThree);
AddToWorklist(Est.getNode());
Est = DAG.getNode(ISD::FMUL, DL, VT, Est, HalfEst);
AddToWorklist(Est.getNode());
}
return Est;
}
SDValue DAGCombiner::BuildRsqrtEstimate(SDValue Op) {
if (Level >= AfterLegalizeDAG)
return SDValue();
// Expose the DAG combiner to the target combiner implementations.
TargetLowering::DAGCombinerInfo DCI(DAG, Level, false, this);
unsigned Iterations = 0;
bool UseOneConstNR = false;
if (SDValue Est = TLI.getRsqrtEstimate(Op, DCI, Iterations, UseOneConstNR)) {
AddToWorklist(Est.getNode());
if (Iterations) {
Est = UseOneConstNR ?
BuildRsqrtNROneConst(Op, Est, Iterations) :
BuildRsqrtNRTwoConst(Op, Est, Iterations);
}
return Est;
}
return SDValue();
}
/// Return true if base is a frame index, which is known not to alias with
/// anything but itself. Provides base object and offset as results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
const GlobalValue *&GV, const void *&CV) {
// Assume it is a primitive operation.
Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr;
// If it's adding a simple constant then integrate the offset.
if (Base.getOpcode() == ISD::ADD) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
Base = Base.getOperand(0);
Offset += C->getZExtValue();
}
}
// Return the underlying GlobalValue, and update the Offset. Return false
// for GlobalAddressSDNode since the same GlobalAddress may be represented
// by multiple nodes with different offsets.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
GV = G->getGlobal();
Offset += G->getOffset();
return false;
}
// Return the underlying Constant value, and update the Offset. Return false
// for ConstantSDNodes since the same constant pool entry may be represented
// by multiple nodes with different offsets.
if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal()
: (const void *)C->getConstVal();
Offset += C->getOffset();
return false;
}
// If it's any of the following then it can't alias with anything but itself.
return isa<FrameIndexSDNode>(Base);
}
/// Return true if there is any possibility that the two addresses overlap.
bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
// If they are the same then they must be aliases.
if (Op0->getBasePtr() == Op1->getBasePtr()) return true;
// If they are both volatile then they cannot be reordered.
if (Op0->isVolatile() && Op1->isVolatile()) return true;
// If one operation reads from invariant memory, and the other may store, they
// cannot alias. These should really be checking the equivalent of mayWrite,
// but it only matters for memory nodes other than load/store.
if (Op0->isInvariant() && Op1->writeMem())
return false;
if (Op1->isInvariant() && Op0->writeMem())
return false;
// Gather base node and offset information.
SDValue Base1, Base2;
int64_t Offset1, Offset2;
const GlobalValue *GV1, *GV2;
const void *CV1, *CV2;
bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(),
Base1, Offset1, GV1, CV1);
bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(),
Base2, Offset2, GV2, CV2);
// If they have the same base address, then check to see if they overlap.
if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
(Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
// It is possible for different frame indices to alias each other, mostly
// when tail call optimization reuses return address slots for arguments.
// To catch this case, look up the actual index of frame indices to compute
// the real alias relationship.
if (isFrameIndex1 && isFrameIndex2) {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
(Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
}
// Otherwise, if we know what the bases are, and they aren't identical, then
// we know they cannot alias.
if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
return false;
// If we know that SrcValue1 and SrcValue2 have relatively large alignment
// compared to the size and offset of the access, we may be able to prove they
// do not alias. This check is conservative for now to catch cases created by
// splitting vector types.
if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) &&
(Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) &&
(Op0->getMemoryVT().getSizeInBits() >> 3 ==
Op1->getMemoryVT().getSizeInBits() >> 3) &&
(Op0->getOriginalAlignment() > (Op0->getMemoryVT().getSizeInBits() >> 3))) {
int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment();
int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment();
// There is no overlap between these relatively aligned accesses of similar
// size, return no alias.
if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 ||
(OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1)
return false;
}
bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0
? CombinerGlobalAA
: DAG.getSubtarget().useAA();
#ifndef NDEBUG
if (CombinerAAOnlyFunc.getNumOccurrences() &&
CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
UseAA = false;
#endif
if (UseAA &&
Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) {
// Use alias analysis information.
int64_t MinOffset = std::min(Op0->getSrcValueOffset(),
Op1->getSrcValueOffset());
int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) +
Op0->getSrcValueOffset() - MinOffset;
int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
Op1->getSrcValueOffset() - MinOffset;
AliasResult AAResult =
AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1,
UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2,
UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
if (AAResult == NoAlias)
return false;
}
// Otherwise we have to assume they alias.
return true;
}
/// Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
SmallVectorImpl<SDValue> &Aliases) {
SmallVector<SDValue, 8> Chains; // List of chains to visit.
SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
// Get alias information for node.
bool IsLoad = isa<LoadSDNode>(N) && !cast<LSBaseSDNode>(N)->isVolatile();
// Starting off.
Chains.push_back(OriginalChain);
unsigned Depth = 0;
// Look at each chain and determine if it is an alias. If so, add it to the
// aliases list. If not, then continue up the chain looking for the next
// candidate.
while (!Chains.empty()) {
SDValue Chain = Chains.pop_back_val();
// For TokenFactor nodes, look at each operand and only continue up the
// chain until we find two aliases. If we've seen two aliases, assume we'll
// find more and revert to original chain since the xform is unlikely to be
// profitable.
//
// FIXME: The depth check could be made to return the last non-aliasing
// chain we found before we hit a tokenfactor rather than the original
// chain.
if (Depth > 6 || Aliases.size() == 2) {
Aliases.clear();
Aliases.push_back(OriginalChain);
return;
}
// Don't bother if we've been here before.
if (!Visited.insert(Chain.getNode()).second)
continue;
switch (Chain.getOpcode()) {
case ISD::EntryToken:
// Entry token is ideal chain operand, but handled in FindBetterChain.
break;
case ISD::LOAD:
case ISD::STORE: {
// Get alias information for Chain.
bool IsOpLoad = isa<LoadSDNode>(Chain.getNode()) &&
!cast<LSBaseSDNode>(Chain.getNode())->isVolatile();
// If chain is alias then stop here.
if (!(IsLoad && IsOpLoad) &&
isAlias(cast<LSBaseSDNode>(N), cast<LSBaseSDNode>(Chain.getNode()))) {
Aliases.push_back(Chain);
} else {
// Look further up the chain.
Chains.push_back(Chain.getOperand(0));
++Depth;
}
break;
}
case ISD::TokenFactor:
// We have to check each of the operands of the token factor for "small"
// token factors, so we queue them up. Adding the operands to the queue
// (stack) in reverse order maintains the original order and increases the
// likelihood that getNode will find a matching token factor (CSE).
if (Chain.getNumOperands() > 16) {
Aliases.push_back(Chain);
break;
}
for (unsigned n = Chain.getNumOperands(); n;)
Chains.push_back(Chain.getOperand(--n));
++Depth;
break;
default:
// For all other instructions we will just have to take what we can get.
Aliases.push_back(Chain);
break;
}
}
// We need to be careful here to also search for aliases through the
// value operand of a store, etc. Consider the following situation:
// Token1 = ...
// L1 = load Token1, %52
// S1 = store Token1, L1, %51
// L2 = load Token1, %52+8
// S2 = store Token1, L2, %51+8
// Token2 = Token(S1, S2)
// L3 = load Token2, %53
// S3 = store Token2, L3, %52
// L4 = load Token2, %53+8
// S4 = store Token2, L4, %52+8
// If we search for aliases of S3 (which loads address %52), and we look
// only through the chain, then we'll miss the trivial dependence on L1
// (which also loads from %52). We then might change all loads and
// stores to use Token1 as their chain operand, which could result in
// copying %53 into %52 before copying %52 into %51 (which should
// happen first).
//
// The problem is, however, that searching for such data dependencies
// can become expensive, and the cost is not directly related to the
// chain depth. Instead, we'll rule out such configurations here by
// insisting that we've visited all chain users (except for users
// of the original chain, which is not necessary). When doing this,
// we need to look through nodes we don't care about (otherwise, things
// like register copies will interfere with trivial cases).
SmallVector<const SDNode *, 16> Worklist;
for (const SDNode *N : Visited)
if (N != OriginalChain.getNode())
Worklist.push_back(N);
while (!Worklist.empty()) {
const SDNode *M = Worklist.pop_back_val();
// We have already visited M, and want to make sure we've visited any uses
// of M that we care about. For uses that we've not visited, and don't
// care about, queue them to the worklist.
for (SDNode::use_iterator UI = M->use_begin(),
UIE = M->use_end(); UI != UIE; ++UI)
if (UI.getUse().getValueType() == MVT::Other &&
Visited.insert(*UI).second) {
if (isa<MemSDNode>(*UI)) {
// We've not visited this use, and we care about it (it could have an
// ordering dependency with the original node).
Aliases.clear();
Aliases.push_back(OriginalChain);
return;
}
// We've not visited this use, but we don't care about it. Mark it as
// visited and enqueue it to the worklist.
Worklist.push_back(*UI);
}
}
}
/// Walk up chain skipping non-aliasing memory nodes, looking for a better chain
/// (aliasing node).
SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
SmallVector<SDValue, 8> Aliases; // Ops for replacing token factor.
// Accumulate all the aliases to this node.
GatherAllAliases(N, OldChain, Aliases);
// If no operands then chain to entry token.
if (Aliases.size() == 0)
return DAG.getEntryNode();
// If a single operand then chain to it. We don't need to revisit it.
if (Aliases.size() == 1)
return Aliases[0];
// Construct a custom tailored token factor.
return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
}
/// This is the entry point for the file.
void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA,
CodeGenOpt::Level OptLevel) {
/// This is the main entry point to this class.
DAGCombiner(*this, AA, OptLevel).Run(Level);
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/FastISel.cpp | //===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Analysis.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "isel"
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
"target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
"target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
unsigned AttrIdx) {
IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
Alignment = CS->getParamAlignment(AttrIdx);
}
/// Set the current block to which generated machine instructions will be
/// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
LocalValueMap.clear();
// Instructions are appended to FuncInfo.MBB. If the basic block already
// contains labels or copies, use the last instruction as the last local
// value.
EmitStartPt = nullptr;
if (!FuncInfo.MBB->empty())
EmitStartPt = &FuncInfo.MBB->back();
LastLocalValue = EmitStartPt;
}
bool FastISel::lowerArguments() {
if (!FuncInfo.CanLowerReturn)
// Fallback to SDISel argument lowering code to deal with sret pointer
// parameter.
return false;
if (!fastLowerArguments())
return false;
// Enter arguments into ValueMap for uses in non-entry BBs.
for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
E = FuncInfo.Fn->arg_end();
I != E; ++I) {
DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
assert(VI != LocalValueMap.end() && "Missed an argument?");
FuncInfo.ValueMap[I] = VI->second;
}
return true;
}
void FastISel::flushLocalValueMap() {
LocalValueMap.clear();
LastLocalValue = EmitStartPt;
recomputeInsertPt();
SavedInsertPt = FuncInfo.InsertPt;
}
bool FastISel::hasTrivialKill(const Value *V) {
// Don't consider constants or arguments to have trivial kills.
const Instruction *I = dyn_cast<Instruction>(V);
if (!I)
return false;
// No-op casts are trivially coalesced by fast-isel.
if (const auto *Cast = dyn_cast<CastInst>(I))
if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
!hasTrivialKill(Cast->getOperand(0)))
return false;
// Even if the value has only one use in the LLVM IR, it is possible that
// FastISel might fold the use into another instruction such that there is
// more than one use at the Machine Instruction level.
unsigned Reg = lookUpRegForValue(V);
if (Reg && !MRI.use_empty(Reg))
return false;
// GEPs with all zero indices are trivially coalesced by fast-isel.
if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
return false;
// Only instructions with a single use in the same basic block are considered
// to have trivial kills.
return I->hasOneUse() &&
!(I->getOpcode() == Instruction::BitCast ||
I->getOpcode() == Instruction::PtrToInt ||
I->getOpcode() == Instruction::IntToPtr) &&
cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}
unsigned FastISel::getRegForValue(const Value *V) {
EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
// Don't handle non-simple values in FastISel.
if (!RealVT.isSimple())
return 0;
// Ignore illegal types. We must do this before looking up the value
// in ValueMap because Arguments are given virtual registers regardless
// of whether FastISel can handle them.
MVT VT = RealVT.getSimpleVT();
if (!TLI.isTypeLegal(VT)) {
// Handle integer promotions, though, because they're common and easy.
if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
else
return 0;
}
// Look up the value to see if we already have a register for it.
unsigned Reg = lookUpRegForValue(V);
if (Reg)
return Reg;
// In bottom-up mode, just create the virtual register which will be used
// to hold the value. It will be materialized later.
if (isa<Instruction>(V) &&
(!isa<AllocaInst>(V) ||
!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
return FuncInfo.InitializeRegForValue(V);
SavePoint SaveInsertPt = enterLocalValueArea();
// Materialize the value in a register. Emit any instructions in the
// local value area.
Reg = materializeRegForValue(V, VT);
leaveLocalValueArea(SaveInsertPt);
return Reg;
}
unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
unsigned Reg = 0;
if (const auto *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getValue().getActiveBits() <= 64)
Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<AllocaInst>(V))
Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
else if (isa<ConstantPointerNull>(V))
// Translate this as an integer zero so that it can be
// local-CSE'd with actual integer zeros.
Reg = getRegForValue(
Constant::getNullValue(DL.getIntPtrType(V->getContext())));
else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
if (CF->isNullValue())
Reg = fastMaterializeFloatZero(CF);
else
// Try to emit the constant directly.
Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
if (!Reg) {
// Try to emit the constant by using an integer constant with a cast.
const APFloat &Flt = CF->getValueAPF();
EVT IntVT = TLI.getPointerTy(DL);
uint64_t x[2];
uint32_t IntBitWidth = IntVT.getSizeInBits();
bool isExact;
(void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
APFloat::rmTowardZero, &isExact);
if (isExact) {
APInt IntVal(IntBitWidth, x);
unsigned IntegerReg =
getRegForValue(ConstantInt::get(V->getContext(), IntVal));
if (IntegerReg != 0)
Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
/*Kill=*/false);
}
}
} else if (const auto *Op = dyn_cast<Operator>(V)) {
if (!selectOperator(Op, Op->getOpcode()))
if (!isa<Instruction>(Op) ||
!fastSelectInstruction(cast<Instruction>(Op)))
return 0;
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
return Reg;
}
/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
unsigned Reg = 0;
// Give the target-specific code a try first.
if (isa<Constant>(V))
Reg = fastMaterializeConstant(cast<Constant>(V));
// If target-specific code couldn't or didn't want to handle the value, then
// give target-independent code a try.
if (!Reg)
Reg = materializeConstant(V, VT);
// Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate.
if (Reg) {
LocalValueMap[V] = Reg;
LastLocalValue = MRI.getVRegDef(Reg);
}
return Reg;
}
unsigned FastISel::lookUpRegForValue(const Value *V) {
// Look up the value to see if we already have a register for it. We
// cache values defined by Instructions across blocks, and other values
// only locally. This is because Instructions already have the SSA
// def-dominates-use requirement enforced.
DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
if (I != FuncInfo.ValueMap.end())
return I->second;
return LocalValueMap[V];
}
void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
if (!isa<Instruction>(I)) {
LocalValueMap[I] = Reg;
return;
}
unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0)
// Use the new register.
AssignedReg = Reg;
else if (Reg != AssignedReg) {
// Arrange for uses of AssignedReg to be replaced by uses of Reg.
for (unsigned i = 0; i < NumRegs; i++)
FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
AssignedReg = Reg;
}
}
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
unsigned IdxN = getRegForValue(Idx);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
return std::pair<unsigned, bool>(0, false);
bool IdxNIsKill = hasTrivialKill(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend it.
MVT PtrVT = TLI.getPointerTy(DL);
EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
if (IdxVT.bitsLT(PtrVT)) {
IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
IdxNIsKill);
IdxNIsKill = true;
} else if (IdxVT.bitsGT(PtrVT)) {
IdxN =
fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
IdxNIsKill = true;
}
return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
void FastISel::recomputeInsertPt() {
if (getLastLocalValue()) {
FuncInfo.InsertPt = getLastLocalValue();
FuncInfo.MBB = FuncInfo.InsertPt->getParent();
++FuncInfo.InsertPt;
} else
FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
// Now skip past any EH_LABELs, which must remain at the beginning.
while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
++FuncInfo.InsertPt;
}
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E) {
assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
while (I != E) {
MachineInstr *Dead = &*I;
++I;
Dead->eraseFromParent();
++NumFastIselDead;
}
recomputeInsertPt();
}
FastISel::SavePoint FastISel::enterLocalValueArea() {
MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
DebugLoc OldDL = DbgLoc;
recomputeInsertPt();
DbgLoc = DebugLoc();
SavePoint SP = {OldInsertPt, OldDL};
return SP;
}
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
LastLocalValue = std::prev(FuncInfo.InsertPt);
// Restore the previous insert position.
FuncInfo.InsertPt = OldInsertPt.InsertPt;
DbgLoc = OldInsertPt.DL;
}
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
if (VT == MVT::Other || !VT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
// We only handle legal types. For example, on x86-32 the instruction
// selector contains all of the 64-bit instructions from x86-64,
// under the assumption that i64 won't be used if the target doesn't
// support it.
if (!TLI.isTypeLegal(VT)) {
// MVT::i1 is special. Allow AND, OR, or XOR because they
// don't require additional zeroing, which makes them easy.
if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
ISDOpcode == ISD::XOR))
VT = TLI.getTypeToTransformTo(I->getContext(), VT);
else
return false;
}
// Check if the first operand is a constant, and handle it as "ri". At -O0,
// we don't have anything that canonicalizes operand order.
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
unsigned Op1 = getRegForValue(I->getOperand(1));
if (!Op1)
return false;
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
unsigned ResultReg =
fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
CI->getZExtValue(), VT.getSimpleVT());
if (!ResultReg)
return false;
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, ResultReg);
return true;
}
unsigned Op0 = getRegForValue(I->getOperand(0));
if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
// Check if the second operand is a constant and handle it appropriately.
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
uint64_t Imm = CI->getSExtValue();
// Transform "sdiv exact X, 8" -> "sra X, 3".
if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
Imm = Log2_64(Imm);
ISDOpcode = ISD::SRA;
}
// Transform "urem x, pow2" -> "and x, pow2-1".
if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
isPowerOf2_64(Imm)) {
--Imm;
ISDOpcode = ISD::AND;
}
unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
Op0IsKill, Imm, VT.getSimpleVT());
if (!ResultReg)
return false;
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, ResultReg);
return true;
}
// Check if the second operand is a constant float.
if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
ISDOpcode, Op0, Op0IsKill, CF);
if (ResultReg) {
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, ResultReg);
return true;
}
}
unsigned Op1 = getRegForValue(I->getOperand(1));
if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
return false;
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
// Now we have both operands in registers. Emit the instruction.
unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
if (!ResultReg)
// Target-specific code wasn't able to find a machine opcode for
// the given ISD opcode and type. Halt "fast" selection and bail.
return false;
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, ResultReg);
return true;
}
bool FastISel::selectGetElementPtr(const User *I) {
unsigned N = getRegForValue(I->getOperand(0));
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
bool NIsKill = hasTrivialKill(I->getOperand(0));
// Keep a running tab of the total offset to coalesce multiple N = N + Offset
// into a single N = N + TotalOffset.
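// For example (illustrative), "gep %p, i32 0, i32 2, i32 3", where field 2
// of the struct is an array of i32, folds the field offset plus 3*4 bytes
// of element offset into one ADD instead of one ADD per index.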
uint64_t TotalOffs = 0;
// FIXME: What's a good SWAG number for MaxOffs?
uint64_t MaxOffs = 2048;
Type *Ty = I->getOperand(0)->getType();
MVT VT = TLI.getPointerTy(DL);
for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1,
E = I->op_end();
OI != E; ++OI) {
const Value *Idx = *OI;
if (auto *StTy = dyn_cast<StructType>(Ty)) {
uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
if (TotalOffs >= MaxOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
TotalOffs = 0;
}
}
Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
// If this is a constant subscript, handle it quickly.
if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero())
continue;
// N = N + Offset
uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
if (TotalOffs >= MaxOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
TotalOffs = 0;
}
continue;
}
if (TotalOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
TotalOffs = 0;
}
// N = N + Idx * ElementSize;
uint64_t ElementSize = DL.getTypeAllocSize(Ty);
std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
unsigned IdxN = Pair.first;
bool IdxNIsKill = Pair.second;
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
return false;
if (ElementSize != 1) {
IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
return false;
IdxNIsKill = true;
}
N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
}
}
if (TotalOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
}
// We successfully emitted code for the given LLVM Instruction.
updateValueMap(I, N);
return true;
}
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
const CallInst *CI, unsigned StartIdx) {
for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
Value *Val = CI->getArgOperand(i);
// Check for constants and encode them with a StackMaps::ConstantOp prefix.
if (const auto *C = dyn_cast<ConstantInt>(Val)) {
Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
} else if (isa<ConstantPointerNull>(Val)) {
Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
Ops.push_back(MachineOperand::CreateImm(0));
} else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
// Values coming from a stack location also require a special encoding,
// but that is added later on by the target specific frame index
// elimination implementation.
auto SI = FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end())
Ops.push_back(MachineOperand::CreateFI(SI->second));
else
return false;
} else {
unsigned Reg = getRegForValue(Val);
if (!Reg)
return false;
Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
}
}
return true;
}
bool FastISel::selectStackmap(const CallInst *I) {
// void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
// [live variables...])
assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
"Stackmap cannot return a value.");
// The stackmap intrinsic only records the live variables (the arguments
// passed to it) and emits NOPS (if requested). Unlike the patchpoint
// intrinsic, this won't be lowered to a function call. This means we don't
// have to worry about calling conventions and target-specific lowering code.
// Instead we perform the call lowering right here.
//
// CALLSEQ_START(0)
// STACKMAP(id, nbytes, ...)
// CALLSEQ_END(0, 0)
//
SmallVector<MachineOperand, 32> Ops;
// Add the <id> and <numBytes> constants.
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
"Expected a constant integer.");
const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
"Expected a constant integer.");
const auto *NumBytes =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
// Push live variables for the stack map (skipping the first two arguments
// <id> and <numBytes>).
if (!addStackMapLiveVars(Ops, I, 2))
return false;
// We are not adding any register mask info here, because the stackmap doesn't
// clobber anything.
// Add scratch registers as implicit def and early clobber.
CallingConv::ID CC = I->getCallingConv();
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
for (unsigned i = 0; ScratchRegs[i]; ++i)
Ops.push_back(MachineOperand::CreateReg(
ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
/*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
.addImm(0);
// Issue STACKMAP.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::STACKMAP));
for (auto const &MO : Ops)
MIB.addOperand(MO);
// Issue CALLSEQ_END
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
.addImm(0)
.addImm(0);
// Inform the Frame Information that we have a stackmap in this function.
FuncInfo.MF->getFrameInfo()->setHasStackMap();
return true;
}
/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
unsigned NumArgs, const Value *Callee,
bool ForceRetVoidTy, CallLoweringInfo &CLI) {
ArgListTy Args;
Args.reserve(NumArgs);
// Populate the argument list.
// Attributes for args start at offset 1, after the return attribute.
ImmutableCallSite CS(CI);
for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
ArgI != ArgE; ++ArgI) {
Value *V = CI->getOperand(ArgI);
assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
ArgListEntry Entry;
Entry.Val = V;
Entry.Ty = V->getType();
Entry.setAttributes(&CS, AttrI);
Args.push_back(Entry);
}
Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
: CI->getType();
CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
return lowerCallTo(CLI);
}
FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
const char *Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
SmallString<32> MangledName;
Mangler::getNameWithPrefix(MangledName, Target, DL);
MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}
bool FastISel::selectPatchpoint(const CallInst *I) {
// void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
// i32 <numBytes>,
// i8* <target>,
// i32 <numArgs>,
// [Args...],
// [live variables...])
CallingConv::ID CC = I->getCallingConv();
bool IsAnyRegCC = CC == CallingConv::AnyReg;
bool HasDef = !I->getType()->isVoidTy();
Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
// Get the real number of arguments participating in the call <numArgs>
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
"Expected a constant integer.");
const auto *NumArgsVal =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
unsigned NumArgs = NumArgsVal->getZExtValue();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
// This includes all meta-operands up to but not including CC.
unsigned NumMetaOpers = PatchPointOpers::CCPos;
assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
"Not enough arguments provided to the patchpoint intrinsic");
// For AnyRegCC the arguments are lowered later on manually.
unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
CallLoweringInfo CLI;
CLI.setIsPatchPoint();
if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
return false;
assert(CLI.Call && "No call instruction specified.");
SmallVector<MachineOperand, 32> Ops;
// Add an explicit result reg if we use the anyreg calling convention.
if (IsAnyRegCC && HasDef) {
assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
CLI.NumResultRegs = 1;
Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
}
// Add the <id> and <numBytes> constants.
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
"Expected a constant integer.");
const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
"Expected a constant integer.");
const auto *NumBytes =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
// Add the call target.
if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
uint64_t CalleeConstAddr =
cast<ConstantInt>(C->getOperand(0))->getZExtValue();
Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
} else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
if (C->getOpcode() == Instruction::IntToPtr) {
uint64_t CalleeConstAddr =
cast<ConstantInt>(C->getOperand(0))->getZExtValue();
Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
} else
llvm_unreachable("Unsupported ConstantExpr.");
} else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
Ops.push_back(MachineOperand::CreateGA(GV, 0));
} else if (isa<ConstantPointerNull>(Callee))
Ops.push_back(MachineOperand::CreateImm(0));
else
llvm_unreachable("Unsupported callee address.");
// Adjust <numArgs> to account for any arguments that have been passed on
// the stack instead.
unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
// Add the calling convention
Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
// Add the arguments we omitted previously. The register allocator should
// place these in any free register.
if (IsAnyRegCC) {
for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
unsigned Reg = getRegForValue(I->getArgOperand(i));
if (!Reg)
return false;
Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
}
}
// Push the arguments from the call instruction.
for (auto Reg : CLI.OutRegs)
Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
// Push live variables for the stack map.
if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
return false;
// Push the register mask info.
Ops.push_back(MachineOperand::CreateRegMask(
TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
// Add scratch registers as implicit def and early clobber.
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
for (unsigned i = 0; ScratchRegs[i]; ++i)
Ops.push_back(MachineOperand::CreateReg(
ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
/*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
// Add implicit defs (return values).
for (auto Reg : CLI.InRegs)
Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
/*IsImpl=*/true));
// Insert the patchpoint instruction before the call generated by the target.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
TII.get(TargetOpcode::PATCHPOINT));
for (auto &MO : Ops)
MIB.addOperand(MO);
MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
// Delete the original call instruction.
CLI.Call->eraseFromParent();
// Inform the Frame Information that we have a patchpoint in this function.
FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
if (CLI.NumResultRegs)
updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
return true;
}
/// Returns an AttributeSet representing the attributes applied to the return
/// value of the given call.
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
SmallVector<Attribute::AttrKind, 2> Attrs;
if (CLI.RetSExt)
Attrs.push_back(Attribute::SExt);
if (CLI.RetZExt)
Attrs.push_back(Attribute::ZExt);
if (CLI.IsInReg)
Attrs.push_back(Attribute::InReg);
return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
Attrs);
}
bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
unsigned NumArgs) {
MCContext &Ctx = MF->getContext();
SmallString<32> MangledName;
Mangler::getNameWithPrefix(MangledName, SymName, DL);
MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
return lowerCallTo(CI, Sym, NumArgs);
}
bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
unsigned NumArgs) {
ImmutableCallSite CS(CI);
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
Type *RetTy = FTy->getReturnType();
ArgListTy Args;
Args.reserve(NumArgs);
// Populate the argument list.
// Attributes for args start at offset 1, after the return attribute.
for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
Value *V = CI->getOperand(ArgI);
assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
ArgListEntry Entry;
Entry.Val = V;
Entry.Ty = V->getType();
Entry.setAttributes(&CS, ArgI + 1);
Args.push_back(Entry);
}
CallLoweringInfo CLI;
CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
return lowerCallTo(CLI);
}
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
// Handle the incoming return values from the call.
CLI.clearIns();
SmallVector<EVT, 4> RetTys;
ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
bool CanLowerReturn = TLI.CanLowerReturn(
CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
// FIXME: sret demotion isn't supported yet - bail out.
if (!CanLowerReturn)
return false;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT;
MyFlags.ArgVT = VT;
MyFlags.Used = CLI.IsReturnValueUsed;
if (CLI.RetSExt)
MyFlags.Flags.setSExt();
if (CLI.RetZExt)
MyFlags.Flags.setZExt();
if (CLI.IsInReg)
MyFlags.Flags.setInReg();
CLI.Ins.push_back(MyFlags);
}
}
// Handle all of the outgoing arguments.
CLI.clearOuts();
for (auto &Arg : CLI.getArgs()) {
Type *FinalType = Arg.Ty;
if (Arg.IsByVal)
FinalType = cast<PointerType>(Arg.Ty)->getElementType();
bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
FinalType, CLI.CallConv, CLI.IsVarArg);
ISD::ArgFlagsTy Flags;
if (Arg.IsZExt)
Flags.setZExt();
if (Arg.IsSExt)
Flags.setSExt();
if (Arg.IsInReg)
Flags.setInReg();
if (Arg.IsSRet)
Flags.setSRet();
if (Arg.IsByVal)
Flags.setByVal();
if (Arg.IsInAlloca) {
Flags.setInAlloca();
// Set the byval flag for CCAssignFn callbacks that don't know about
// inalloca. This way we can know how many bytes we should've allocated
// and how many bytes a callee cleanup function will pop. If we port
// inalloca to more targets, we'll have to add custom inalloca handling in
// the various CC lowering callbacks.
Flags.setByVal();
}
if (Arg.IsByVal || Arg.IsInAlloca) {
PointerType *Ty = cast<PointerType>(Arg.Ty);
Type *ElementTy = Ty->getElementType();
unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
// For ByVal, the alignment should come from the frontend. The backend will
// guess if this info is not there, but there are cases it cannot get right.
unsigned FrameAlign = Arg.Alignment;
if (!FrameAlign)
FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
Flags.setByValSize(FrameSize);
Flags.setByValAlign(FrameAlign);
}
if (Arg.IsNest)
Flags.setNest();
if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
Flags.setOrigAlign(OriginalAlignment);
CLI.OutVals.push_back(Arg.Val);
CLI.OutFlags.push_back(Flags);
}
if (!fastLowerCall(CLI))
return false;
// Set all unused physreg defs as dead.
assert(CLI.Call && "No call instruction specified.");
CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
if (CLI.NumResultRegs && CLI.CS)
updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
return true;
}
bool FastISel::lowerCall(const CallInst *CI) {
ImmutableCallSite CS(CI);
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
Type *RetTy = FuncTy->getReturnType();
ArgListTy Args;
ArgListEntry Entry;
Args.reserve(CS.arg_size());
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
Value *V = *i;
// Skip empty types
if (V->getType()->isEmptyTy())
continue;
Entry.Val = V;
Entry.Ty = V->getType();
// Skip the first return-type Attribute to get to params.
Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
Args.push_back(Entry);
}
// Check if target-independent constraints permit a tail call here.
// Target-dependent constraints are checked within fastLowerCall.
bool IsTailCall = CI->isTailCall();
if (IsTailCall && !isInTailCallPosition(CS, TM))
IsTailCall = false;
CallLoweringInfo CLI;
CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
.setTailCall(IsTailCall);
return lowerCallTo(CLI);
}
bool FastISel::selectCall(const User *I) {
const CallInst *Call = cast<CallInst>(I);
// Handle simple inline asms.
if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
// If the inline asm has side effects, then make sure that no local value
// lives across by flushing the local value map.
if (IA->hasSideEffects())
flushLocalValueMap();
// Don't attempt to handle constraints.
if (!IA->getConstraintString().empty())
return false;
unsigned ExtraInfo = 0;
if (IA->hasSideEffects())
ExtraInfo |= InlineAsm::Extra_HasSideEffects;
if (IA->isAlignStack())
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::INLINEASM))
.addExternalSymbol(IA->getAsmString().c_str())
.addImm(ExtraInfo);
return true;
}
MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
ComputeUsesVAFloatArgument(*Call, &MMI);
// Handle intrinsic function calls.
if (const auto *II = dyn_cast<IntrinsicInst>(Call))
return selectIntrinsicCall(II);
// Usually it does not make sense to initialize a value, make an unrelated
// function call, and then use the value, because the value tends to be
// spilled onto the stack. So we move the pointer to the last local value
// to the beginning of the block, so that all the values that have already
// been materialized appear after the call. It also makes sense to skip
// intrinsics, since they tend to be inlined.
flushLocalValueMap();
return lowerCall(Call);
}
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
switch (II->getIntrinsicID()) {
default:
break;
// At -O0 we don't care about the lifetime intrinsics.
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
// The donothing intrinsic does, well, nothing.
case Intrinsic::donothing:
return true;
case Intrinsic::eh_actions: {
unsigned ResultReg = getRegForValue(UndefValue::get(II->getType()));
if (!ResultReg)
return false;
updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
assert(DI->getVariable() && "Missing variable");
if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
const Value *Address = DI->getAddress();
if (!Address || isa<UndefValue>(Address)) {
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
unsigned Offset = 0;
Optional<MachineOperand> Op;
if (const auto *Arg = dyn_cast<Argument>(Address))
// Some arguments' frame index is recorded during argument lowering.
Offset = FuncInfo.getArgumentFrameIndex(Arg);
if (Offset)
Op = MachineOperand::CreateFI(Offset);
if (!Op)
if (unsigned Reg = lookUpRegForValue(Address))
Op = MachineOperand::CreateReg(Reg, false);
// If we have a VLA that has a "use" in a metadata node that's then used
// here but it has no other uses, then we have a problem. E.g.,
//
// int foo (const int *x) {
// char a[*x];
// return 0;
// }
//
// If we assign 'a' a vreg and fast isel later on has to use the selection
// DAG isel, it will want to copy the value to the vreg. However, there are
// no uses, which goes counter to what selection DAG isel expects.
if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
(!isa<AllocaInst>(Address) ||
!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
false);
if (Op) {
assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
"Expected inlined-at fields to agree");
if (Op->isReg()) {
Op->setIsDebug(true);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
DI->getVariable(), DI->getExpression());
} else
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE))
.addOperand(*Op)
.addImm(0)
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
}
return true;
}
case Intrinsic::dbg_value: {
// This form of DBG_VALUE is target-independent.
const DbgValueInst *DI = cast<DbgValueInst>(II);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const Value *V = DI->getValue();
assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
"Expected inlined-at fields to agree");
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(0U)
.addImm(DI->getOffset())
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
} else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addCImm(CI)
.addImm(DI->getOffset())
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
else
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addImm(CI->getZExtValue())
.addImm(DI->getOffset())
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
} else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addFPImm(CF)
.addImm(DI->getOffset())
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
} else if (unsigned Reg = lookUpRegForValue(V)) {
// FIXME: This does not handle register-indirect values at offset 0.
bool IsIndirect = DI->getOffset() != 0;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
DI->getOffset(), DI->getVariable(), DI->getExpression());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
}
return true;
}
case Intrinsic::objectsize: {
ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
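// Argument 1 is the 'min' flag: when it is zero the caller asked for the
// maximum object size, which -O0 conservatively reports as -1 (unknown);
// when it is nonzero the caller asked for the minimum, which is reported
// as 0.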
unsigned long long Res = CI->isZero() ? -1ULL : 0;
Constant *ResCI = ConstantInt::get(II->getType(), Res);
unsigned ResultReg = getRegForValue(ResCI);
if (!ResultReg)
return false;
updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::expect: {
unsigned ResultReg = getRegForValue(II->getArgOperand(0));
if (!ResultReg)
return false;
updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::experimental_stackmap:
return selectStackmap(II);
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
return selectPatchpoint(II);
}
return fastLowerIntrinsicCall(II);
}
bool FastISel::selectCast(const User *I, unsigned Opcode) {
EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(DL, I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
!DstVT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
// Check if the destination type is legal.
if (!TLI.isTypeLegal(DstVT))
return false;
// Check if the source operand is legal.
if (!TLI.isTypeLegal(SrcVT))
return false;
unsigned InputReg = getRegForValue(I->getOperand(0));
if (!InputReg)
// Unhandled operand. Halt "fast" selection and bail.
return false;
bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
Opcode, InputReg, InputRegIsKill);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
bool FastISel::selectBitCast(const User *I) {
// If the bitcast doesn't change the type, just use the operand value.
if (I->getType() == I->getOperand(0)->getType()) {
unsigned Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
updateValueMap(I, Reg);
return true;
}
// Bitcasts of other values become reg-reg copies or BITCAST operators.
EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstEVT = TLI.getValueType(DL, I->getType());
if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
!TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
// Unhandled type. Halt "fast" selection and bail.
return false;
MVT SrcVT = SrcEVT.getSimpleVT();
MVT DstVT = DstEVT.getSimpleVT();
unsigned Op0 = getRegForValue(I->getOperand(0));
if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
// First, try to perform the bitcast by inserting a reg-reg copy.
unsigned ResultReg = 0;
if (SrcVT == DstVT) {
const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
// Don't attempt a cross-class copy. It will likely fail.
if (SrcClass == DstClass) {
ResultReg = createResultReg(DstClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
}
}
// If the reg-reg copy failed, select a BITCAST opcode.
if (!ResultReg)
ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
bool FastISel::selectInstruction(const Instruction *I) {
// Just before the terminator instruction, insert instructions to
// feed PHI nodes in successor blocks.
if (isa<TerminatorInst>(I))
if (!handlePHINodesInSuccessorBlocks(I->getParent()))
return false;
DbgLoc = I->getDebugLoc();
SavedInsertPt = FuncInfo.InsertPt;
if (const auto *Call = dyn_cast<CallInst>(I)) {
const Function *F = Call->getCalledFunction();
LibFunc::Func Func;
// As a special case, don't handle calls to builtin library functions that
// may be translated directly to target instructions.
if (F && !F->hasLocalLinkage() && F->hasName() &&
LibInfo->getLibFunc(F->getName(), Func) &&
LibInfo->hasOptimizedCodeGen(Func))
return false;
// Don't handle Intrinsic::trap if a trap function is specified.
if (F && F->getIntrinsicID() == Intrinsic::trap &&
Call->hasFnAttr("trap-func-name"))
return false;
}
// First, try doing target-independent selection.
if (!SkipTargetIndependentISel) {
if (selectOperator(I, I->getOpcode())) {
++NumFastIselSuccessIndependent;
DbgLoc = DebugLoc();
return true;
}
// Remove dead code.
recomputeInsertPt();
if (SavedInsertPt != FuncInfo.InsertPt)
removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
SavedInsertPt = FuncInfo.InsertPt;
}
// Next, try calling the target to attempt to handle the instruction.
if (fastSelectInstruction(I)) {
++NumFastIselSuccessTarget;
DbgLoc = DebugLoc();
return true;
}
// Remove dead code.
recomputeInsertPt();
if (SavedInsertPt != FuncInfo.InsertPt)
removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
DbgLoc = DebugLoc();
// Undo phi node updates, because they will be added again by SelectionDAG.
if (isa<TerminatorInst>(I))
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
return false;
}
/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
// Fall-through case: MSucc is the layout successor and this is not the only
// instruction in the block, so no branch is needed. (If the branch would be
// the only instruction, we still emit it below for more accurate line
// information.)
} else {
// The unconditional branch case.
TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
SmallVector<MachineOperand, 0>(), DbgLoc);
}
uint32_t BranchWeight = 0;
if (FuncInfo.BPI)
BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
MSucc->getBasicBlock());
FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
}
/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
if (!OpReg)
return false;
bool OpRegIsKill = hasTrivialKill(I);
// If the target has ISD::FNEG, use it.
EVT VT = TLI.getValueType(DL, I->getType());
unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
OpReg, OpRegIsKill);
if (ResultReg) {
updateValueMap(I, ResultReg);
return true;
}
// Bitcast the value to integer, twiddle the sign bit with xor,
// and then bitcast it back to floating-point.
if (VT.getSizeInBits() > 64)
return false;
EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
if (!TLI.isTypeLegal(IntVT))
return false;
unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
ISD::BITCAST, OpReg, OpRegIsKill);
if (!IntReg)
return false;
unsigned IntResultReg = fastEmit_ri_(
IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
if (!IntResultReg)
return false;
ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
IntResultReg, /*IsKill=*/true);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
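// Illustrative sketch (comment only, not part of the original source): for an
// f64 negation on a target without ISD::FNEG, the fallback above emits the
// equivalent of
//
//   %i   = bitcast double %x to i64
//   %neg = xor i64 %i, 0x8000000000000000   ; flip the sign bit (1 << 63)
//   %r   = bitcast i64 %neg to double
//
// which is the classic sign-bit-twiddling form of fneg.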
bool FastISel::selectExtractValue(const User *U) {
const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
if (!EVI)
return false;
// Make sure we only try to handle extracts with a legal result. But also
// allow i1 because it's easy.
EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
if (!RealVT.isSimple())
return false;
MVT VT = RealVT.getSimpleVT();
if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
return false;
const Value *Op0 = EVI->getOperand(0);
Type *AggTy = Op0->getType();
// Get the base result register.
unsigned ResultReg;
DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
if (I != FuncInfo.ValueMap.end())
ResultReg = I->second;
else if (isa<Instruction>(Op0))
ResultReg = FuncInfo.InitializeRegForValue(Op0);
else
return false; // fast-isel can't handle aggregate constants at the moment
// Get the actual result register, which is an offset from the base register.
unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
SmallVector<EVT, 4> AggValueVTs;
ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
for (unsigned i = 0; i < VTIndex; i++)
ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
updateValueMap(EVI, ResultReg);
return true;
}
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
switch (Opcode) {
case Instruction::Add:
return selectBinaryOp(I, ISD::ADD);
case Instruction::FAdd:
return selectBinaryOp(I, ISD::FADD);
case Instruction::Sub:
return selectBinaryOp(I, ISD::SUB);
case Instruction::FSub:
// FNeg is currently represented in LLVM IR as a special case of FSub.
if (BinaryOperator::isFNeg(I))
return selectFNeg(I);
return selectBinaryOp(I, ISD::FSUB);
case Instruction::Mul:
return selectBinaryOp(I, ISD::MUL);
case Instruction::FMul:
return selectBinaryOp(I, ISD::FMUL);
case Instruction::SDiv:
return selectBinaryOp(I, ISD::SDIV);
case Instruction::UDiv:
return selectBinaryOp(I, ISD::UDIV);
case Instruction::FDiv:
return selectBinaryOp(I, ISD::FDIV);
case Instruction::SRem:
return selectBinaryOp(I, ISD::SREM);
case Instruction::URem:
return selectBinaryOp(I, ISD::UREM);
case Instruction::FRem:
return selectBinaryOp(I, ISD::FREM);
case Instruction::Shl:
return selectBinaryOp(I, ISD::SHL);
case Instruction::LShr:
return selectBinaryOp(I, ISD::SRL);
case Instruction::AShr:
return selectBinaryOp(I, ISD::SRA);
case Instruction::And:
return selectBinaryOp(I, ISD::AND);
case Instruction::Or:
return selectBinaryOp(I, ISD::OR);
case Instruction::Xor:
return selectBinaryOp(I, ISD::XOR);
case Instruction::GetElementPtr:
return selectGetElementPtr(I);
case Instruction::Br: {
const BranchInst *BI = cast<BranchInst>(I);
if (BI->isUnconditional()) {
const BasicBlock *LLVMSucc = BI->getSuccessor(0);
MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
fastEmitBranch(MSucc, BI->getDebugLoc());
return true;
}
// Conditional branches are not handled yet.
// Halt "fast" selection and bail.
return false;
}
case Instruction::Unreachable:
if (TM.Options.TrapUnreachable)
return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
else
return true;
case Instruction::Alloca:
// FunctionLowering has the static-sized case covered.
if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
return true;
// Dynamic-sized alloca is not handled yet.
return false;
case Instruction::Call:
return selectCall(I);
case Instruction::BitCast:
return selectBitCast(I);
case Instruction::FPToSI:
return selectCast(I, ISD::FP_TO_SINT);
case Instruction::ZExt:
return selectCast(I, ISD::ZERO_EXTEND);
case Instruction::SExt:
return selectCast(I, ISD::SIGN_EXTEND);
case Instruction::Trunc:
return selectCast(I, ISD::TRUNCATE);
case Instruction::SIToFP:
return selectCast(I, ISD::SINT_TO_FP);
case Instruction::IntToPtr: // Deliberate fall-through.
case Instruction::PtrToInt: {
EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(DL, I->getType());
if (DstVT.bitsGT(SrcVT))
return selectCast(I, ISD::ZERO_EXTEND);
if (DstVT.bitsLT(SrcVT))
return selectCast(I, ISD::TRUNCATE);
unsigned Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
updateValueMap(I, Reg);
return true;
}
case Instruction::ExtractValue:
return selectExtractValue(I);
case Instruction::PHI:
llvm_unreachable("FastISel shouldn't visit PHI nodes!");
default:
// Unhandled instruction. Halt "fast" selection and bail.
return false;
}
}
FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo,
bool SkipTargetIndependentISel)
: FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
TII(*MF->getSubtarget().getInstrInfo()),
TLI(*MF->getSubtarget().getTargetLowering()),
TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
SkipTargetIndependentISel(SkipTargetIndependentISel) {}
FastISel::~FastISel() {}
bool FastISel::fastLowerArguments() { return false; }
bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
return false;
}
unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/) {
return 0;
}
unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, unsigned /*Op1*/,
bool /*Op1IsKill*/) {
return 0;
}
unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
return 0;
}
unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
const ConstantFP * /*FPImm*/) {
return 0;
}
unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, uint64_t /*Imm*/) {
return 0;
}
unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/,
const ConstantFP * /*FPImm*/) {
return 0;
}
unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, unsigned /*Op1*/,
bool /*Op1IsKill*/, uint64_t /*Imm*/) {
return 0;
}
/// This method is a wrapper around fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm, MVT ImmType) {
// If this is a multiply by a power of two, emit this as a shift left.
if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
Opcode = ISD::SHL;
Imm = Log2_64(Imm);
} else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
// udiv x, 8 -> srl x, 3
Opcode = ISD::SRL;
Imm = Log2_64(Imm);
}
// Horrible hack (to be removed), check to make sure shift amounts are
// in-range.
if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
Imm >= VT.getSizeInBits())
return 0;
// First check if immediate type is legal. If not, we can't use the ri form.
unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
if (ResultReg)
return ResultReg;
unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
bool IsImmKill = true;
if (!MaterialReg) {
// This is a bit ugly/slow, but failing here means falling out of
// fast-isel, which would be very slow.
IntegerType *ITy =
IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
if (!MaterialReg)
return 0;
// FIXME: If the materialized register here has no uses yet then this
// will be the first use and we should be able to mark it as killed.
// However, the local value area for materialising constant expressions
// grows down, not up, which means that any constant expressions we generate
// later which also use 'Imm' could be after this instruction and therefore
// after this kill.
IsImmKill = false;
}
return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}
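// Illustrative sketch (comment only; XReg is a hypothetical vreg): a request
// such as
//
//   unsigned R = fastEmit_ri_(MVT::i32, ISD::MUL, XReg, /*Op0IsKill=*/false,
//                             /*Imm=*/8, MVT::i32);
//
// is first strength-reduced to ISD::SHL with Imm = Log2_64(8) == 3. Only if
// no ri form exists for the shift is the immediate materialized into a
// register and the rr form tried.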
unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
return MRI.createVirtualRegister(RC);
}
unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
unsigned OpNum) {
if (TargetRegisterInfo::isVirtualRegister(Op)) {
const TargetRegisterClass *RegClass =
TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
if (!MRI.constrainRegClass(Op, RegClass)) {
// If it's not legal to COPY between the register classes, something
// has gone very wrong before we got here.
unsigned NewOp = createResultReg(RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
return NewOp;
}
}
return Op;
}
unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass *RC) {
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
return ResultReg;
}
unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill));
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill));
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill, unsigned Op2,
bool Op2IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addReg(Op2, getKillRegState(Op2IsKill));
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addReg(Op2, getKillRegState(Op2IsKill));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1,
uint64_t Imm2) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addImm(Imm1)
.addImm(Imm2);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addImm(Imm1)
.addImm(Imm2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, const ConstantFP *FPImm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addFPImm(FPImm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addFPImm(FPImm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill, uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op1,
bool Op1IsKill, uint64_t Imm1,
uint64_t Imm2) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
unsigned ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addImm(Imm1)
.addImm(Imm2);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addImm(Imm1)
.addImm(Imm2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, uint64_t Imm1,
uint64_t Imm2) {
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addImm(Imm1)
.addImm(Imm2);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1)
.addImm(Imm2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
bool Op0IsKill, uint32_t Idx) {
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs");
const TargetRegisterClass *RC = MRI.getRegClass(Op0);
MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
return ResultReg;
}
/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBBs for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
const TerminatorInst *TI = LLVMBB->getTerminator();
SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
// Check successor nodes' PHI nodes that expect a constant to be available
// from this block.
for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
const BasicBlock *SuccBB = TI->getSuccessor(succ);
if (!isa<PHINode>(SuccBB->begin()))
continue;
MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
// If this terminator has multiple identical successors (common for
// switches), only handle each succ once.
if (!SuccsHandled.insert(SuccMBB).second)
continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
// At this point we know that there is a 1-1 correspondence between LLVM PHI
// nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
for (BasicBlock::const_iterator I = SuccBB->begin();
const auto *PN = dyn_cast<PHINode>(I); ++I) {
// Ignore dead PHIs.
if (PN->use_empty())
continue;
// Only handle legal types. Two interesting things to note here. First,
// by bailing out early, we may leave behind some dead instructions,
// since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
// own moves. Second, this check is necessary because FastISel doesn't
// use CreateRegs to create registers, so it always creates
// exactly one register for each non-void instruction.
EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
// Handle integer promotions, though, because they're common and easy.
if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
return false;
}
}
const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
// Set the DebugLoc for the copy. Prefer the location of the operand
// if there is one; use the location of the PHI otherwise.
DbgLoc = PN->getDebugLoc();
if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
DbgLoc = Inst->getDebugLoc();
unsigned Reg = getRegForValue(PHIOp);
if (!Reg) {
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
return false;
}
FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
DbgLoc = DebugLoc();
}
}
return true;
}
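// Illustrative sketch (comment only): given IR such as
//
//   bb0:
//     br label %merge
//   merge:
//     %p = phi i32 [ 7, %bb0 ], ...
//
// the constant 7 must be materialized into a vreg in bb0's MBB before its
// terminator. The (machine PHI, vreg) pair is queued in PHINodesToUpdate
// rather than patched into the PHI immediately, because expansion may split
// one BB across several MBBs.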
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
assert(LI->hasOneUse() &&
"tryToFoldLoad expected a LoadInst with a single use");
// We know that the load has a single use, but don't know what it is. If it
// isn't one of the folded instructions, then we can't succeed here. Handle
// this by scanning the single-use users of the load until we get to FoldInst.
unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
const Instruction *TheUser = LI->user_back();
while (TheUser != FoldInst && // Scan up until we find FoldInst.
// Stay in the right block.
TheUser->getParent() == FoldInst->getParent() &&
--MaxUsers) { // Don't scan too far.
// If there are multiple or no uses of this instruction, then bail out.
if (!TheUser->hasOneUse())
return false;
TheUser = TheUser->user_back();
}
// If we didn't find the fold instruction, then we failed to collapse the
// sequence.
if (TheUser != FoldInst)
return false;
// Don't try to fold volatile loads. The target has to deal with alignment
// constraints.
if (LI->isVolatile())
return false;
// Figure out which vreg this is going into. If there is no assigned vreg yet
// then there actually was no reference to it. Perhaps the load is referenced
// by a dead instruction.
unsigned LoadReg = getRegForValue(LI);
if (!LoadReg)
return false;
// We can't fold if this vreg has no uses or more than one use. Multiple uses
// may mean that the instruction got lowered to multiple MIs, or the use of
// the loaded value ended up being multiple operands of the result.
if (!MRI.hasOneUse(LoadReg))
return false;
MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
MachineInstr *User = RI->getParent();
// Set the insertion point properly. Folding the load can cause generation of
// other random instructions (like sign extends) for addressing modes; make
// sure they get inserted in a logical place before the new instruction.
FuncInfo.InsertPt = User;
FuncInfo.MBB = User->getParent();
// Ask the target to try folding the load.
return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
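// Illustrative sketch (comment only): for a single-use chain such as
//
//   %ld  = load i32, i32* %p        ; single use
//   %ext = sext i32 %ld to i64      ; single use
//   %add = add i64 %ext, %y         ; FoldInst
//
// the walk above steps %ld -> %ext -> %add and, provided the vreg holding
// %ld has exactly one machine use, asks the target to fold the load into
// that user.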
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
// Must be an add.
if (!isa<AddOperator>(Add))
return false;
// Type size needs to match.
if (DL.getTypeSizeInBits(GEP->getType()) !=
DL.getTypeSizeInBits(Add->getType()))
return false;
// Must be in the same basic block.
if (isa<Instruction>(Add) &&
FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
return false;
// Must have a constant operand.
return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
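// Illustrative sketch (comment only): an add qualifies for folding when it
// looks like
//
//   %sum = add i64 %base, 16        ; same bit width as the GEP, same block
//   %gep = getelementptr i8, i8* %buf, i64 %sum
//
// so the constant 16 can be absorbed into the GEP's addressing computation.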
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
const Value *Ptr;
Type *ValTy;
unsigned Alignment;
unsigned Flags;
bool IsVolatile;
if (const auto *LI = dyn_cast<LoadInst>(I)) {
Alignment = LI->getAlignment();
IsVolatile = LI->isVolatile();
Flags = MachineMemOperand::MOLoad;
Ptr = LI->getPointerOperand();
ValTy = LI->getType();
} else if (const auto *SI = dyn_cast<StoreInst>(I)) {
Alignment = SI->getAlignment();
IsVolatile = SI->isVolatile();
Flags = MachineMemOperand::MOStore;
Ptr = SI->getPointerOperand();
ValTy = SI->getValueOperand()->getType();
} else
return nullptr;
bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
AAMDNodes AAInfo;
I->getAAMetadata(AAInfo);
if (Alignment == 0) // Ensure that codegen never sees alignment 0.
Alignment = DL.getABITypeAlignment(ValTy);
unsigned Size = DL.getTypeStoreSize(ValTy);
if (IsVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (IsNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
if (IsInvariant)
Flags |= MachineMemOperand::MOInvariant;
return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
Alignment, AAInfo, Ranges);
}
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
// If both operands are the same, then try to optimize or fold the cmp.
CmpInst::Predicate Predicate = CI->getPredicate();
if (CI->getOperand(0) != CI->getOperand(1))
return Predicate;
switch (Predicate) {
default: llvm_unreachable("Invalid predicate!");
case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
}
return Predicate;
}
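// Illustrative sketch (comment only): with identical operands, 'fcmp oeq
// %x, %x' is true exactly when %x is not NaN, so it folds to FCMP_ORD, and
// 'fcmp ult %x, %x' can only hold for NaN, so it folds to FCMP_UNO. Integer
// self-compares have no NaN case and fold to the always-true or always-false
// predicates.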
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp | //===- ScheduleDAGVLIW.cpp - SelectionDAG list scheduler for VLIW -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
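// Illustrative sketch (comment only): with node A of latency 2 and node B
// depending on A, a cycle-by-cycle trace looks roughly like
//
//   cycle 0: issue A; B is released into the PendingQueue
//   cycle 1: nothing is available, so the cycle is advanced
//   cycle 2: B's depth bound is reached, B becomes available and issues
//
// Noops are emitted instead only when the hazard recognizer reports a
// NoopHazard.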
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/ResourcePriorityQueue.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;
#define DEBUG_TYPE "pre-RA-sched"
STATISTIC(NumNoops , "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
static RegisterScheduler
VLIWScheduler("vliw-td", "VLIW scheduler",
createVLIWDAGScheduler);
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGVLIW - The actual DFA list scheduler implementation. This
/// supports top-down scheduling.
///
class ScheduleDAGVLIW : public ScheduleDAGSDNodes {
private:
/// AvailableQueue - The priority queue to use for the available SUnits.
///
SchedulingPriorityQueue *AvailableQueue;
/// PendingQueue - This contains all of the instructions whose operands have
/// been issued, but their results are not ready yet (due to the latency of
/// the operation). Once the operands become available, the instruction is
/// added to the AvailableQueue.
std::vector<SUnit*> PendingQueue;
/// HazardRec - The hazard recognizer to use.
ScheduleHazardRecognizer *HazardRec;
/// AA - AliasAnalysis for making memory reference queries.
AliasAnalysis *AA;
public:
ScheduleDAGVLIW(MachineFunction &mf,
AliasAnalysis *aa,
SchedulingPriorityQueue *availqueue)
: ScheduleDAGSDNodes(mf), AvailableQueue(availqueue), AA(aa) {
const TargetSubtargetInfo &STI = mf.getSubtarget();
HazardRec = STI.getInstrInfo()->CreateTargetHazardRecognizer(&STI, this);
}
~ScheduleDAGVLIW() override {
delete HazardRec;
delete AvailableQueue;
}
void Schedule() override;
private:
void releaseSucc(SUnit *SU, const SDep &D);
void releaseSuccessors(SUnit *SU);
void scheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void listScheduleTopDown();
};
} // end anonymous namespace
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGVLIW::Schedule() {
DEBUG(dbgs()
<< "********** List Scheduling BB#" << BB->getNumber()
<< " '" << BB->getName() << "' **********\n");
// Build the scheduling graph.
BuildSchedGraph(AA);
AvailableQueue->initNodes(SUnits);
listScheduleTopDown();
AvailableQueue->releaseState();
}
//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//
/// releaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGVLIW::releaseSucc(SUnit *SU, const SDep &D) {
SUnit *SuccSU = D.getSUnit();
#ifndef NDEBUG
if (SuccSU->NumPredsLeft == 0) {
dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
dbgs() << " has been released too many times!\n";
llvm_unreachable(nullptr);
}
#endif
assert(!D.isWeak() && "unexpected artificial DAG edge");
--SuccSU->NumPredsLeft;
SuccSU->setDepthToAtLeast(SU->getDepth() + D.getLatency());
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
PendingQueue.push_back(SuccSU);
}
}
void ScheduleDAGVLIW::releaseSuccessors(SUnit *SU) {
// Top down: release successors.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
assert(!I->isAssignedRegDep() &&
"The list-td scheduler doesn't yet support physreg dependencies!");
releaseSucc(SU, *I);
}
}
/// scheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGVLIW::scheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
Sequence.push_back(SU);
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
releaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue->scheduledNode(SU);
}
/// listScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGVLIW::listScheduleTopDown() {
unsigned CurCycle = 0;
// Release any successors of the special Entry node.
releaseSuccessors(&EntrySU);
// Add all leaves to the AvailableQueue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.
if (SUnits[i].Preds.empty()) {
AvailableQueue->push(&SUnits[i]);
SUnits[i].isAvailable = true;
}
}
// While AvailableQueue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
std::vector<SUnit*> NotReady;
Sequence.reserve(SUnits.size());
while (!AvailableQueue->empty() || !PendingQueue.empty()) {
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
if (PendingQueue[i]->getDepth() == CurCycle) {
AvailableQueue->push(PendingQueue[i]);
PendingQueue[i]->isAvailable = true;
PendingQueue[i] = PendingQueue.back();
PendingQueue.pop_back();
--i; --e;
}
else {
assert(PendingQueue[i]->getDepth() > CurCycle && "Negative latency?");
}
}
// If there are no instructions available, don't try to issue anything, and
// don't advance the hazard recognizer.
if (AvailableQueue->empty()) {
// Reset DFA state.
AvailableQueue->scheduledNode(nullptr);
++CurCycle;
continue;
}
SUnit *FoundSUnit = nullptr;
bool HasNoopHazards = false;
while (!AvailableQueue->empty()) {
SUnit *CurSUnit = AvailableQueue->pop();
ScheduleHazardRecognizer::HazardType HT =
HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
if (HT == ScheduleHazardRecognizer::NoHazard) {
FoundSUnit = CurSUnit;
break;
}
// Remember if this is a noop hazard.
HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;
NotReady.push_back(CurSUnit);
}
// Add the nodes that aren't ready back onto the available list.
if (!NotReady.empty()) {
AvailableQueue->push_all(NotReady);
NotReady.clear();
}
// If we found a node to schedule, do it now.
if (FoundSUnit) {
scheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
// If this is a pseudo-op node, we don't want to increment the current
// cycle.
if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
++CurCycle;
} else if (!HasNoopHazards) {
// Otherwise, we have a pipeline stall, but no other problem, just advance
// the current cycle and try again.
DEBUG(dbgs() << "*** Advancing cycle, no work to do\n");
HazardRec->AdvanceCycle();
++NumStalls;
++CurCycle;
} else {
// Otherwise, we have no instructions to issue and we have instructions
// that will fault if we don't do this right. This is the case for
// processors without pipeline interlocks and other cases.
DEBUG(dbgs() << "*** Emitting noop\n");
HazardRec->EmitNoop();
Sequence.push_back(nullptr); // NULL here means noop
++NumNoops;
++CurCycle;
}
}
#ifndef NDEBUG
VerifyScheduledSequence(/*isBottomUp=*/false);
#endif
}
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
/// createVLIWDAGScheduler - This creates a top-down list scheduler.
ScheduleDAGSDNodes *
llvm::createVLIWDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
return new ScheduleDAGVLIW(*IS->MF, IS->AA, new ResourcePriorityQueue(IS));
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | //===-- SelectionDAGBuilder.h - Selection-DAG building --------*- C++ -*---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#include "StatepointLowering.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLowering.h"
#include <vector>
namespace llvm {
class AddrSpaceCastInst;
class AliasAnalysis;
class AllocaInst;
class BasicBlock;
class BitCastInst;
class BranchInst;
class CallInst;
class DbgValueInst;
class ExtractElementInst;
class ExtractValueInst;
class FCmpInst;
class FPExtInst;
class FPToSIInst;
class FPToUIInst;
class FPTruncInst;
class Function;
class FunctionLoweringInfo;
class GetElementPtrInst;
class GCFunctionInfo;
class ICmpInst;
class IntToPtrInst;
class IndirectBrInst;
class InvokeInst;
class InsertElementInst;
class InsertValueInst;
class Instruction;
class LoadInst;
class MachineBasicBlock;
class MachineInstr;
class MachineRegisterInfo;
class MDNode;
class MVT;
class PHINode;
class PtrToIntInst;
class ReturnInst;
class SDDbgValue;
class SExtInst;
class SelectInst;
class ShuffleVectorInst;
class SIToFPInst;
class StoreInst;
class SwitchInst;
class DataLayout;
class TargetLibraryInfo;
class TargetLowering;
class TruncInst;
class UIToFPInst;
class UnreachableInst;
class VAArgInst;
class ZExtInst;
//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
/// CurInst - The current instruction being visited
const Instruction *CurInst;
DenseMap<const Value*, SDValue> NodeMap;
/// UnusedArgNodeMap - Maps argument values for unused arguments. This is used
/// to preserve debug information for incoming arguments.
DenseMap<const Value*, SDValue> UnusedArgNodeMap;
/// DanglingDebugInfo - Helper type for DanglingDebugInfoMap.
class DanglingDebugInfo {
const DbgValueInst* DI;
DebugLoc dl;
unsigned SDNodeOrder;
public:
DanglingDebugInfo() : DI(nullptr), dl(DebugLoc()), SDNodeOrder(0) { }
DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO) :
DI(di), dl(DL), SDNodeOrder(SDNO) { }
const DbgValueInst* getDI() { return DI; }
DebugLoc getdl() { return dl; }
unsigned getSDNodeOrder() { return SDNodeOrder; }
};
/// DanglingDebugInfoMap - Keeps track of dbg_values for which we have not
/// yet seen the referent. We defer handling these until we do see it.
DenseMap<const Value*, DanglingDebugInfo> DanglingDebugInfoMap;
public:
/// PendingLoads - Loads are not emitted to the program immediately. We bunch
/// them up and then emit token factor nodes when possible. This allows us to
/// get simple disambiguation between loads without worrying about alias
/// analysis.
SmallVector<SDValue, 8> PendingLoads;
/// State used while lowering a statepoint sequence (gc_statepoint,
/// gc_relocate, and gc_result). See StatepointLowering.hpp/cpp for details.
StatepointLoweringState StatepointLowering;
private:
/// PendingExports - CopyToReg nodes that copy values to virtual registers
/// for export to other blocks need to be emitted before any terminator
/// instruction, but they have no other ordering requirements. We bunch them
/// up and then emit a single tokenfactor for them just before terminator
/// instructions.
SmallVector<SDValue, 8> PendingExports;
/// SDNodeOrder - A unique monotonically increasing number used to order the
/// SDNodes we create.
unsigned SDNodeOrder;
enum CaseClusterKind {
/// A cluster of adjacent case labels with the same destination, or just one
/// case.
CC_Range,
/// A cluster of cases suitable for jump table lowering.
CC_JumpTable,
/// A cluster of cases suitable for bit test lowering.
CC_BitTests
};
/// A cluster of case labels.
struct CaseCluster {
CaseClusterKind Kind;
const ConstantInt *Low, *High;
union {
MachineBasicBlock *MBB;
unsigned JTCasesIndex;
unsigned BTCasesIndex;
};
uint32_t Weight;
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High,
MachineBasicBlock *MBB, uint32_t Weight) {
CaseCluster C;
C.Kind = CC_Range;
C.Low = Low;
C.High = High;
C.MBB = MBB;
C.Weight = Weight;
return C;
}
static CaseCluster jumpTable(const ConstantInt *Low,
const ConstantInt *High, unsigned JTCasesIndex,
uint32_t Weight) {
CaseCluster C;
C.Kind = CC_JumpTable;
C.Low = Low;
C.High = High;
C.JTCasesIndex = JTCasesIndex;
C.Weight = Weight;
return C;
}
static CaseCluster bitTests(const ConstantInt *Low, const ConstantInt *High,
unsigned BTCasesIndex, uint32_t Weight) {
CaseCluster C;
C.Kind = CC_BitTests;
C.Low = Low;
C.High = High;
C.BTCasesIndex = BTCasesIndex;
C.Weight = Weight;
return C;
}
};
typedef std::vector<CaseCluster> CaseClusterVector;
typedef CaseClusterVector::iterator CaseClusterIt;
struct CaseBits {
uint64_t Mask;
MachineBasicBlock* BB;
unsigned Bits;
uint32_t ExtraWeight;
CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits,
uint32_t Weight):
Mask(mask), BB(bb), Bits(bits), ExtraWeight(Weight) { }
CaseBits() : Mask(0), BB(nullptr), Bits(0), ExtraWeight(0) {}
};
typedef std::vector<CaseBits> CaseBitsVector;
/// Sort Clusters and merge adjacent cases.
void sortAndRangeify(CaseClusterVector &Clusters);
/// CaseBlock - This structure is used to communicate between
/// SelectionDAGBuilder and SDISel for the code generation of additional basic
/// blocks needed by multi-case switch statements.
struct CaseBlock {
CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
const Value *cmpmiddle,
MachineBasicBlock *truebb, MachineBasicBlock *falsebb,
MachineBasicBlock *me,
uint32_t trueweight = 0, uint32_t falseweight = 0)
: CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
TrueBB(truebb), FalseBB(falsebb), ThisBB(me),
TrueWeight(trueweight), FalseWeight(falseweight) { }
// CC - the condition code to use for the case block's setcc node
ISD::CondCode CC;
// CmpLHS/CmpMHS/CmpRHS - The LHS/MHS/RHS of the comparison to emit.
// By default, emit LHS op RHS. MHS is used for range comparisons:
// If MHS is not null: (LHS <= MHS) and (MHS <= RHS).
const Value *CmpLHS, *CmpMHS, *CmpRHS;
// TrueBB/FalseBB - the block to branch to if the setcc is true/false.
MachineBasicBlock *TrueBB, *FalseBB;
// ThisBB - the block into which to emit the code for the setcc and branches
MachineBasicBlock *ThisBB;
// TrueWeight/FalseWeight - branch weights.
uint32_t TrueWeight, FalseWeight;
};
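// Illustrative use of CmpMHS (a sketch): to test "x in [3,7]" one can build
// CaseBlock(ISD::SETLE, /*cmplhs=*/3, /*cmprhs=*/7, /*cmpmiddle=*/x, TBB,
// FBB, ThisBB); visitSwitchCase then emits this as an unsigned range check,
// roughly (x - 3) u<= (7 - 3), branching to TrueBB/FalseBB.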
struct JumpTable {
JumpTable(unsigned R, unsigned J, MachineBasicBlock *M,
MachineBasicBlock *D): Reg(R), JTI(J), MBB(M), Default(D) {}
/// Reg - the virtual register containing the index of the jump table entry
/// to jump to.
unsigned Reg;
/// JTI - the JumpTableIndex for this jump table in the function.
unsigned JTI;
/// MBB - the MBB into which to emit the code for the indirect jump.
MachineBasicBlock *MBB;
/// Default - the MBB of the default bb, which is a successor of the range
/// check MBB. This is used when updating PHI nodes in successors.
MachineBasicBlock *Default;
};
struct JumpTableHeader {
JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
bool E = false):
First(F), Last(L), SValue(SV), HeaderBB(H), Emitted(E) {}
APInt First;
APInt Last;
const Value *SValue;
MachineBasicBlock *HeaderBB;
bool Emitted;
};
typedef std::pair<JumpTableHeader, JumpTable> JumpTableBlock;
struct BitTestCase {
BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr,
uint32_t Weight):
Mask(M), ThisBB(T), TargetBB(Tr), ExtraWeight(Weight) { }
uint64_t Mask;
MachineBasicBlock *ThisBB;
MachineBasicBlock *TargetBB;
uint32_t ExtraWeight;
};
typedef SmallVector<BitTestCase, 3> BitTestInfo;
struct BitTestBlock {
BitTestBlock(APInt F, APInt R, const Value* SV,
unsigned Rg, MVT RgVT, bool E,
MachineBasicBlock* P, MachineBasicBlock* D,
BitTestInfo C):
First(F), Range(R), SValue(SV), Reg(Rg), RegVT(RgVT), Emitted(E),
Parent(P), Default(D), Cases(std::move(C)) { }
APInt First;
APInt Range;
const Value *SValue;
unsigned Reg;
MVT RegVT;
bool Emitted;
MachineBasicBlock *Parent;
MachineBasicBlock *Default;
BitTestInfo Cases;
};
/// Minimum jump table density, in percent.
enum { MinJumpTableDensity = 40 };
/// Check whether a range of clusters is dense enough for a jump table.
bool isDense(const CaseClusterVector &Clusters, unsigned *TotalCases,
unsigned First, unsigned Last);
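// For example, clusters covering cases {0, 1, 2, 100} span a range of 101
// values with only 4 cases, i.e. a density of about 4%, well below
// MinJumpTableDensity (40%), so isDense would reject a jump table for them.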
/// Build a jump table cluster from Clusters[First..Last]. Returns false if it
/// decides it's not a good idea.
bool buildJumpTable(CaseClusterVector &Clusters, unsigned First,
unsigned Last, const SwitchInst *SI,
MachineBasicBlock *DefaultMBB, CaseCluster &JTCluster);
/// Find clusters of cases suitable for jump table lowering.
void findJumpTables(CaseClusterVector &Clusters, const SwitchInst *SI,
MachineBasicBlock *DefaultMBB);
/// Check whether the range [Low,High] fits in a machine word.
bool rangeFitsInWord(const APInt &Low, const APInt &High);
/// Check whether these clusters are suitable for lowering with bit tests based
/// on the number of destinations, comparison metric, and range.
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
const APInt &Low, const APInt &High);
/// Build a bit test cluster from Clusters[First..Last]. Returns false if it
/// decides it's not a good idea.
bool buildBitTests(CaseClusterVector &Clusters, unsigned First, unsigned Last,
const SwitchInst *SI, CaseCluster &BTCluster);
/// Find clusters of cases suitable for bit test lowering.
void findBitTestClusters(CaseClusterVector &Clusters, const SwitchInst *SI);
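// Bit test sketch: cases {0, 3, 5, 8} branching to one destination fit in a
// machine word (range 0..8), so a single mask test of the form
//   if ((1 << x) & 0b100101001) goto Dest;
// can replace four separate compares; BitTestCase::Mask holds that constant.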
struct SwitchWorkListItem {
MachineBasicBlock *MBB;
CaseClusterIt FirstCluster;
CaseClusterIt LastCluster;
const ConstantInt *GE;
const ConstantInt *LT;
};
typedef SmallVector<SwitchWorkListItem, 4> SwitchWorkList;
/// Determine the rank by weight of CC in [First,Last]. If CC has more weight
/// than each cluster in the range, its rank is 0.
static unsigned caseClusterRank(const CaseCluster &CC, CaseClusterIt First,
CaseClusterIt Last);
/// Emit comparison and split W into two subtrees.
void splitWorkItem(SwitchWorkList &WorkList, const SwitchWorkListItem &W,
Value *Cond, MachineBasicBlock *SwitchMBB);
/// Lower W.
void lowerWorkItem(SwitchWorkListItem W, Value *Cond,
MachineBasicBlock *SwitchMBB,
MachineBasicBlock *DefaultMBB);
/// A class which encapsulates all of the information needed to generate a
/// stack protector check and signals to isel via its state being initialized
/// that a stack protector needs to be generated.
///
/// *NOTE* The following is a high level documentation of SelectionDAG Stack
/// Protector Generation. The reason that it is placed here is for a lack of
/// other good places to stick it.
///
/// High Level Overview of SelectionDAG Stack Protector Generation:
///
/// Previously, generation of stack protectors was done exclusively in the
/// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
/// splitting basic blocks at the IR level to create the success/failure basic
/// blocks in the tail of the basic block in question. As a result of this,
/// calls that would have qualified for the sibling call optimization were no
/// longer eligible for optimization since said calls were no longer right in
/// the "tail position" (i.e. the immediate predecessor of a ReturnInst
/// instruction).
///
/// Then it was noticed that since the sibling call optimization causes the
/// callee to reuse the caller's stack, if we could delay the generation of
/// the stack protector check until later in CodeGen after the sibling call
/// decision was made, we get both the tail call optimization and the stack
/// protector check!
///
/// A few goals in solving this problem were:
///
/// 1. Preserve the architecture independence of stack protector generation.
///
/// 2. Preserve the normal IR level stack protector check for platforms like
/// OpenBSD for which we support platform-specific stack protector
/// generation.
///
/// The main problem that guided the present solution is that one can not
/// solve this problem in an architecture independent manner at the IR level
/// only. This is because:
///
/// 1. The decision on whether or not to perform a sibling call on certain
/// platforms (for instance i386) requires lower level information
/// related to available registers that can not be known at the IR level.
///
/// 2. Even if the previous point were not true, the decision on whether to
/// perform a tail call is done in LowerCallTo in SelectionDAG which
/// occurs after the Stack Protector Pass. As a result, one would need to
/// put the relevant callinst into the stack protector check success
/// basic block (where the return inst is placed) and then move it back
/// later at SelectionDAG/MI time before the stack protector check if the
/// tail call optimization failed. The MI level option was nixed
/// immediately since it would require platform-specific pattern
/// matching. The SelectionDAG level option was nixed because
/// SelectionDAG only processes one IR level basic block at a time
/// implying one could not create a DAG Combine to move the callinst.
///
/// To get around this problem a few things were realized:
///
/// 1. While one can not handle multiple IR level basic blocks at the
/// SelectionDAG Level, one can generate multiple machine basic blocks
/// for one IR level basic block. This is how we handle bit tests and
/// switches.
///
/// 2. At the MI level, tail calls are represented via a special return
/// MIInst called "tcreturn". Thus if we know the basic block in which we
/// wish to insert the stack protector check, we get the correct behavior
/// by always inserting the stack protector check right before the return
/// statement. This is a "magical transformation" since no matter where
/// the stack protector check intrinsic is, we always insert the stack
/// protector check code at the end of the BB.
///
/// Given the aforementioned constraints, the following solution was devised:
///
/// 1. On platforms that do not support SelectionDAG stack protector check
/// generation, allow for the normal IR level stack protector check
/// generation to continue.
///
/// 2. On platforms that do support SelectionDAG stack protector check
/// generation:
///
/// a. Use the IR level stack protector pass to decide if a stack
/// protector is required/which BB we insert the stack protector check
/// in by reusing the logic already therein. If we wish to generate a
/// stack protector check in a basic block, we place a special IR
/// intrinsic called llvm.stackprotectorcheck right before the BB's
/// returninst or if there is a callinst that could potentially be
/// sibling call optimized, before the call inst.
///
/// b. Then when a BB with said intrinsic is processed, we codegen the BB
/// normally via SelectBasicBlock. In said process, when we visit the
/// stack protector check, we do not actually emit anything into the
/// BB. Instead, we just initialize the stack protector descriptor
/// class (which involves stashing information/creating the success
/// mbb and the failure mbb if we have not created one for this
/// function yet) and export the guard variable that we are going to
/// compare.
///
/// c. After we finish selecting the basic block, in FinishBasicBlock if
/// the StackProtectorDescriptor attached to the SelectionDAGBuilder is
/// initialized, we first find a splice point in the parent basic block
/// before the terminator and then splice the terminator of said basic
/// block into the success basic block. Then we code-gen a new tail for
/// the parent basic block consisting of the two loads, the comparison,
/// and finally two branches to the success/failure basic blocks. We
/// conclude by code-gening the failure basic block if we have not
/// code-gened it already (all stack protector checks we generate in
/// the same function, use the same failure basic block).
class StackProtectorDescriptor {
public:
StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr),
FailureMBB(nullptr), Guard(nullptr),
GuardReg(0) { }
/// Returns true if all fields of the stack protector descriptor are
/// initialized, implying that we are ready to emit a stack protector.
bool shouldEmitStackProtector() const {
return ParentMBB && SuccessMBB && FailureMBB && Guard;
}
/// Initialize the stack protector descriptor structure for a new basic
/// block.
void initialize(const BasicBlock *BB,
MachineBasicBlock *MBB,
const CallInst &StackProtCheckCall) {
// Make sure we are not initialized yet.
assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
"already initialized!");
ParentMBB = MBB;
SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
if (!Guard)
Guard = StackProtCheckCall.getArgOperand(0);
}
/// Reset state that changes when we handle different basic blocks.
///
/// This currently includes:
///
/// 1. The specific basic block we are generating a
/// stack protector for (ParentMBB).
///
/// 2. The successor machine basic block that will contain the tail of
/// parent mbb after we create the stack protector check (SuccessMBB). This
/// BB is visited only on stack protector check success.
void resetPerBBState() {
ParentMBB = nullptr;
SuccessMBB = nullptr;
}
/// Reset state that only changes when we switch functions.
///
/// This currently includes:
///
/// 1. FailureMBB since we reuse the failure code path for all stack
/// protector checks created in an individual function.
///
/// 2. The guard variable since the guard variable we are checking against is
/// always the same.
void resetPerFunctionState() {
FailureMBB = nullptr;
Guard = nullptr;
}
MachineBasicBlock *getParentMBB() { return ParentMBB; }
MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
MachineBasicBlock *getFailureMBB() { return FailureMBB; }
const Value *getGuard() { return Guard; }
unsigned getGuardReg() const { return GuardReg; }
void setGuardReg(unsigned R) { GuardReg = R; }
private:
/// The basic block for which we are generating the stack protector.
///
/// As a result of stack protector generation, we will splice the
/// terminators of this basic block into the successor mbb SuccessMBB and
/// replace it with a compare/branch to the successor mbbs
/// SuccessMBB/FailureMBB depending on whether or not the stack protector
/// was violated.
MachineBasicBlock *ParentMBB;
/// A basic block visited on stack protector check success that contains the
/// terminators of ParentMBB.
MachineBasicBlock *SuccessMBB;
/// The basic block visited on stack protector check failure; it will
/// contain a call to __stack_chk_fail().
MachineBasicBlock *FailureMBB;
/// The guard variable which we will compare against the stored value in the
/// stack protector stack slot.
const Value *Guard;
/// The virtual register holding the stack guard value.
unsigned GuardReg;
/// Add a successor machine basic block to ParentMBB. If the successor mbb
/// has not been created yet (i.e. if SuccMBB == nullptr), then the machine basic
/// block will be created. Assign a large weight if IsLikely is true.
MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
MachineBasicBlock *ParentMBB,
bool IsLikely,
MachineBasicBlock *SuccMBB = nullptr);
};
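// A rough sketch of what FinishBasicBlock splices in once this descriptor is
// initialized (pseudo-IR, names illustrative):
//   %guard = load the guard variable (Guard / GuardReg)
//   %slot  = load the stack protector stack slot
//   br (%guard == %slot), SuccessMBB, FailureMBB
// with FailureMBB containing the call to __stack_chk_fail(), per steps (b)
// and (c) of the overview above.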
private:
const TargetMachine &TM;
public:
/// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
/// nodes without a corresponding SDNode.
static const unsigned LowestSDNodeOrder = 1;
SelectionDAG &DAG;
const DataLayout *DL;
AliasAnalysis *AA;
const TargetLibraryInfo *LibInfo;
/// SwitchCases - Vector of CaseBlock structures used to communicate
/// SwitchInst code generation information.
std::vector<CaseBlock> SwitchCases;
/// JTCases - Vector of JumpTable structures used to communicate
/// SwitchInst code generation information.
std::vector<JumpTableBlock> JTCases;
/// BitTestCases - Vector of BitTestBlock structures used to communicate
/// SwitchInst code generation information.
std::vector<BitTestBlock> BitTestCases;
/// A StackProtectorDescriptor structure used to communicate stack protector
/// information in between SelectBasicBlock and FinishBasicBlock.
StackProtectorDescriptor SPDescriptor;
// Emit PHI-node-operand constants only once even if used by multiple
// PHI nodes.
DenseMap<const Constant *, unsigned> ConstantsOut;
/// FuncInfo - Information about the function as a whole.
///
FunctionLoweringInfo &FuncInfo;
/// OptLevel - What optimization level we're generating code for.
///
CodeGenOpt::Level OptLevel;
/// GFI - Garbage collection metadata for the function.
GCFunctionInfo *GFI;
/// LPadToCallSiteMap - Map a landing pad to the call site indexes.
DenseMap<MachineBasicBlock*, SmallVector<unsigned, 4> > LPadToCallSiteMap;
/// HasTailCall - This is set to true if a call in the current
/// block has been translated as a tail call. In this case,
/// no subsequent DAG nodes should be created.
///
bool HasTailCall;
LLVMContext *Context;
SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
CodeGenOpt::Level ol)
: CurInst(nullptr), SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()),
DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
HasTailCall(false) {
}
void init(GCFunctionInfo *gfi, AliasAnalysis &aa,
const TargetLibraryInfo *li);
/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void clear();
/// clearDanglingDebugInfo - Clear the dangling debug information
/// map. This function is separated from the clear so that debug
/// information that is dangling in a basic block can be properly
/// resolved in a different basic block. This allows the
/// SelectionDAG to resolve dangling debug information attached
/// to PHI nodes.
void clearDanglingDebugInfo();
/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
///
SDValue getRoot();
/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue getControlRoot();
SDLoc getCurSDLoc() const {
return SDLoc(CurInst, SDNodeOrder);
}
DebugLoc getCurDebugLoc() const {
return CurInst ? CurInst->getDebugLoc() : DebugLoc();
}
unsigned getSDNodeOrder() const { return SDNodeOrder; }
void CopyValueToVirtualRegister(const Value *V, unsigned Reg);
void visit(const Instruction &I);
void visit(unsigned Opcode, const User &I);
/// getCopyFromRegs - If there was a virtual register allocated for the value V,
/// emit CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
SDValue getCopyFromRegs(const Value *V, Type *Ty);
// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void resolveDanglingDebugInfo(const Value *V, SDValue Val);
SDValue getValue(const Value *V);
bool findValue(const Value *V) const;
SDValue getNonRegisterValue(const Value *V);
SDValue getValueImpl(const Value *V);
void setValue(const Value *V, SDValue NewN) {
SDValue &N = NodeMap[V];
assert(!N.getNode() && "Already set a value for this node!");
N = NewN;
}
void setUnusedArgValue(const Value *V, SDValue NewN) {
SDValue &N = UnusedArgNodeMap[V];
assert(!N.getNode() && "Already set a value for this node!");
N = NewN;
}
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB, unsigned Opc,
uint32_t TW, uint32_t FW);
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB,
uint32_t TW, uint32_t FW);
bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
void CopyToExportRegsIfNeeded(const Value *V);
void ExportFromCurrentBlock(const Value *V);
void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
MachineBasicBlock *LandingPad = nullptr);
std::pair<SDValue, SDValue> lowerCallOperands(
ImmutableCallSite CS,
unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
MachineBasicBlock *LandingPad = nullptr,
bool IsPatchPoint = false);
/// UpdateSplitBlock - When an MBB was split during scheduling, update the
/// references that need to refer to the last resulting block.
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);
// This function is responsible for the whole statepoint lowering process.
// It uniformly handles invoke and call statepoints.
void LowerStatepoint(ImmutableStatepoint Statepoint,
MachineBasicBlock *LandingPad = nullptr);
private:
std::pair<SDValue, SDValue> lowerInvokable(
TargetLowering::CallLoweringInfo &CLI,
MachineBasicBlock *LandingPad);
// Terminator instructions.
void visitRet(const ReturnInst &I);
void visitBr(const BranchInst &I);
void visitSwitch(const SwitchInst &I);
void visitIndirectBr(const IndirectBrInst &I);
void visitUnreachable(const UnreachableInst &I);
uint32_t getEdgeWeight(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const;
void addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
uint32_t Weight = 0);
public:
void visitSwitchCase(CaseBlock &CB,
MachineBasicBlock *SwitchBB);
void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
MachineBasicBlock *ParentBB);
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
void visitBitTestCase(BitTestBlock &BB,
MachineBasicBlock* NextMBB,
uint32_t BranchWeightToNext,
unsigned Reg,
BitTestCase &B,
MachineBasicBlock *SwitchBB);
void visitJumpTable(JumpTable &JT);
void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
MachineBasicBlock *SwitchBB);
private:
// These all get lowered before this pass.
void visitInvoke(const InvokeInst &I);
void visitResume(const ResumeInst &I);
void visitBinary(const User &I, unsigned OpCode);
void visitShift(const User &I, unsigned Opcode);
void visitAdd(const User &I) { visitBinary(I, ISD::ADD); }
void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
void visitSub(const User &I) { visitBinary(I, ISD::SUB); }
void visitFSub(const User &I);
void visitMul(const User &I) { visitBinary(I, ISD::MUL); }
void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
void visitSDiv(const User &I);
void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
void visitOr (const User &I) { visitBinary(I, ISD::OR); }
void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
void visitShl (const User &I) { visitShift(I, ISD::SHL); }
void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
void visitICmp(const User &I);
void visitFCmp(const User &I);
// Visit the conversion instructions
void visitTrunc(const User &I);
void visitZExt(const User &I);
void visitSExt(const User &I);
void visitFPTrunc(const User &I);
void visitFPExt(const User &I);
void visitFPToUI(const User &I);
void visitFPToSI(const User &I);
void visitUIToFP(const User &I);
void visitSIToFP(const User &I);
void visitPtrToInt(const User &I);
void visitIntToPtr(const User &I);
void visitBitCast(const User &I);
void visitAddrSpaceCast(const User &I);
void visitExtractElement(const User &I);
void visitInsertElement(const User &I);
void visitShuffleVector(const User &I);
void visitExtractValue(const ExtractValueInst &I);
void visitInsertValue(const InsertValueInst &I);
void visitLandingPad(const LandingPadInst &I);
void visitGetElementPtr(const User &I);
void visitSelect(const User &I);
void visitAlloca(const AllocaInst &I);
void visitLoad(const LoadInst &I);
void visitStore(const StoreInst &I);
void visitMaskedLoad(const CallInst &I);
void visitMaskedStore(const CallInst &I);
void visitMaskedGather(const CallInst &I);
void visitMaskedScatter(const CallInst &I);
void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
void visitAtomicRMW(const AtomicRMWInst &I);
void visitFence(const FenceInst &I);
void visitPHI(const PHINode &I);
void visitCall(const CallInst &I);
bool visitMemCmpCall(const CallInst &I);
bool visitMemChrCall(const CallInst &I);
bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
bool visitStrCmpCall(const CallInst &I);
bool visitStrLenCall(const CallInst &I);
bool visitStrNLenCall(const CallInst &I);
bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
void visitAtomicLoad(const LoadInst &I);
void visitAtomicStore(const StoreInst &I);
void visitInlineAsm(ImmutableCallSite CS);
const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
void visitVAStart(const CallInst &I);
void visitVAArg(const VAArgInst &I);
void visitVAEnd(const CallInst &I);
void visitVACopy(const CallInst &I);
void visitStackmap(const CallInst &I);
void visitPatchpoint(ImmutableCallSite CS,
MachineBasicBlock *LandingPad = nullptr);
// These three are implemented in StatepointLowering.cpp
void visitStatepoint(const CallInst &I);
void visitGCRelocate(const CallInst &I);
void visitGCResult(const CallInst &I);
void visitUserOp1(const Instruction &I) {
llvm_unreachable("UserOp1 should not exist at instruction selection time!");
}
void visitUserOp2(const Instruction &I) {
llvm_unreachable("UserOp2 should not exist at instruction selection time!");
}
void processIntegerCallValue(const Instruction &I,
SDValue Value, bool IsSigned);
void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
/// EmitFuncArgumentDbgValue - If V is a function argument then create
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted into the entry BB.
bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
DIExpression *Expr, DILocation *DL,
int64_t Offset, bool IsIndirect,
const SDValue &N);
/// Return the next block after MBB, or nullptr if there is none.
MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);
/// Update the DAG and DAG builder with the relevant information after
/// a new root node has been created which could be a tail call.
void updateDAGForMaybeTailCall(SDValue MaybeTC);
};
/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values. The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// type.
///
struct RegsForValue {
/// ValueVTs - The value types of the values, which may not be legal, and
/// may need be promoted or synthesized from one or more registers.
///
SmallVector<EVT, 4> ValueVTs;
/// RegVTs - The value types of the registers. This is the same size as
/// ValueVTs and it records, for each value, what the type of the assigned
/// register or registers are. (Individual values are never synthesized
/// from more than one type of register.)
///
/// With virtual registers, the contents of RegVTs are redundant with TLI's
/// getRegisterType member function. However, with physical registers it is
/// necessary to have a separate record of the types.
///
SmallVector<MVT, 4> RegVTs;
/// Regs - This list holds the registers assigned to the values.
/// Each legal or promoted value requires one register, and each
/// expanded value requires multiple registers.
///
SmallVector<unsigned, 4> Regs;
RegsForValue();
RegsForValue(const SmallVector<unsigned, 4> ®s, MVT regvt, EVT valuevt);
RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
const DataLayout &DL, unsigned Reg, Type *Ty);
/// append - Add the specified values to this one.
void append(const RegsForValue &RHS) {
ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
Regs.append(RHS.Regs.begin(), RHS.Regs.end());
}
/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
/// this value and return the result as a ValueVTs value. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
SDLoc dl,
SDValue &Chain, SDValue *Flag,
const Value *V = nullptr) const;
/// getCopyToRegs - Emit a series of CopyToReg nodes that copy the specified
/// value into the registers specified by this object. This uses Chain/Flag
/// as the input and updates them for the output Chain/Flag. If the Flag
/// pointer is nullptr, no flag is used. If V is not nullptr, then it is used
/// in printing better diagnostic messages on error.
void
getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl, SDValue &Chain,
SDValue *Flag, const Value *V = nullptr,
ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list. This adds the code marker, matching input operand index
/// (if applicable), and includes the number of values added into it.
void AddInlineAsmOperands(unsigned Kind,
bool HasMatching, unsigned MatchingIdx, SDLoc dl,
SelectionDAG &DAG,
std::vector<SDValue> &Ops) const;
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/legalizevectorops.cpp | //===-- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::LegalizeVectors method.
//
// The vector legalizer looks for vector operations which might need to be
// scalarized and legalizes them. This is a separate step from Legalize because
// scalarizing can introduce illegal types. For example, suppose we have an
// ISD::SDIV of type v2i64 on x86-32. The type is legal (for example, addition
// on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
// operation, which introduces nodes with the illegal type i64 which must be
// expanded. Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
// the operation must be unrolled, which introduces nodes with the illegal
// type i8 which must be promoted.
//
// This does not legalize vector manipulations like ISD::BUILD_VECTOR,
// or operations that happen to take a vector which are custom-lowered;
// the legalization for such operations never produces nodes
// with illegal types, so it's okay to put off legalizing them until
// SelectionDAG::Legalize runs.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;
namespace {
class VectorLegalizer {
SelectionDAG &DAG;
const TargetLowering &TLI;
bool Changed; // Keep track of whether anything changed
/// For nodes that are of legal width, and that have more than one use, this
/// map indicates what regularized operand to use. This allows us to avoid
/// legalizing the same thing more than once.
SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;
/// \brief Adds a node to the translation cache.
void AddLegalizedOperand(SDValue From, SDValue To) {
LegalizedNodes.insert(std::make_pair(From, To));
// If someone requests legalization of the new node, return itself.
if (From != To)
LegalizedNodes.insert(std::make_pair(To, To));
}
/// \brief Legalizes the given node.
SDValue LegalizeOp(SDValue Op);
/// \brief Assuming the node is legal, "legalize" the results.
SDValue TranslateLegalizeResults(SDValue Op, SDValue Result);
/// \brief Implements unrolling a VSETCC.
SDValue UnrollVSETCC(SDValue Op);
/// \brief Implement expand-based legalization of vector operations.
///
/// This is just a high-level routine to dispatch to specific code paths for
/// operations to legalize them.
SDValue Expand(SDValue Op);
/// \brief Implements expansion for UINT_TO_FLOAT; falls back to
/// UnrollVectorOp if SINT_TO_FLOAT and SHR on vectors isn't legal.
SDValue ExpandUINT_TO_FLOAT(SDValue Op);
/// \brief Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
SDValue ExpandSEXTINREG(SDValue Op);
/// \brief Implement expansion for ANY_EXTEND_VECTOR_INREG.
///
/// Shuffles the low lanes of the operand into place and bitcasts to the
/// proper type. The contents of the bits in the extended part of each element
/// are undef.
SDValue ExpandANY_EXTEND_VECTOR_INREG(SDValue Op);
/// \brief Implement expansion for SIGN_EXTEND_VECTOR_INREG.
///
/// Shuffles the low lanes of the operand into place, bitcasts to the proper
/// type, then shifts left and arithmetic shifts right to introduce a sign
/// extension.
SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op);
/// \brief Implement expansion for ZERO_EXTEND_VECTOR_INREG.
///
/// Shuffles the low lanes of the operand into place and blends zeros into
/// the remaining lanes, finally bitcasting to the proper type.
SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op);
/// \brief Expand bswap of vectors into a shuffle if legal.
SDValue ExpandBSWAP(SDValue Op);
/// \brief Implement vselect in terms of XOR, AND, OR when blend is not
/// supported by the target.
SDValue ExpandVSELECT(SDValue Op);
SDValue ExpandSELECT(SDValue Op);
SDValue ExpandLoad(SDValue Op);
SDValue ExpandStore(SDValue Op);
/// \brief Implements expansion for FNEG; falls back to UnrollVectorOp if
/// FSUB isn't legal.
SDValue ExpandFNEG(SDValue Op);
/// \brief Implements vector promotion.
///
/// This is essentially just bitcasting the operands to a different type and
/// bitcasting the result back to the original type.
SDValue Promote(SDValue Op);
/// \brief Implements [SU]INT_TO_FP vector promotion.
///
/// This is a [zs]ext of the input operand to the next size up.
SDValue PromoteINT_TO_FP(SDValue Op);
/// \brief Implements FP_TO_[SU]INT vector promotion of the result type.
///
/// It is promoted to the next size up integer type. The result is then
/// truncated back to the original type.
SDValue PromoteFP_TO_INT(SDValue Op, bool isSigned);
public:
/// \brief Begin legalizing the vector operations in the DAG.
bool Run();
VectorLegalizer(SelectionDAG &dag)
: DAG(dag), TLI(dag.getTargetLoweringInfo()), Changed(false) {}
};
bool VectorLegalizer::Run() {
// Before we start legalizing vector nodes, check if there are any vectors.
bool HasVectors = false;
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
E = std::prev(DAG.allnodes_end());
I != std::next(E); ++I) {
// Check if the values of the nodes contain vectors. We don't need to check
// the operands because we are going to check their values at some point.
for (SDNode::value_iterator J = I->value_begin(), E = I->value_end();
J != E; ++J)
HasVectors |= J->isVector();
// If we found a vector node we can start the legalization.
if (HasVectors)
break;
}
// If this basic block has no vectors then no need to legalize vectors.
if (!HasVectors)
return false;
// The legalize process is inherently a bottom-up recursive process (users
// legalize their uses before themselves). Given infinite stack space, we
// could just start legalizing on the root and traverse the whole graph. In
// practice however, this causes us to run out of stack space on large basic
// blocks. To avoid this problem, compute an ordering of the nodes where each
// node is only legalized after all of its operands are legalized.
DAG.AssignTopologicalOrder();
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
E = std::prev(DAG.allnodes_end());
I != std::next(E); ++I)
LegalizeOp(SDValue(I, 0));
// Finally, it's possible the root changed. Get the new root.
SDValue OldRoot = DAG.getRoot();
assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
DAG.setRoot(LegalizedNodes[OldRoot]);
LegalizedNodes.clear();
// Remove dead nodes now.
DAG.RemoveDeadNodes();
return Changed;
}
SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDValue Result) {
// Generic legalization: just pass the operand through.
for (unsigned i = 0, e = Op.getNode()->getNumValues(); i != e; ++i)
AddLegalizedOperand(Op.getValue(i), Result.getValue(i));
return Result.getValue(Op.getResNo());
}
SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
// Note that LegalizeOp may be reentered even from single-use nodes, which
// means that we always must cache transformed nodes.
DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
if (I != LegalizedNodes.end())
return I->second;
SDNode *Node = Op.getNode();
// Legalize the operands
SmallVector<SDValue, 8> Ops;
for (const SDValue &Op : Node->op_values())
Ops.push_back(LegalizeOp(Op));
SDValue Result = SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops), 0);
bool HasVectorValue = false;
if (Op.getOpcode() == ISD::LOAD) {
LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
ISD::LoadExtType ExtType = LD->getExtensionType();
if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD)
switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
LD->getMemoryVT())) {
default:
llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
return TranslateLegalizeResults(Op, Result);
case TargetLowering::Custom:
if (SDValue Lowered = TLI.LowerOperation(Result, DAG)) {
if (Lowered == Result)
return TranslateLegalizeResults(Op, Lowered);
Changed = true;
if (Lowered->getNumValues() != Op->getNumValues()) {
// This expanded to something other than the load. Assume the
// lowering code took care of any chain values, and just handle the
// returned value.
assert(Result.getValue(1).use_empty() &&
"There are still live users of the old chain!");
return LegalizeOp(Lowered);
} else {
return TranslateLegalizeResults(Op, Lowered);
}
}
case TargetLowering::Expand:
Changed = true;
return LegalizeOp(ExpandLoad(Op));
}
} else if (Op.getOpcode() == ISD::STORE) {
StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
EVT StVT = ST->getMemoryVT();
MVT ValVT = ST->getValue().getSimpleValueType();
if (StVT.isVector() && ST->isTruncatingStore())
switch (TLI.getTruncStoreAction(ValVT, StVT.getSimpleVT())) {
default:
llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
return TranslateLegalizeResults(Op, Result);
case TargetLowering::Custom: {
SDValue Lowered = TLI.LowerOperation(Result, DAG);
Changed = Lowered != Result;
return TranslateLegalizeResults(Op, Lowered);
}
case TargetLowering::Expand:
Changed = true;
return LegalizeOp(ExpandStore(Op));
}
} else if (Op.getOpcode() == ISD::MSCATTER)
HasVectorValue = true;
for (SDNode::value_iterator J = Node->value_begin(), E = Node->value_end();
J != E; ++J)
HasVectorValue |= J->isVector();
if (!HasVectorValue)
return TranslateLegalizeResults(Op, Result);
EVT QueryType;
switch (Op.getOpcode()) {
default:
return TranslateLegalizeResults(Op, Result);
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM:
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FDIV:
case ISD::FREM:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR:
case ISD::BSWAP:
case ISD::CTLZ:
case ISD::CTTZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTPOP:
case ISD::SELECT:
case ISD::VSELECT:
case ISD::SELECT_CC:
case ISD::SETCC:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
case ISD::TRUNCATE:
case ISD::SIGN_EXTEND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::FNEG:
case ISD::FABS:
case ISD::FMINNUM:
case ISD::FMAXNUM:
case ISD::FCOPYSIGN:
case ISD::FSQRT:
case ISD::FSIN:
case ISD::FCOS:
case ISD::FPOWI:
case ISD::FPOW:
case ISD::FLOG:
case ISD::FLOG2:
case ISD::FLOG10:
case ISD::FEXP:
case ISD::FEXP2:
case ISD::FCEIL:
case ISD::FTRUNC:
case ISD::FRINT:
case ISD::FNEARBYINT:
case ISD::FROUND:
case ISD::FFLOOR:
case ISD::FP_ROUND:
case ISD::FP_EXTEND:
case ISD::FMA:
case ISD::SIGN_EXTEND_INREG:
case ISD::ANY_EXTEND_VECTOR_INREG:
case ISD::SIGN_EXTEND_VECTOR_INREG:
case ISD::ZERO_EXTEND_VECTOR_INREG:
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX:
QueryType = Node->getValueType(0);
break;
case ISD::FP_ROUND_INREG:
QueryType = cast<VTSDNode>(Node->getOperand(1))->getVT();
break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
QueryType = Node->getOperand(0).getValueType();
break;
case ISD::MSCATTER:
QueryType = cast<MaskedScatterSDNode>(Node)->getValue().getValueType();
break;
}
switch (TLI.getOperationAction(Node->getOpcode(), QueryType)) {
case TargetLowering::Promote:
Result = Promote(Op);
Changed = true;
break;
case TargetLowering::Legal:
break;
case TargetLowering::Custom: {
SDValue Tmp1 = TLI.LowerOperation(Op, DAG);
if (Tmp1.getNode()) {
Result = Tmp1;
break;
}
// FALL THROUGH
}
case TargetLowering::Expand:
Result = Expand(Op);
}
// Make sure that the generated code is itself legal.
if (Result != Op) {
Result = LegalizeOp(Result);
Changed = true;
}
// Note that LegalizeOp may be reentered even from single-use nodes, which
// means that we always must cache transformed nodes.
AddLegalizedOperand(Op, Result);
return Result;
}
SDValue VectorLegalizer::Promote(SDValue Op) {
// For a few operations there is a specific concept for promotion based on
// the operand's type.
switch (Op.getOpcode()) {
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
// "Promote" the operation by extending the operand.
return PromoteINT_TO_FP(Op);
case ISD::FP_TO_UINT:
case ISD::FP_TO_SINT:
// Promote the operation by extending the operand.
return PromoteFP_TO_INT(Op, Op->getOpcode() == ISD::FP_TO_SINT);
}
// There are currently two cases of vector promotion:
// 1) Bitcasting a vector of integers to a different vector type with the
// same overall bit width. For example, x86 promotes ISD::AND v2i32 to v1i64.
// 2) Extending a vector of floats to a vector of the same number of larger
// floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
MVT VT = Op.getSimpleValueType();
assert(Op.getNode()->getNumValues() == 1 &&
"Can't promote a vector with multiple results!");
MVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT);
SDLoc dl(Op);
SmallVector<SDValue, 4> Operands(Op.getNumOperands());
for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
if (Op.getOperand(j).getValueType().isVector())
if (Op.getOperand(j)
.getValueType()
.getVectorElementType()
.isFloatingPoint() &&
NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())
Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Op.getOperand(j));
else
Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
else
Operands[j] = Op.getOperand(j);
}
Op = DAG.getNode(Op.getOpcode(), dl, NVT, Operands);
if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
(VT.isVector() && VT.getVectorElementType().isFloatingPoint() &&
NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()))
return DAG.getNode(ISD::FP_ROUND, dl, VT, Op, DAG.getIntPtrConstant(0, dl));
else
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
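// For instance, the AArch64 v4f16 FADD mentioned above becomes, in DAG terms:
//   t0 = fp_extend v4f32, LHS
//   t1 = fp_extend v4f32, RHS
//   t2 = fadd v4f32, t0, t1
//   res = fp_round v4f16, t2
// while the integer case wraps the wider operation in plain bitcasts.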
SDValue VectorLegalizer::PromoteINT_TO_FP(SDValue Op) {
// INT_TO_FP operations may require the input operand be promoted even
// when the type is otherwise legal.
EVT VT = Op.getOperand(0).getValueType();
assert(Op.getNode()->getNumValues() == 1 &&
"Can't promote a vector with multiple results!");
// Normal getTypeToPromoteTo() doesn't work here, as that will promote
// by widening the vector w/ the same element width and twice the number
// of elements. We want the other way around, the same number of elements,
// each twice the width.
//
// Increase the bitwidth of the element to the next pow-of-two
// (which is greater than 8 bits).
EVT NVT = VT.widenIntegerVectorElementType(*DAG.getContext());
assert(NVT.isSimple() && "Promoting to a non-simple vector type!");
SDLoc dl(Op);
SmallVector<SDValue, 4> Operands(Op.getNumOperands());
unsigned Opc =
Op.getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
if (Op.getOperand(j).getValueType().isVector())
Operands[j] = DAG.getNode(Opc, dl, NVT, Op.getOperand(j));
else
Operands[j] = Op.getOperand(j);
}
return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), Operands);
}
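// Sketch: for a uint_to_fp whose v4i8 operand needs promotion, the operand
// is first widened, so the node becomes
//   t0 = zero_extend v4i16, src
//   res = uint_to_fp <original result VT>, t0
// (the sint_to_fp flavor uses sign_extend instead).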
// For FP_TO_INT we promote the result type to a vector type with wider
// elements and then truncate the result. This is different from the default
// PromoteVector which uses bitcast to promote, thus assuming that the
// promoted vector type has the same overall size.
SDValue VectorLegalizer::PromoteFP_TO_INT(SDValue Op, bool isSigned) {
assert(Op.getNode()->getNumValues() == 1 &&
"Can't promote a vector with multiple results!");
EVT VT = Op.getValueType();
EVT NewVT;
unsigned NewOpc;
while (1) {
NewVT = VT.widenIntegerVectorElementType(*DAG.getContext());
assert(NewVT.isSimple() && "Promoting to a non-simple vector type!");
if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewVT)) {
NewOpc = ISD::FP_TO_SINT;
break;
}
if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewVT)) {
NewOpc = ISD::FP_TO_UINT;
break;
}
}
SDLoc dl(Op);
SDValue Promoted = DAG.getNode(NewOpc, dl, NewVT, Op.getOperand(0));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Promoted);
}
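// Sketch: if FP_TO_SINT of v4i16 must be promoted and v4i32 is the first
// wider type for which it is Legal or Custom, the node becomes
//   t0 = fp_to_sint v4i32, src
//   res = truncate v4i16, t0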
SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
SDLoc dl(Op);
LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
SDValue Chain = LD->getChain();
SDValue BasePTR = LD->getBasePtr();
EVT SrcVT = LD->getMemoryVT();
ISD::LoadExtType ExtType = LD->getExtensionType();
SmallVector<SDValue, 8> Vals;
SmallVector<SDValue, 8> LoadChains;
unsigned NumElem = SrcVT.getVectorNumElements();
EVT SrcEltVT = SrcVT.getScalarType();
EVT DstEltVT = Op.getNode()->getValueType(0).getScalarType();
if (SrcVT.getVectorNumElements() > 1 && !SrcEltVT.isByteSized()) {
// When the elements in a vector are not byte-addressable, we cannot directly
// load each element by advancing the pointer, which can only address bytes.
// Instead, we load all significant words, mask bits off, and concatenate
// them to form each element. Finally, they are extended to destination
// scalar type to build the destination vector.
EVT WideVT = TLI.getPointerTy(DAG.getDataLayout());
assert(WideVT.isRound() &&
"Could not handle the sophisticated case when the widest integer is"
" not a power of 2.");
assert(WideVT.bitsGE(SrcEltVT) && "Type is not legalized?");
unsigned WideBytes = WideVT.getStoreSize();
unsigned Offset = 0;
unsigned RemainingBytes = SrcVT.getStoreSize();
SmallVector<SDValue, 8> LoadVals;
while (RemainingBytes > 0) {
SDValue ScalarLoad;
unsigned LoadBytes = WideBytes;
if (RemainingBytes >= LoadBytes) {
ScalarLoad = DAG.getLoad(
WideVT, dl, Chain, BasePTR,
LD->getPointerInfo().getWithOffset(Offset), LD->isVolatile(),
LD->isNonTemporal(), LD->isInvariant(),
MinAlign(LD->getAlignment(), Offset), LD->getAAInfo());
} else {
EVT LoadVT = WideVT;
while (RemainingBytes < LoadBytes) {
LoadBytes >>= 1; // Reduce the load size by half.
LoadVT = EVT::getIntegerVT(*DAG.getContext(), LoadBytes << 3);
}
ScalarLoad = DAG.getExtLoad(
ISD::EXTLOAD, dl, WideVT, Chain, BasePTR,
LD->getPointerInfo().getWithOffset(Offset), LoadVT,
LD->isVolatile(), LD->isNonTemporal(), LD->isInvariant(),
MinAlign(LD->getAlignment(), Offset), LD->getAAInfo());
}
RemainingBytes -= LoadBytes;
Offset += LoadBytes;
BasePTR =
DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR,
DAG.getConstant(LoadBytes, dl, BasePTR.getValueType()));
LoadVals.push_back(ScalarLoad.getValue(0));
LoadChains.push_back(ScalarLoad.getValue(1));
}
// Extract bits, pack and extend/trunc them into destination type.
unsigned SrcEltBits = SrcEltVT.getSizeInBits();
SDValue SrcEltBitMask = DAG.getConstant((1U << SrcEltBits) - 1, dl, WideVT);
unsigned BitOffset = 0;
unsigned WideIdx = 0;
unsigned WideBits = WideVT.getSizeInBits();
for (unsigned Idx = 0; Idx != NumElem; ++Idx) {
SDValue Lo, Hi, ShAmt;
if (BitOffset < WideBits) {
ShAmt = DAG.getConstant(
BitOffset, dl, TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
Lo = DAG.getNode(ISD::SRL, dl, WideVT, LoadVals[WideIdx], ShAmt);
Lo = DAG.getNode(ISD::AND, dl, WideVT, Lo, SrcEltBitMask);
}
BitOffset += SrcEltBits;
if (BitOffset >= WideBits) {
WideIdx++;
BitOffset -= WideBits;
if (BitOffset > 0) {
ShAmt = DAG.getConstant(
SrcEltBits - BitOffset, dl,
TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
Hi = DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt);
Hi = DAG.getNode(ISD::AND, dl, WideVT, Hi, SrcEltBitMask);
}
}
if (Hi.getNode())
Lo = DAG.getNode(ISD::OR, dl, WideVT, Lo, Hi);
switch (ExtType) {
default:
llvm_unreachable("Unknown extended-load op!");
case ISD::EXTLOAD:
Lo = DAG.getAnyExtOrTrunc(Lo, dl, DstEltVT);
break;
case ISD::ZEXTLOAD:
Lo = DAG.getZExtOrTrunc(Lo, dl, DstEltVT);
break;
case ISD::SEXTLOAD:
ShAmt =
DAG.getConstant(WideBits - SrcEltBits, dl,
TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
Lo = DAG.getNode(ISD::SHL, dl, WideVT, Lo, ShAmt);
Lo = DAG.getNode(ISD::SRA, dl, WideVT, Lo, ShAmt);
Lo = DAG.getSExtOrTrunc(Lo, dl, DstEltVT);
break;
}
Vals.push_back(Lo);
}
} else {
unsigned Stride = SrcVT.getScalarType().getSizeInBits() / 8;
for (unsigned Idx = 0; Idx < NumElem; Idx++) {
SDValue ScalarLoad = DAG.getExtLoad(
ExtType, dl, Op.getNode()->getValueType(0).getScalarType(), Chain,
BasePTR, LD->getPointerInfo().getWithOffset(Idx * Stride),
SrcVT.getScalarType(), LD->isVolatile(), LD->isNonTemporal(),
LD->isInvariant(), MinAlign(LD->getAlignment(), Idx * Stride),
LD->getAAInfo());
BasePTR =
DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR,
DAG.getConstant(Stride, dl, BasePTR.getValueType()));
Vals.push_back(ScalarLoad.getValue(0));
LoadChains.push_back(ScalarLoad.getValue(1));
}
}
SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
SDValue Value =
DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getNode()->getValueType(0), Vals);
AddLegalizedOperand(Op.getValue(0), Value);
AddLegalizedOperand(Op.getValue(1), NewChain);
return (Op.getResNo() ? NewChain : Value);
}
SDValue VectorLegalizer::ExpandStore(SDValue Op) {
SDLoc dl(Op);
StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
SDValue Chain = ST->getChain();
SDValue BasePTR = ST->getBasePtr();
SDValue Value = ST->getValue();
EVT StVT = ST->getMemoryVT();
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
AAMDNodes AAInfo = ST->getAAInfo();
unsigned NumElem = StVT.getVectorNumElements();
// The type of the data we want to save
EVT RegVT = Value.getValueType();
EVT RegSclVT = RegVT.getScalarType();
// The type of data as saved in memory.
EVT MemSclVT = StVT.getScalarType();
// Cast floats into integers
unsigned ScalarSize = MemSclVT.getSizeInBits();
// Round odd types up to the next power of two.
if (!isPowerOf2_32(ScalarSize))
ScalarSize = NextPowerOf2(ScalarSize);
// Store Stride in bytes
unsigned Stride = ScalarSize / 8;
// Extract each of the elements from the original vector
// and save them into memory individually.
SmallVector<SDValue, 8> Stores;
for (unsigned Idx = 0; Idx < NumElem; Idx++) {
SDValue Ex = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, RegSclVT, Value,
DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
// This scalar TruncStore may be illegal, but we legalize it later.
SDValue Store = DAG.getTruncStore(
Chain, dl, Ex, BasePTR,
ST->getPointerInfo().getWithOffset(Idx * Stride), MemSclVT, isVolatile,
isNonTemporal, MinAlign(Alignment, Idx * Stride), AAInfo);
BasePTR = DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR,
DAG.getConstant(Stride, dl, BasePTR.getValueType()));
Stores.push_back(Store);
}
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
AddLegalizedOperand(Op, TF);
return TF;
}
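// E.g. storing a v4i16 value this way emits four scalar i16 truncating
// stores at byte offsets 0, 2, 4 and 6 from BasePTR, with the four chains
// joined by the final TokenFactor.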
SDValue VectorLegalizer::Expand(SDValue Op) {
switch (Op->getOpcode()) {
case ISD::SIGN_EXTEND_INREG:
return ExpandSEXTINREG(Op);
case ISD::ANY_EXTEND_VECTOR_INREG:
return ExpandANY_EXTEND_VECTOR_INREG(Op);
case ISD::SIGN_EXTEND_VECTOR_INREG:
return ExpandSIGN_EXTEND_VECTOR_INREG(Op);
case ISD::ZERO_EXTEND_VECTOR_INREG:
return ExpandZERO_EXTEND_VECTOR_INREG(Op);
case ISD::BSWAP:
return ExpandBSWAP(Op);
case ISD::VSELECT:
return ExpandVSELECT(Op);
case ISD::SELECT:
return ExpandSELECT(Op);
case ISD::UINT_TO_FP:
return ExpandUINT_TO_FLOAT(Op);
case ISD::FNEG:
return ExpandFNEG(Op);
case ISD::SETCC:
return UnrollVSETCC(Op);
default:
return DAG.UnrollVectorOp(Op.getNode());
}
}
SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
// Lower a select instruction where the condition is a scalar and the
// operands are vectors. Lower this select to VSELECT and implement it
// using XOR, AND, OR. The selector bit is broadcast.
EVT VT = Op.getValueType();
SDLoc DL(Op);
SDValue Mask = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue Op2 = Op.getOperand(2);
assert(VT.isVector() && !Mask.getValueType().isVector() &&
Op1.getValueType() == Op2.getValueType() && "Invalid type");
unsigned NumElem = VT.getVectorNumElements();
// If we can't even use the basic vector operations of
// AND,OR,XOR, we will have to scalarize the op.
// Notice that the operation may be 'promoted' which means that it is
// 'bitcasted' to another type which is handled.
// Also, we need to be able to construct a splat vector using BUILD_VECTOR.
if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::BUILD_VECTOR, VT) == TargetLowering::Expand)
return DAG.UnrollVectorOp(Op.getNode());
// Generate a mask operand.
EVT MaskTy = VT.changeVectorElementTypeToInteger();
// What is the size of each element in the vector mask.
EVT BitTy = MaskTy.getScalarType();
Mask = DAG.getSelect(
DL, BitTy, Mask,
DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, BitTy),
DAG.getConstant(0, DL, BitTy));
// Broadcast the mask so that the entire vector is all ones or all zeros.
SmallVector<SDValue, 8> Ops(NumElem, Mask);
Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskTy, Ops);
// Bitcast the operands to be the same type as the mask.
// This is needed when we select between FP types because
// the mask is a vector of integers.
Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);
SDValue AllOnes = DAG.getConstant(
APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, MaskTy);
SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);
Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}
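// The sequence above implements the classic blend identity
//   result = (Op1 & Mask) | (Op2 & ~Mask)
// where Mask has been splatted to all ones or all zeros from the scalar
// condition, so no per-element select is needed.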
SDValue VectorLegalizer::ExpandSEXTINREG(SDValue Op) {
EVT VT = Op.getValueType();
// Make sure that the SRA and SHL instructions are available.
if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
return DAG.UnrollVectorOp(Op.getNode());
SDLoc DL(Op);
EVT OrigTy = cast<VTSDNode>(Op->getOperand(1))->getVT();
unsigned BW = VT.getScalarType().getSizeInBits();
unsigned OrigBW = OrigTy.getScalarType().getSizeInBits();
SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);
Op = Op.getOperand(0);
Op = DAG.getNode(ISD::SHL, DL, VT, Op, ShiftSz);
return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
}
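// Worked example: sign_extend_inreg of v4i32 from i8 has BW = 32 and
// OrigBW = 8, so each lane becomes (x shl 24) sra 24, replicating bit 7 of
// each element across its upper 24 bits.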
// Generically expand a vector anyext in register to a shuffle of the relevant
// lanes into the appropriate locations, with other lanes left undef.
SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDValue Op) {
SDLoc DL(Op);
EVT VT = Op.getValueType();
int NumElements = VT.getVectorNumElements();
SDValue Src = Op.getOperand(0);
EVT SrcVT = Src.getValueType();
int NumSrcElements = SrcVT.getVectorNumElements();
// Build a base mask of undef shuffles.
SmallVector<int, 16> ShuffleMask;
ShuffleMask.resize(NumSrcElements, -1);
// Place the extended lanes into the correct locations.
int ExtLaneScale = NumSrcElements / NumElements;
int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
for (int i = 0; i < NumElements; ++i)
ShuffleMask[i * ExtLaneScale + EndianOffset] = i;
return DAG.getNode(
ISD::BITCAST, DL, VT,
DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
}
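// Example: any_extend_vector_inreg from v4i32 to v2i64 has ExtLaneScale = 2,
// giving shuffle mask <0,-1,1,-1> on little-endian targets (or <-1,0,-1,1>
// on big-endian), which parks each source lane in the low half of a 64-bit
// element before the bitcast; the other half is undef.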
SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op) {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Src = Op.getOperand(0);
EVT SrcVT = Src.getValueType();
// First build an any-extend node which can be legalized above when we
// recurse through it.
Op = DAG.getAnyExtendVectorInReg(Src, DL, VT);
// Now we need sign extend. Do this by shifting the elements. Even if these
// aren't legal operations, they have a better chance of being legalized
// without full scalarization than the sign extension does.
unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
unsigned SrcEltWidth = SrcVT.getVectorElementType().getSizeInBits();
SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
return DAG.getNode(ISD::SRA, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
ShiftAmount);
}
// Generically expand a vector zext in register to a shuffle of the relevant
// lanes into the appropriate locations, a blend of zero into the high bits,
// and a bitcast to the wider element type.
SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op) {
SDLoc DL(Op);
EVT VT = Op.getValueType();
int NumElements = VT.getVectorNumElements();
SDValue Src = Op.getOperand(0);
EVT SrcVT = Src.getValueType();
int NumSrcElements = SrcVT.getVectorNumElements();
// Build up a zero vector to blend into this one.
EVT SrcScalarVT = SrcVT.getScalarType();
SDValue ScalarZero = DAG.getTargetConstant(0, DL, SrcScalarVT);
SmallVector<SDValue, 4> BuildVectorOperands(NumSrcElements, ScalarZero);
SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, DL, SrcVT, BuildVectorOperands);
// Shuffle the incoming lanes into the correct position, and pull all other
// lanes from the zero vector.
SmallVector<int, 16> ShuffleMask;
ShuffleMask.reserve(NumSrcElements);
for (int i = 0; i < NumSrcElements; ++i)
ShuffleMask.push_back(i);
int ExtLaneScale = NumSrcElements / NumElements;
int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
for (int i = 0; i < NumElements; ++i)
ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
}
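// Worked example (assumed types, for illustration only): zero-extending
// v8i16 into v4i32 on a little-endian target starts from the identity mask
// { 0, 1, 2, 3, 4, 5, 6, 7 } over the zero vector, then overwrites even
// positions with source lane indices (8 + i, since Src is the second shuffle
// operand), yielding { 8, 1, 9, 3, 10, 5, 11, 7 }: source lanes in the low
// halves, zeros in the high halves.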
SDValue VectorLegalizer::ExpandBSWAP(SDValue Op) {
EVT VT = Op.getValueType();
// Generate a byte wise shuffle mask for the BSWAP.
SmallVector<int, 16> ShuffleMask;
int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());
// Only emit a shuffle if the mask is legal.
if (!TLI.isShuffleMaskLegal(ShuffleMask, ByteVT))
return DAG.UnrollVectorOp(Op.getNode());
SDLoc DL(Op);
Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
ShuffleMask.data());
return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}
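// Worked example (assumed type, for illustration only): for v2i32 the
// byte-wise mask built above is { 3, 2, 1, 0, 7, 6, 5, 4 }, i.e. after the
// bitcast to v8i8 the four bytes of each 32-bit element are reversed in
// place.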
SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
// Implement VSELECT in terms of XOR, AND, OR
// on platforms which do not support blend natively.
SDLoc DL(Op);
SDValue Mask = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue Op2 = Op.getOperand(2);
EVT VT = Mask.getValueType();
  // If we can't even use the basic vector operations of
  // AND, OR, XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted', which means that it is
  // 'bitcast' to another type which is handled.
  // This operation also isn't safe with AND, OR, XOR when the boolean
  // type is 0/1, since we need an all-ones vector constant to mask with.
  // FIXME: Sign-extend 1 to all ones if that's legal on the target.
if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
TLI.getBooleanContents(Op1.getValueType()) !=
TargetLowering::ZeroOrNegativeOneBooleanContent)
return DAG.UnrollVectorOp(Op.getNode());
// If the mask and the type are different sizes, unroll the vector op. This
// can occur when getSetCCResultType returns something that is different in
// size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
if (VT.getSizeInBits() != Op1.getValueType().getSizeInBits())
return DAG.UnrollVectorOp(Op.getNode());
// Bitcast the operands to be the same type as the mask.
// This is needed when we select between FP types because
// the mask is a vector of integers.
Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);
SDValue AllOnes = DAG.getConstant(
APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()), DL, VT);
SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);
Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}
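// Illustrative note (not part of the legalizer): the ZeroOrNegativeOne
// boolean-contents requirement checked above matters because with 0/1
// booleans the AND keeps only bit 0 of the selected operand. Sketch with
// 32-bit lanes:
//
//   (Op1 & 1) | (Op2 & ~1u)    // mask = 1: a garbled mix, not Op1
//   (Op1 & ~0u) | (Op2 & 0)    // mask = ~0u: correctly yields Op1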
SDValue VectorLegalizer::ExpandUINT_TO_FLOAT(SDValue Op) {
EVT VT = Op.getOperand(0).getValueType();
SDLoc DL(Op);
// Make sure that the SINT_TO_FP and SRL instructions are available.
if (TLI.getOperationAction(ISD::SINT_TO_FP, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand)
return DAG.UnrollVectorOp(Op.getNode());
EVT SVT = VT.getScalarType();
assert((SVT.getSizeInBits() == 64 || SVT.getSizeInBits() == 32) &&
"Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");
unsigned BW = SVT.getSizeInBits();
SDValue HalfWord = DAG.getConstant(BW / 2, DL, VT);
// Constants to clear the upper part of the word.
// Notice that we can also use SHL+SHR, but using a constant is slightly
// faster on x86.
uint64_t HWMask =
(SVT.getSizeInBits() == 64) ? 0x00000000FFFFFFFF : 0x0000FFFF;
SDValue HalfWordMask = DAG.getConstant(HWMask, DL, VT);
// Two to the power of half-word-size.
SDValue TWOHW =
DAG.getConstantFP((((uint64_t)1) << (BW / 2)), DL,
Op.getValueType()); // HLSL Change: do the 64-bit
// conversion before shift not after
// Clear upper part of LO, lower HI
SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Op.getOperand(0), HalfWord);
SDValue LO = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), HalfWordMask);
// Convert hi and lo to floats
// Convert the hi part back to the upper values
SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), HI);
fHI = DAG.getNode(ISD::FMUL, DL, Op.getValueType(), fHI, TWOHW);
SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), LO);
// Add the two halves
return DAG.getNode(ISD::FADD, DL, Op.getValueType(), fHI, fLO);
}
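// Worked example (illustrative): for a 32-bit lane X = 0xFFFFFFFF, HI is
// X >> 16 = 0xFFFF and LO is X & 0xFFFF = 0xFFFF. Both halves are
// non-negative as signed integers, so SINT_TO_FP is safe, and the result is
//   65535.0 * 65536.0 + 65535.0 = 4294967295.0,
// which is exact when the FP type can represent it (f64; f32 would round).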
SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {
if (TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) {
SDLoc DL(Op);
SDValue Zero = DAG.getConstantFP(-0.0, DL, Op.getValueType());
return DAG.getNode(ISD::FSUB, DL, Op.getValueType(), Zero,
Op.getOperand(0));
}
return DAG.UnrollVectorOp(Op.getNode());
}
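// Illustrative note (not part of the legalizer): FSUB(-0.0, X) flips the
// sign bit for every X, including zeros: -0.0 - 0.0 == -0.0, whereas
// 0.0 - 0.0 would incorrectly give +0.0. That is why -0.0, not 0.0, is the
// left-hand constant above.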
SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
EVT VT = Op.getValueType();
unsigned NumElems = VT.getVectorNumElements();
EVT EltVT = VT.getVectorElementType();
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1), CC = Op.getOperand(2);
EVT TmpEltVT = LHS.getValueType().getVectorElementType();
SDLoc dl(Op);
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
SDValue LHSElem = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue RHSElem = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
Ops[i] = DAG.getNode(ISD::SETCC, dl,
TLI.getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), TmpEltVT),
LHSElem, RHSElem, CC);
Ops[i] = DAG.getSelect(
dl, EltVT, Ops[i],
DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), dl,
EltVT),
DAG.getConstant(0, dl, EltVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
} // namespace
bool SelectionDAG::LegalizeVectors() { return VectorLegalizer(*this).Run(); }
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | //===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "pre-RA-sched"
STATISTIC(LoadsClustered, "Number of loads clustered together");
// This allows the latency-based scheduler to notice high latency instructions
// without a target itinerary. The choice of number here has more to do with
// balancing scheduler heuristics than with the actual machine latency.
static cl::opt<int> HighLatencyCycles(
"sched-high-latency-cycles", cl::Hidden, cl::init(10),
cl::desc("Roughly estimate the number of cycles that 'long latency'"
"instructions take for targets with no itinerary"));
ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
: ScheduleDAG(mf), BB(nullptr), DAG(nullptr),
InstrItins(mf.getSubtarget().getInstrItineraryData()) {}
/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb) {
BB = bb;
DAG = dag;
// Clear the scheduler's SUnit DAG.
ScheduleDAG::clearDAG();
Sequence.clear();
// Invoke the target's selection of scheduler.
Schedule();
}
/// NewSUnit - Creates a new SUnit and returns a pointer to it.
///
SUnit *ScheduleDAGSDNodes::newSUnit(SDNode *N) {
#ifndef NDEBUG
const SUnit *Addr = nullptr;
if (!SUnits.empty())
Addr = &SUnits[0];
#endif
SUnits.emplace_back(N, (unsigned)SUnits.size());
assert((Addr == nullptr || Addr == &SUnits[0]) &&
"SUnits std::vector reallocated on the fly!");
SUnits.back().OrigNode = &SUnits.back();
SUnit *SU = &SUnits.back();
const TargetLowering &TLI = DAG->getTargetLoweringInfo();
if (!N ||
(N->isMachineOpcode() &&
N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
SU->SchedulingPref = Sched::None;
else
SU->SchedulingPref = TLI.getSchedulingPreference(N);
return SU;
}
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
SUnit *SU = newSUnit(Old->getNode());
SU->OrigNode = Old->OrigNode;
SU->Latency = Old->Latency;
SU->isVRegCycle = Old->isVRegCycle;
SU->isCall = Old->isCall;
SU->isCallOp = Old->isCallOp;
SU->isTwoAddress = Old->isTwoAddress;
SU->isCommutable = Old->isCommutable;
SU->hasPhysRegDefs = Old->hasPhysRegDefs;
SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
SU->isScheduleHigh = Old->isScheduleHigh;
SU->isScheduleLow = Old->isScheduleLow;
SU->SchedulingPref = Old->SchedulingPref;
Old->isCloned = true;
return SU;
}
/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
const TargetRegisterInfo *TRI,
const TargetInstrInfo *TII,
unsigned &PhysReg, int &Cost) {
if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
return;
unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
return;
unsigned ResNo = User->getOperand(2).getResNo();
if (Def->getOpcode() == ISD::CopyFromReg &&
cast<RegisterSDNode>(Def->getOperand(1))->getReg() == Reg) {
PhysReg = Reg;
} else if (Def->isMachineOpcode()) {
const MCInstrDesc &II = TII->get(Def->getMachineOpcode());
if (ResNo >= II.getNumDefs() &&
II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg)
PhysReg = Reg;
}
if (PhysReg != 0) {
const TargetRegisterClass *RC =
TRI->getMinimalPhysRegClass(Reg, Def->getSimpleValueType(ResNo));
Cost = RC->getCopyCost();
}
}
// Helper for AddGlue to clone node operands.
static void CloneNodeWithValues(SDNode *N, SelectionDAG *DAG, ArrayRef<EVT> VTs,
SDValue ExtraOper = SDValue()) {
SmallVector<SDValue, 8> Ops(N->op_begin(), N->op_end());
if (ExtraOper.getNode())
Ops.push_back(ExtraOper);
SDVTList VTList = DAG->getVTList(VTs);
MachineSDNode::mmo_iterator Begin = nullptr, End = nullptr;
MachineSDNode *MN = dyn_cast<MachineSDNode>(N);
// Store memory references.
if (MN) {
Begin = MN->memoperands_begin();
End = MN->memoperands_end();
}
DAG->MorphNodeTo(N, N->getOpcode(), VTList, Ops);
// Reset the memory references
if (MN)
MN->setMemRefs(Begin, End);
}
static bool AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
SDNode *GlueDestNode = Glue.getNode();
// Don't add glue from a node to itself.
if (GlueDestNode == N) return false;
// Don't add a glue operand to something that already uses glue.
if (GlueDestNode &&
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
return false;
}
// Don't add glue to something that already has a glue value.
if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return false;
SmallVector<EVT, 4> VTs(N->value_begin(), N->value_end());
if (AddGlue)
VTs.push_back(MVT::Glue);
CloneNodeWithValues(N, DAG, VTs, Glue);
return true;
}
// Cleanup after unsuccessful AddGlue. Use the standard method of morphing the
// node even though simply shrinking the value list is sufficient.
static void RemoveUnusedGlue(SDNode *N, SelectionDAG *DAG) {
assert((N->getValueType(N->getNumValues() - 1) == MVT::Glue &&
!N->hasAnyUseOfValue(N->getNumValues() - 1)) &&
"expected an unused glue value");
CloneNodeWithValues(N, DAG,
makeArrayRef(N->value_begin(), N->getNumValues() - 1));
}
/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target-specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
SDNode *Chain = nullptr;
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
Chain = Node->getOperand(NumOps-1).getNode();
if (!Chain)
return;
// Look for other loads of the same chain. Find loads that are loading from
// the same base pointer and different offsets.
SmallPtrSet<SDNode*, 16> Visited;
SmallVector<int64_t, 4> Offsets;
DenseMap<long long, SDNode*> O2SMap; // Map from offset to SDNode.
bool Cluster = false;
SDNode *Base = Node;
// This algorithm requires a reasonably low use count before finding a match
// to avoid uselessly blowing up compile time in large blocks.
unsigned UseCount = 0;
for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
I != E && UseCount < 100; ++I, ++UseCount) {
SDNode *User = *I;
if (User == Node || !Visited.insert(User).second)
continue;
int64_t Offset1, Offset2;
if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
Offset1 == Offset2)
      // FIXME: Should be OK if the addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
continue;
if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
Offsets.push_back(Offset1);
O2SMap.insert(std::make_pair(Offset2, User));
Offsets.push_back(Offset2);
if (Offset2 < Offset1)
Base = User;
Cluster = true;
// Reset UseCount to allow more matches.
UseCount = 0;
}
if (!Cluster)
return;
// Sort them in increasing order.
std::sort(Offsets.begin(), Offsets.end());
// Check if the loads are close enough.
SmallVector<SDNode*, 4> Loads;
unsigned NumLoads = 0;
int64_t BaseOff = Offsets[0];
SDNode *BaseLoad = O2SMap[BaseOff];
Loads.push_back(BaseLoad);
for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
int64_t Offset = Offsets[i];
SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                      NumLoads))
break; // Stop right here. Ignore loads that are further away.
Loads.push_back(Load);
++NumLoads;
}
if (NumLoads == 0)
return;
// Cluster loads by adding MVT::Glue outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
SDNode *Lead = Loads[0];
SDValue InGlue = SDValue(nullptr, 0);
if (AddGlue(Lead, InGlue, true, DAG))
InGlue = SDValue(Lead, Lead->getNumValues() - 1);
for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
bool OutGlue = I < E - 1;
SDNode *Load = Loads[I];
    // If AddGlue fails, we could leave an unused glue value. This should not
    // cause any problems; the leftover glue is removed by RemoveUnusedGlue below.
if (AddGlue(Load, InGlue, OutGlue, DAG)) {
if (OutGlue)
InGlue = SDValue(Load, Load->getNumValues() - 1);
++LoadsClustered;
}
else if (!OutGlue && InGlue.getNode())
RemoveUnusedGlue(InGlue.getNode(), DAG);
}
}
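// Illustrative sketch (assumed offsets, not from the source): given three
// loads L0, L1, L2 from the same base at offsets 0, 4 and 8, the code above
// glues them into a chain
//
//   L0 --glue--> L1 --glue--> L2
//
// so the scheduler keeps them adjacent and in increasing-address order.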
/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
for (SDNode &NI : DAG->allnodes()) {
SDNode *Node = &NI;
if (!Node || !Node->isMachineOpcode())
continue;
unsigned Opc = Node->getMachineOpcode();
const MCInstrDesc &MCID = TII->get(Opc);
if (MCID.mayLoad())
// Cluster loads from "near" addresses into combined SUnits.
ClusterNeighboringLoads(Node);
}
}
void ScheduleDAGSDNodes::BuildSchedUnits() {
// During scheduling, the NodeId field of SDNode is used to map SDNodes
// to their associated SUnits by holding SUnits table indices. A value
// of -1 means the SDNode does not yet have an associated SUnit.
unsigned NumNodes = 0;
for (SDNode &NI : DAG->allnodes()) {
NI.setNodeId(-1);
++NumNodes;
}
// Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
// invalidated.
// FIXME: Multiply by 2 because we may clone nodes during scheduling.
// This is a temporary workaround.
SUnits.reserve(NumNodes * 2);
// Add all nodes in depth first order.
SmallVector<SDNode*, 64> Worklist;
SmallPtrSet<SDNode*, 64> Visited;
Worklist.push_back(DAG->getRoot().getNode());
Visited.insert(DAG->getRoot().getNode());
SmallVector<SUnit*, 8> CallSUnits;
while (!Worklist.empty()) {
SDNode *NI = Worklist.pop_back_val();
// Add all operands to the worklist unless they've already been added.
for (const SDValue &Op : NI->op_values())
if (Visited.insert(Op.getNode()).second)
Worklist.push_back(Op.getNode());
if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
continue;
// If this node has already been processed, stop now.
if (NI->getNodeId() != -1) continue;
SUnit *NodeSUnit = newSUnit(NI);
    // See if anything is glued to this node; if so, add it to the glued
    // nodes. Nodes can have at most one glue input and one glue output. Glue
// is required to be the last operand and result of a node.
// Scan up to find glued preds.
SDNode *N = NI;
while (N->getNumOperands() &&
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
N = N->getOperand(N->getNumOperands()-1).getNode();
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
NodeSUnit->isCall = true;
}
// Scan down to find any glued succs.
N = NI;
while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
SDValue GlueVal(N, N->getNumValues()-1);
// There are either zero or one users of the Glue result.
bool HasGlueUse = false;
for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
UI != E; ++UI)
if (GlueVal.isOperandOf(*UI)) {
HasGlueUse = true;
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
N = *UI;
if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
NodeSUnit->isCall = true;
break;
}
if (!HasGlueUse) break;
}
if (NodeSUnit->isCall)
CallSUnits.push_back(NodeSUnit);
// Schedule zero-latency TokenFactor below any nodes that may increase the
// schedule height. Otherwise, ancestors of the TokenFactor may appear to
// have false stalls.
if (NI->getOpcode() == ISD::TokenFactor)
NodeSUnit->isScheduleLow = true;
// If there are glue operands involved, N is now the bottom-most node
// of the sequence of nodes that are glued together.
// Update the SUnit.
NodeSUnit->setNode(N);
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
// Compute NumRegDefsLeft. This must be done before AddSchedEdges.
InitNumRegDefsLeft(NodeSUnit);
// Assign the Latency field of NodeSUnit using target-provided information.
computeLatency(NodeSUnit);
}
// Find all call operands.
while (!CallSUnits.empty()) {
SUnit *SU = CallSUnits.pop_back_val();
for (const SDNode *SUNode = SU->getNode(); SUNode;
SUNode = SUNode->getGluedNode()) {
if (SUNode->getOpcode() != ISD::CopyToReg)
continue;
SDNode *SrcN = SUNode->getOperand(2).getNode();
if (isPassiveNode(SrcN)) continue; // Not scheduled.
SUnit *SrcSU = &SUnits[SrcN->getNodeId()];
SrcSU->isCallOp = true;
}
}
}
void ScheduleDAGSDNodes::AddSchedEdges() {
const TargetSubtargetInfo &ST = MF.getSubtarget();
// Check to see if the scheduler cares about latencies.
bool UnitLatencies = forceUnitLatencies();
// Pass 2: add the preds, succs, etc.
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
SUnit *SU = &SUnits[su];
SDNode *MainNode = SU->getNode();
if (MainNode->isMachineOpcode()) {
unsigned Opc = MainNode->getMachineOpcode();
const MCInstrDesc &MCID = TII->get(Opc);
for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
SU->isTwoAddress = true;
break;
}
}
if (MCID.isCommutable())
SU->isCommutable = true;
}
// Find all predecessors and successors of the group.
for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
if (N->isMachineOpcode() &&
TII->get(N->getMachineOpcode()).getImplicitDefs()) {
SU->hasPhysRegClobbers = true;
unsigned NumUsed = InstrEmitter::CountResults(N);
while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
--NumUsed; // Skip over unused values at the end.
if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
SU->hasPhysRegDefs = true;
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDNode *OpN = N->getOperand(i).getNode();
if (isPassiveNode(OpN)) continue; // Not scheduled.
SUnit *OpSU = &SUnits[OpN->getNodeId()];
assert(OpSU && "Node has no SUnit!");
if (OpSU == SU) continue; // In the same group.
EVT OpVT = N->getOperand(i).getValueType();
assert(OpVT != MVT::Glue && "Glued nodes should be in same sunit!");
bool isChain = OpVT == MVT::Other;
unsigned PhysReg = 0;
int Cost = 1;
// Determine if this is a physical register dependency.
CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
assert((PhysReg == 0 || !isChain) &&
"Chain dependence via physreg data?");
// FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, scheduler
// emits a copy from the physical register to a virtual register unless
// it requires a cross class copy (cost < 0). That means we are only
// treating "expensive to copy" register dependency as physical register
// dependency. This may change in the future though.
if (Cost >= 0 && !StressSched)
PhysReg = 0;
// If this is a ctrl dep, latency is 1.
unsigned OpLatency = isChain ? 1 : OpSU->Latency;
// Special-case TokenFactor chains as zero-latency.
        if (isChain && OpN->getOpcode() == ISD::TokenFactor)
OpLatency = 0;
SDep Dep = isChain ? SDep(OpSU, SDep::Barrier)
: SDep(OpSU, SDep::Data, PhysReg);
Dep.setLatency(OpLatency);
if (!isChain && !UnitLatencies) {
computeOperandLatency(OpN, N, i, Dep);
ST.adjustSchedDependency(OpSU, SU, Dep);
}
if (!SU->addPred(Dep) && !Dep.isCtrl() && OpSU->NumRegDefsLeft > 1) {
// Multiple register uses are combined in the same SUnit. For example,
// we could have a set of glued nodes with all their defs consumed by
// another set of glued nodes. Register pressure tracking sees this as
// a single use, so to keep pressure balanced we reduce the defs.
//
// We can't tell (without more book-keeping) if this results from
// glued nodes or duplicate operands. As long as we don't reduce
// NumRegDefsLeft to zero, we handle the common cases well.
--OpSU->NumRegDefsLeft;
}
}
}
}
}
/// BuildSchedGraph - Build the SUnit graph from the selection DAG we were
/// given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// glued together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
// Cluster certain nodes which should be scheduled together.
ClusterNodes();
// Populate the SUnits array.
BuildSchedUnits();
// Compute all the scheduling dependencies between nodes.
AddSchedEdges();
}
// Initialize NumNodeDefs for the current Node's opcode.
void ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs() {
// Check for phys reg copy.
if (!Node)
return;
if (!Node->isMachineOpcode()) {
if (Node->getOpcode() == ISD::CopyFromReg)
NodeNumDefs = 1;
else
NodeNumDefs = 0;
return;
}
unsigned POpc = Node->getMachineOpcode();
if (POpc == TargetOpcode::IMPLICIT_DEF) {
// No register need be allocated for this.
NodeNumDefs = 0;
return;
}
if (POpc == TargetOpcode::PATCHPOINT &&
Node->getValueType(0) == MVT::Other) {
// PATCHPOINT is defined to have one result, but it might really have none
// if we're not using CallingConv::AnyReg. Don't mistake the chain for a
// real definition.
NodeNumDefs = 0;
return;
}
unsigned NRegDefs = SchedDAG->TII->get(Node->getMachineOpcode()).getNumDefs();
// Some instructions define regs that are not represented in the selection DAG
// (e.g. unused flags). See tMOVi8. Make sure we don't access past NumValues.
NodeNumDefs = std::min(Node->getNumValues(), NRegDefs);
DefIdx = 0;
}
// Construct a RegDefIter for this SUnit and find the first valid value.
ScheduleDAGSDNodes::RegDefIter::RegDefIter(const SUnit *SU,
const ScheduleDAGSDNodes *SD)
: SchedDAG(SD), Node(SU->getNode()), DefIdx(0), NodeNumDefs(0) {
InitNodeNumDefs();
Advance();
}
// Advance to the next valid value defined by the SUnit.
void ScheduleDAGSDNodes::RegDefIter::Advance() {
for (;Node;) { // Visit all glued nodes.
for (;DefIdx < NodeNumDefs; ++DefIdx) {
if (!Node->hasAnyUseOfValue(DefIdx))
continue;
ValueType = Node->getSimpleValueType(DefIdx);
++DefIdx;
return; // Found a normal regdef.
}
Node = Node->getGluedNode();
if (!Node) {
return; // No values left to visit.
}
InitNodeNumDefs();
}
}
void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
assert(SU->NumRegDefsLeft == 0 && "expect a new node");
for (RegDefIter I(SU, this); I.IsValid(); I.Advance()) {
assert(SU->NumRegDefsLeft < USHRT_MAX && "overflow is ok but unexpected");
++SU->NumRegDefsLeft;
}
}
void ScheduleDAGSDNodes::computeLatency(SUnit *SU) {
SDNode *N = SU->getNode();
// TokenFactor operands are considered zero latency, and some schedulers
// (e.g. Top-Down list) may rely on the fact that operand latency is nonzero
// whenever node latency is nonzero.
if (N && N->getOpcode() == ISD::TokenFactor) {
SU->Latency = 0;
return;
}
// Check to see if the scheduler cares about latencies.
if (forceUnitLatencies()) {
SU->Latency = 1;
return;
}
if (!InstrItins || InstrItins->isEmpty()) {
if (N && N->isMachineOpcode() &&
TII->isHighLatencyDef(N->getMachineOpcode()))
SU->Latency = HighLatencyCycles;
else
SU->Latency = 1;
return;
}
// Compute the latency for the node. We use the sum of the latencies for
// all nodes glued together into this SUnit.
SU->Latency = 0;
for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
if (N->isMachineOpcode())
SU->Latency += TII->getInstrLatency(InstrItins, N);
}
void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const{
// Check to see if the scheduler cares about latencies.
if (forceUnitLatencies())
return;
if (dep.getKind() != SDep::Data)
return;
unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
if (Use->isMachineOpcode())
// Adjust the use operand index by num of defs.
OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
!BB->succ_empty()) {
unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
// This copy is a liveout value. It is likely coalesced, so reduce the
      // latency so as not to penalize the def.
// FIXME: need target specific adjustment here?
Latency = (Latency > 1) ? Latency - 1 : 1;
}
if (Latency >= 0)
dep.setLatency(Latency);
}
void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
if (!SU->getNode()) {
dbgs() << "PHYS REG COPY\n";
return;
}
SU->getNode()->dump(DAG);
dbgs() << "\n";
SmallVector<SDNode *, 4> GluedNodes;
for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
GluedNodes.push_back(N);
while (!GluedNodes.empty()) {
dbgs() << " ";
GluedNodes.back()->dump(DAG);
dbgs() << "\n";
GluedNodes.pop_back();
}
#endif
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGSDNodes::dumpSchedule() const {
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
if (SUnit *SU = Sequence[i])
SU->dump(this);
else
dbgs() << "**** NOOP ****\n";
}
}
#endif
#ifndef NDEBUG
/// VerifyScheduledSequence - Verify that all SUnits were scheduled and that
/// their state is consistent with the nodes listed in Sequence.
///
void ScheduleDAGSDNodes::VerifyScheduledSequence(bool isBottomUp) {
unsigned ScheduledNodes = ScheduleDAG::VerifyScheduledDAG(isBottomUp);
unsigned Noops = 0;
for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
if (!Sequence[i])
++Noops;
assert(Sequence.size() - Noops == ScheduledNodes &&
"The number of nodes scheduled doesn't match the expected number!");
}
#endif // NDEBUG
/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
static void
ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
DenseMap<SDValue, unsigned> &VRBaseMap, unsigned Order) {
if (!N->getHasDebugValue())
return;
// Opportunistically insert immediate dbg_value uses, i.e. those with source
// order number right after the N.
MachineBasicBlock *BB = Emitter.getBlock();
MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
ArrayRef<SDDbgValue*> DVs = DAG->GetDbgValues(N);
for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
if (DVs[i]->isInvalidated())
continue;
unsigned DVOrder = DVs[i]->getOrder();
if (!Order || DVOrder == ++Order) {
MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
if (DbgMI) {
Orders.push_back(std::make_pair(DVOrder, DbgMI));
BB->insert(InsertPos, DbgMI);
}
DVs[i]->setIsInvalidated();
}
}
}
// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void
ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
DenseMap<SDValue, unsigned> &VRBaseMap,
SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
SmallSet<unsigned, 8> &Seen) {
unsigned Order = N->getIROrder();
if (!Order || !Seen.insert(Order).second) {
// Process any valid SDDbgValues even if node does not have any order
// assigned.
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
return;
}
MachineBasicBlock *BB = Emitter.getBlock();
if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI() ||
// Fast-isel may have inserted some instructions, in which case the
// BB->back().isPHI() test will not fire when we want it to.
std::prev(Emitter.getInsertPos())->isPHI()) {
// Did not insert any instruction.
Orders.push_back(std::make_pair(Order, (MachineInstr*)nullptr));
return;
}
Orders.push_back(std::make_pair(Order, std::prev(Emitter.getInsertPos())));
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}
void ScheduleDAGSDNodes::
EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
MachineBasicBlock::iterator InsertPos) {
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl()) continue; // ignore chain preds
if (I->getSUnit()->CopyDstRC) {
// Copy to physical register.
DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->getSUnit());
assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
// Find the destination physical register.
unsigned Reg = 0;
for (SUnit::const_succ_iterator II = SU->Succs.begin(),
EE = SU->Succs.end(); II != EE; ++II) {
if (II->isCtrl()) continue; // ignore chain preds
if (II->getReg()) {
Reg = II->getReg();
break;
}
}
BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
.addReg(VRI->second);
} else {
// Copy from physical register.
assert(I->getReg() && "Unknown physical register!");
unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
.addReg(I->getReg());
}
break;
}
}
/// EmitSchedule - Emit the machine code in scheduled order. Return the new
/// InsertPos and MachineBasicBlock that contains this insertion
/// point. ScheduleDAGSDNodes holds a BB pointer for convenience, but this does
/// not necessarily refer to returned BB. The emitter may split blocks.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
InstrEmitter Emitter(BB, InsertPos);
DenseMap<SDValue, unsigned> VRBaseMap;
DenseMap<SUnit*, unsigned> CopyVRBaseMap;
SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
SmallSet<unsigned, 8> Seen;
bool HasDbg = DAG->hasDebugValues();
// If this is the first BB, emit byval parameter dbg_value's.
if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
if (DbgMI)
BB->insert(InsertPos, DbgMI);
}
}
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
SUnit *SU = Sequence[i];
if (!SU) {
// Null SUnit* is a noop.
TII->insertNoop(*Emitter.getBlock(), InsertPos);
continue;
}
// For pre-regalloc scheduling, create instructions corresponding to the
// SDNode and any glued SDNodes and append them to the block.
if (!SU->getNode()) {
// Emit a copy.
EmitPhysRegCopy(SU, CopyVRBaseMap, InsertPos);
continue;
}
SmallVector<SDNode *, 4> GluedNodes;
for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
GluedNodes.push_back(N);
while (!GluedNodes.empty()) {
SDNode *N = GluedNodes.back();
Emitter.EmitNode(GluedNodes.back(), SU->OrigNode != SU, SU->isCloned,
VRBaseMap);
// Remember the source order of the inserted instruction.
if (HasDbg)
ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
GluedNodes.pop_back();
}
Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
VRBaseMap);
// Remember the source order of the inserted instruction.
if (HasDbg)
ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
Seen);
}
// Insert all the dbg_values which have not already been inserted in source
// order sequence.
if (HasDbg) {
MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();
// Sort the source order instructions and use the order to insert debug
// values.
std::sort(Orders.begin(), Orders.end(), less_first());
SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
// Now emit the rest according to source order.
unsigned LastOrder = 0;
for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
unsigned Order = Orders[i].first;
MachineInstr *MI = Orders[i].second;
// Insert all SDDbgValue's whose order(s) are before "Order".
if (!MI)
continue;
for (; DI != DE &&
(*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
if ((*DI)->isInvalidated())
continue;
MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
if (DbgMI) {
if (!LastOrder)
// Insert to start of the BB (after PHIs).
BB->insert(BBBegin, DbgMI);
else {
// Insert at the instruction, which may be in a different
// block, if the block was split by a custom inserter.
MachineBasicBlock::iterator Pos = MI;
MI->getParent()->insert(Pos, DbgMI);
}
}
}
LastOrder = Order;
}
// Add trailing DbgValue's before the terminator. FIXME: May want to add
// some of them before one or more conditional branches?
SmallVector<MachineInstr*, 8> DbgMIs;
while (DI != DE) {
if (!(*DI)->isInvalidated())
if (MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap))
DbgMIs.push_back(DbgMI);
++DI;
}
MachineBasicBlock *InsertBB = Emitter.getBlock();
MachineBasicBlock::iterator Pos = InsertBB->getFirstTerminator();
InsertBB->insert(Pos, DbgMIs.begin(), DbgMIs.end());
}
InsertPos = Emitter.getInsertPos();
return Emitter.getBlock();
}
/// Return the basic block label.
std::string ScheduleDAGSDNodes::getDAGName() const {
return "sunit-dag." + BB->getFullName();
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp | //===- ResourcePriorityQueue.cpp - A DFA-oriented priority queue -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ResourcePriorityQueue class, which is a
// SchedulingPriorityQueue that prioritizes instructions using DFA state to
// reduce the length of the critical path through the basic block
// on VLIW platforms.
// The scheduler is basically a top-down adaptable list scheduler with DFA
// resource tracking added to the cost function.
// DFA is queried as a state machine to model "packets/bundles" during
// scheduling. Currently packets/bundles are discarded at the end of
// scheduling, affecting only the order of instructions.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ResourcePriorityQueue.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "scheduler"
static cl::opt<bool> DisableDFASched("disable-dfa-sched", cl::Hidden,
cl::ZeroOrMore, cl::init(false),
cl::desc("Disable use of DFA during scheduling"));
static cl::opt<signed> RegPressureThreshold(
"dfa-sched-reg-pressure-threshold", cl::Hidden, cl::ZeroOrMore, cl::init(5),
cl::desc("Track reg pressure and switch priority to in-depth"));
ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
: Picker(this), InstrItins(IS->MF->getSubtarget().getInstrItineraryData()) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
TRI = STI.getRegisterInfo();
TLI = IS->TLI;
TII = STI.getInstrInfo();
ResourcesModel.reset(TII->CreateTargetScheduleState(STI));
// This hard requirement could be relaxed, but for now
  // do not let it proceed.
assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");
unsigned NumRC = TRI->getNumRegClasses();
RegLimit.resize(NumRC);
RegPressure.resize(NumRC);
std::fill(RegLimit.begin(), RegLimit.end(), 0);
std::fill(RegPressure.begin(), RegPressure.end(), 0);
for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
E = TRI->regclass_end();
I != E; ++I)
RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, *IS->MF);
ParallelLiveRanges = 0;
HorizontalVerticalBalance = 0;
}
unsigned
ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
unsigned NumberDeps = 0;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
continue;
SUnit *PredSU = I->getSUnit();
const SDNode *ScegN = PredSU->getNode();
if (!ScegN)
continue;
// If value is passed to CopyToReg, it is probably
// live outside BB.
switch (ScegN->getOpcode()) {
default: break;
case ISD::TokenFactor: break;
case ISD::CopyFromReg: NumberDeps++; break;
case ISD::CopyToReg: break;
case ISD::INLINEASM: break;
}
if (!ScegN->isMachineOpcode())
continue;
for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
MVT VT = ScegN->getSimpleValueType(i);
if (TLI->isTypeLegal(VT)
&& (TLI->getRegClassFor(VT)->getID() == RCId)) {
NumberDeps++;
break;
}
}
}
return NumberDeps;
}
unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
unsigned RCId) {
unsigned NumberDeps = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isCtrl())
continue;
SUnit *SuccSU = I->getSUnit();
const SDNode *ScegN = SuccSU->getNode();
if (!ScegN)
continue;
// If value is passed to CopyToReg, it is probably
// live outside BB.
switch (ScegN->getOpcode()) {
default: break;
case ISD::TokenFactor: break;
case ISD::CopyFromReg: break;
case ISD::CopyToReg: NumberDeps++; break;
case ISD::INLINEASM: break;
}
if (!ScegN->isMachineOpcode())
continue;
for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
const SDValue &Op = ScegN->getOperand(i);
MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
if (TLI->isTypeLegal(VT)
&& (TLI->getRegClassFor(VT)->getID() == RCId)) {
NumberDeps++;
break;
}
}
}
return NumberDeps;
}
static unsigned numberCtrlDepsInSU(SUnit *SU) {
unsigned NumberDeps = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I)
if (I->isCtrl())
NumberDeps++;
return NumberDeps;
}
static unsigned numberCtrlPredInSU(SUnit *SU) {
unsigned NumberDeps = 0;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I)
if (I->isCtrl())
NumberDeps++;
return NumberDeps;
}
///
/// Initialize nodes.
///
void ResourcePriorityQueue::initNodes(std::vector<SUnit> &sunits) {
SUnits = &sunits;
NumNodesSolelyBlocking.resize(SUnits->size(), 0);
for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
SUnit *SU = &(*SUnits)[i];
initNumRegDefsLeft(SU);
SU->NodeQueueId = 0;
}
}
/// This heuristic is used if DFA scheduling is not desired
/// for some VLIW platform.
bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
// The isScheduleHigh flag allows nodes with wraparound dependencies that
// cannot easily be modeled as edges with latencies to be scheduled as
// soon as possible in a top-down schedule.
if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
return false;
if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
return true;
unsigned LHSNum = LHS->NodeNum;
unsigned RHSNum = RHS->NodeNum;
// The most important heuristic is scheduling the critical path.
unsigned LHSLatency = PQ->getLatency(LHSNum);
unsigned RHSLatency = PQ->getLatency(RHSNum);
if (LHSLatency < RHSLatency) return true;
if (LHSLatency > RHSLatency) return false;
// After that, if two nodes have identical latencies, look to see if one will
// unblock more other nodes than the other.
unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
if (LHSBlocked < RHSBlocked) return true;
if (LHSBlocked > RHSBlocked) return false;
// Finally, just to provide a stable ordering, use the node number as a
// deciding factor.
return LHSNum < RHSNum;
}
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
SUnit *OnlyAvailablePred = nullptr;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
SUnit &Pred = *I->getSUnit();
if (!Pred.isScheduled) {
// We found an available, but not scheduled, predecessor. If it's the
// only one we have found, keep track of it... otherwise give up.
if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
return nullptr;
OnlyAvailablePred = &Pred;
}
}
return OnlyAvailablePred;
}
void ResourcePriorityQueue::push(SUnit *SU) {
// Look at all of the successors of this node. Count the number of nodes that
// this node is the sole unscheduled node for.
unsigned NumNodesBlocking = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I)
if (getSingleUnscheduledPred(I->getSUnit()) == SU)
++NumNodesBlocking;
NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
Queue.push_back(SU);
}
/// Check if scheduling of this SU is possible
/// in the current packet.
bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
if (!SU || !SU->getNode())
return false;
// If this is a compound instruction,
// it is likely to be a call. Do not delay it.
if (SU->getNode()->getGluedNode())
return true;
// First see if the pipeline could receive this instruction
// in the current cycle.
if (SU->getNode()->isMachineOpcode())
switch (SU->getNode()->getMachineOpcode()) {
default:
if (!ResourcesModel->canReserveResources(&TII->get(
SU->getNode()->getMachineOpcode())))
return false;
case TargetOpcode::EXTRACT_SUBREG:
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::SUBREG_TO_REG:
case TargetOpcode::REG_SEQUENCE:
case TargetOpcode::IMPLICIT_DEF:
break;
}
  // Now make sure there are no other dependencies
  // on instructions already in the packet.
for (unsigned i = 0, e = Packet.size(); i != e; ++i)
for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
E = Packet[i]->Succs.end(); I != E; ++I) {
// Since we do not add pseudos to packets, might as well
      // ignore order deps.
if (I->isCtrl())
continue;
if (I->getSUnit() == SU)
return false;
}
return true;
}
/// Keep track of available resources.
void ResourcePriorityQueue::reserveResources(SUnit *SU) {
// If this SU does not fit in the packet
// start a new one.
if (!isResourceAvailable(SU) || SU->getNode()->getGluedNode()) {
ResourcesModel->clearResources();
Packet.clear();
}
if (SU->getNode() && SU->getNode()->isMachineOpcode()) {
switch (SU->getNode()->getMachineOpcode()) {
default:
ResourcesModel->reserveResources(&TII->get(
SU->getNode()->getMachineOpcode()));
break;
case TargetOpcode::EXTRACT_SUBREG:
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::SUBREG_TO_REG:
case TargetOpcode::REG_SEQUENCE:
case TargetOpcode::IMPLICIT_DEF:
break;
}
Packet.push_back(SU);
}
// Forcefully end packet for PseudoOps.
else {
ResourcesModel->clearResources();
Packet.clear();
}
// If packet is now full, reset the state so in the next cycle
// we start fresh.
if (Packet.size() >= InstrItins->SchedModel.IssueWidth) {
ResourcesModel->clearResources();
Packet.clear();
}
}
signed ResourcePriorityQueue::rawRegPressureDelta(SUnit *SU, unsigned RCId) {
signed RegBalance = 0;
if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
return RegBalance;
// Gen estimate.
for (unsigned i = 0, e = SU->getNode()->getNumValues(); i != e; ++i) {
MVT VT = SU->getNode()->getSimpleValueType(i);
if (TLI->isTypeLegal(VT)
&& TLI->getRegClassFor(VT)
&& TLI->getRegClassFor(VT)->getID() == RCId)
RegBalance += numberRCValSuccInSU(SU, RCId);
}
// Kill estimate.
for (unsigned i = 0, e = SU->getNode()->getNumOperands(); i != e; ++i) {
const SDValue &Op = SU->getNode()->getOperand(i);
MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
if (isa<ConstantSDNode>(Op.getNode()))
continue;
if (TLI->isTypeLegal(VT) && TLI->getRegClassFor(VT)
&& TLI->getRegClassFor(VT)->getID() == RCId)
RegBalance -= numberRCValPredInSU(SU, RCId);
}
return RegBalance;
}
/// Estimates change in reg pressure from this SU.
/// It is achieved by trivial tracking of defined
/// and used vregs in dependent instructions.
/// The RawPressure flag makes this function ignore
/// existing register-file sizes and report the raw def/use
/// balance.
signed ResourcePriorityQueue::regPressureDelta(SUnit *SU, bool RawPressure) {
signed RegBalance = 0;
if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
return RegBalance;
if (RawPressure) {
for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
E = TRI->regclass_end(); I != E; ++I) {
const TargetRegisterClass *RC = *I;
RegBalance += rawRegPressureDelta(SU, RC->getID());
}
}
else {
for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
E = TRI->regclass_end(); I != E; ++I) {
const TargetRegisterClass *RC = *I;
if ((RegPressure[RC->getID()] +
rawRegPressureDelta(SU, RC->getID()) > 0) &&
(RegPressure[RC->getID()] +
rawRegPressureDelta(SU, RC->getID()) >= RegLimit[RC->getID()]))
RegBalance += rawRegPressureDelta(SU, RC->getID());
}
}
return RegBalance;
}
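// Illustrative note: the raw delta adds an estimate for values of class RCId
// that this SU defines and successors consume, and subtracts an estimate for
// predecessor-defined values it kills. The guarded variant above only
// reports that delta once the projected pressure, RegPressure[RC] + delta,
// would reach RegLimit[RC].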
// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;
static const unsigned PriorityTwo = 50;
static const unsigned PriorityThree = 15;
static const unsigned PriorityFour = 5;
static const unsigned ScaleOne = 20;
static const unsigned ScaleTwo = 10;
static const unsigned ScaleThree = 5;
static const unsigned FactorOne = 2;
/// Returns single number reflecting benefit of scheduling SU
/// in the current cycle.
signed ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
// Initial trivial priority.
signed ResCount = 1;
// Do not waste time on a node that is already scheduled.
if (SU->isScheduled)
return ResCount;
// Forced priority is high.
if (SU->isScheduleHigh)
ResCount += PriorityOne;
// Adaptable scheduling
// A small, but very parallel
// region, where reg pressure is an issue.
if (HorizontalVerticalBalance > RegPressureThreshold) {
// Critical path first
ResCount += (SU->getHeight() * ScaleTwo);
// If resources are available for it, multiply the
// chance of scheduling.
if (isResourceAvailable(SU))
ResCount <<= FactorOne;
// Consider change to reg pressure from scheduling
// this SU.
ResCount -= (regPressureDelta(SU,true) * ScaleOne);
}
  // Default heuristic: greedy and
  // critical-path driven.
else {
// Critical path first.
ResCount += (SU->getHeight() * ScaleTwo);
    // Now see how many instructions are blocked by this SU.
ResCount += (NumNodesSolelyBlocking[SU->NodeNum] * ScaleTwo);
// If resources are available for it, multiply the
// chance of scheduling.
if (isResourceAvailable(SU))
ResCount <<= FactorOne;
ResCount -= (regPressureDelta(SU) * ScaleTwo);
}
  // These are platform-specific things.
  // They will need to go into the back end
  // and be accessed from here via a hook.
for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
if (N->isMachineOpcode()) {
const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
if (TID.isCall())
ResCount += (PriorityTwo + (ScaleThree*N->getNumValues()));
}
else
switch (N->getOpcode()) {
default: break;
case ISD::TokenFactor:
case ISD::CopyFromReg:
case ISD::CopyToReg:
ResCount += PriorityFour;
break;
case ISD::INLINEASM:
ResCount += PriorityThree;
break;
}
}
return ResCount;
}
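// Worked example (illustrative, using the constants above): an unscheduled,
// non-high-priority SU of height 3 that solely blocks 2 nodes, with
// resources available and zero register-pressure delta, costs
//   ResCount = (1 + 3 * ScaleTwo + 2 * ScaleTwo) << FactorOne
//            = (1 + 30 + 20) * 4 = 204
// under the default (greedy, critical-path) branch.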
/// Main resource tracking point.
void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
// Use NULL entry as an event marker to reset
// the DFA state.
if (!SU) {
ResourcesModel->clearResources();
Packet.clear();
return;
}
const SDNode *ScegN = SU->getNode();
// Update reg pressure tracking.
// First update current node.
if (ScegN->isMachineOpcode()) {
// Estimate generated regs.
for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
MVT VT = ScegN->getSimpleValueType(i);
if (TLI->isTypeLegal(VT)) {
const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
if (RC)
RegPressure[RC->getID()] += numberRCValSuccInSU(SU, RC->getID());
}
}
// Estimate killed regs.
for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
const SDValue &Op = ScegN->getOperand(i);
MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
if (TLI->isTypeLegal(VT)) {
const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
if (RC) {
if (RegPressure[RC->getID()] >
(numberRCValPredInSU(SU, RC->getID())))
RegPressure[RC->getID()] -= numberRCValPredInSU(SU, RC->getID());
else RegPressure[RC->getID()] = 0;
}
}
}
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl() || (I->getSUnit()->NumRegDefsLeft == 0))
continue;
--I->getSUnit()->NumRegDefsLeft;
}
}
// Reserve resources for this SU.
reserveResources(SU);
// Adjust number of parallel live ranges.
  // The heuristic is simple: a node with no data successors reduces the
  // number of live ranges; all others increase it.
unsigned NumberNonControlDeps = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
adjustPriorityOfUnscheduledPreds(I->getSUnit());
if (!I->isCtrl())
NumberNonControlDeps++;
}
if (!NumberNonControlDeps) {
if (ParallelLiveRanges >= SU->NumPreds)
ParallelLiveRanges -= SU->NumPreds;
else
ParallelLiveRanges = 0;
}
else
ParallelLiveRanges += SU->NumRegDefsLeft;
// Track parallel live chains.
HorizontalVerticalBalance += (SU->Succs.size() - numberCtrlDepsInSU(SU));
HorizontalVerticalBalance -= (SU->Preds.size() - numberCtrlPredInSU(SU));
}
void ResourcePriorityQueue::initNumRegDefsLeft(SUnit *SU) {
unsigned NodeNumDefs = 0;
for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
if (N->isMachineOpcode()) {
const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
// No register need be allocated for this.
if (N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
NodeNumDefs = 0;
break;
}
NodeNumDefs = std::min(N->getNumValues(), TID.getNumDefs());
}
else
switch(N->getOpcode()) {
default: break;
case ISD::CopyFromReg:
NodeNumDefs++;
break;
case ISD::INLINEASM:
NodeNumDefs++;
break;
}
SU->NumRegDefsLeft = NodeNumDefs;
}
/// adjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled. If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet. If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void ResourcePriorityQueue::adjustPriorityOfUnscheduledPreds(SUnit *SU) {
if (SU->isAvailable) return; // All preds scheduled.
SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable)
return;
// Okay, we found a single predecessor that is available, but not scheduled.
// Since it is available, it must be in the priority queue. First remove it.
remove(OnlyAvailablePred);
// Reinsert the node into the priority queue, which recomputes its
// NumNodesSolelyBlocking value.
push(OnlyAvailablePred);
}
/// Main access point - returns the next instruction
/// to be placed in the scheduling sequence.
SUnit *ResourcePriorityQueue::pop() {
if (empty())
return nullptr;
std::vector<SUnit *>::iterator Best = Queue.begin();
if (!DisableDFASched) {
signed BestCost = SUSchedulingCost(*Best);
for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I) {
if (SUSchedulingCost(*I) > BestCost) {
BestCost = SUSchedulingCost(*I);
Best = I;
}
}
}
// Use default TD scheduling mechanism.
else {
for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
}
SUnit *V = *Best;
if (Best != std::prev(Queue.end()))
std::swap(*Best, Queue.back());
Queue.pop_back();
return V;
}
void ResourcePriorityQueue::remove(SUnit *SU) {
assert(!Queue.empty() && "Queue is empty!");
std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | //===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;
#define DEBUG_TYPE "pre-RA-sched"
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
STATISTIC(NumPRCopies, "Number of physical register copies");
static RegisterScheduler
burrListDAGScheduler("list-burr",
"Bottom-up register reduction list scheduling",
createBURRListDAGScheduler);
static RegisterScheduler
sourceListDAGScheduler("source",
"Similar to list-burr but schedules in source "
"order when possible",
createSourceListDAGScheduler);
static RegisterScheduler
hybridListDAGScheduler("list-hybrid",
"Bottom-up register pressure aware list scheduling "
"which tries to balance latency and register pressure",
createHybridListDAGScheduler);
static RegisterScheduler
ILPListDAGScheduler("list-ilp",
"Bottom-up register pressure aware list scheduling "
"which tries to balance ILP and register pressure",
createILPListDAGScheduler);
static cl::opt<bool> DisableSchedCycles(
"disable-sched-cycles", cl::Hidden, cl::init(false),
cl::desc("Disable cycle-level precision during preRA scheduling"));
// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
"disable-sched-reg-pressure", cl::Hidden, cl::init(false),
cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
"disable-sched-live-uses", cl::Hidden, cl::init(true),
cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
"disable-sched-vrcycle", cl::Hidden, cl::init(false),
cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
"disable-sched-physreg-join", cl::Hidden, cl::init(false),
cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
"disable-sched-stalls", cl::Hidden, cl::init(true),
cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
"disable-sched-critical-path", cl::Hidden, cl::init(false),
cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
"disable-sched-height", cl::Hidden, cl::init(false),
cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
"disable-2addr-hack", cl::Hidden, cl::init(true),
cl::desc("Disable scheduler's two-address hack"));
static cl::opt<int> MaxReorderWindow(
"max-sched-reorder", cl::Hidden, cl::init(6),
cl::desc("Number of instructions to allow ahead of the critical path "
"in sched=list-ilp"));
static cl::opt<unsigned> AvgIPC(
"sched-avg-ipc", cl::Hidden, cl::init(1),
cl::desc("Average inst/cycle whan no target itinerary exists."));
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
/// NeedLatency - True if the scheduler will make use of latency information.
///
bool NeedLatency;
/// AvailableQueue - The priority queue to use for the available SUnits.
SchedulingPriorityQueue *AvailableQueue;
/// PendingQueue - This contains all of the instructions whose operands have
/// been issued, but their results are not ready yet (due to the latency of
/// the operation). Once the operands becomes available, the instruction is
/// added to the AvailableQueue.
std::vector<SUnit*> PendingQueue;
/// HazardRec - The hazard recognizer to use.
ScheduleHazardRecognizer *HazardRec;
/// CurCycle - The current scheduler state corresponds to this cycle.
unsigned CurCycle;
/// MinAvailableCycle - Cycle of the soonest available instruction.
unsigned MinAvailableCycle;
/// IssueCount - Count instructions issued in this cycle
/// Currently valid only for bottom-up scheduling.
unsigned IssueCount;
  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that modify
  /// the registers can be scheduled.
unsigned NumLiveRegs;
std::vector<SUnit*> LiveRegDefs;
std::vector<SUnit*> LiveRegGens;
// Collect interferences between physical register use/defs.
// Each interference is an SUnit and set of physical registers.
SmallVector<SUnit*, 4> Interferences;
typedef DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMapT;
LRegsMapT LRegsMap;
/// Topo - A topological ordering for SUnits which permits fast IsReachable
/// and similar queries.
ScheduleDAGTopologicalSort Topo;
// Hack to keep track of the inverse of FindCallSeqStart without more crazy
// DAG crawling.
DenseMap<SUnit*, SUnit*> CallSeqEndForStart;
public:
ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
SchedulingPriorityQueue *availqueue,
CodeGenOpt::Level OptLevel)
: ScheduleDAGSDNodes(mf),
NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
Topo(SUnits, nullptr) {
const TargetSubtargetInfo &STI = mf.getSubtarget();
if (DisableSchedCycles || !NeedLatency)
HazardRec = new ScheduleHazardRecognizer();
else
HazardRec = STI.getInstrInfo()->CreateTargetHazardRecognizer(&STI, this);
}
~ScheduleDAGRRList() override {
delete HazardRec;
delete AvailableQueue;
}
void Schedule() override;
ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }
/// IsReachable - Checks if SU is reachable from TargetSU.
bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
return Topo.IsReachable(SU, TargetSU);
}
/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
return Topo.WillCreateCycle(SU, TargetSU);
}
/// AddPred - adds a predecessor edge to SUnit SU.
/// This returns true if this is a new predecessor.
/// Updates the topological ordering if required.
void AddPred(SUnit *SU, const SDep &D) {
Topo.AddPred(SU, D.getSUnit());
SU->addPred(D);
}
/// RemovePred - removes a predecessor edge from SUnit SU.
/// This returns true if an edge was removed.
/// Updates the topological ordering if required.
void RemovePred(SUnit *SU, const SDep &D) {
Topo.RemovePred(SU, D.getSUnit());
SU->removePred(D);
}
private:
bool isReady(SUnit *SU) {
return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
AvailableQueue->isReady(SU);
}
void ReleasePred(SUnit *SU, const SDep *PredEdge);
void ReleasePredecessors(SUnit *SU);
void ReleasePending();
void AdvanceToCycle(unsigned NextCycle);
void AdvancePastStalls(SUnit *SU);
void EmitNode(SUnit *SU);
void ScheduleNodeBottomUp(SUnit*);
void CapturePred(SDep *PredEdge);
void UnscheduleNodeBottomUp(SUnit*);
void RestoreHazardCheckerBottomUp();
void BacktrackBottomUp(SUnit*, SUnit*);
SUnit *CopyAndMoveSuccessors(SUnit*);
void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
const TargetRegisterClass*,
const TargetRegisterClass*,
SmallVectorImpl<SUnit*>&);
bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
void releaseInterferences(unsigned Reg = 0);
SUnit *PickNodeToScheduleBottomUp();
void ListScheduleBottomUp();
/// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
/// Updates the topological ordering if required.
SUnit *CreateNewSUnit(SDNode *N) {
unsigned NumSUnits = SUnits.size();
SUnit *NewNode = newSUnit(N);
// Update the topological ordering.
if (NewNode->NodeNum >= NumSUnits)
Topo.InitDAGTopologicalSorting();
return NewNode;
}
/// CreateClone - Creates a new SUnit from an existing one.
/// Updates the topological ordering if required.
SUnit *CreateClone(SUnit *N) {
unsigned NumSUnits = SUnits.size();
SUnit *NewNode = Clone(N);
// Update the topological ordering.
if (NewNode->NodeNum >= NumSUnits)
Topo.InitDAGTopologicalSorting();
return NewNode;
}
/// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
/// need actual latency information but the hybrid scheduler does.
bool forceUnitLatencies() const override {
return !NeedLatency;
}
};
} // end anonymous namespace
/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
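/// Note that the returned Cost is only a rough spill-cost proxy (TLI's
/// representative register class cost for typed values, 1 otherwise); callers
/// compare it against RegLimit rather than treating it as an exact count.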
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
const TargetLowering *TLI,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI,
unsigned &RegClass, unsigned &Cost,
const MachineFunction &MF) {
MVT VT = RegDefPos.GetValue();
// Special handling for untyped values. These values can only come from
// the expansion of custom DAG-to-DAG patterns.
if (VT == MVT::Untyped) {
const SDNode *Node = RegDefPos.GetNode();
// Special handling for CopyFromReg of untyped values.
if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
RegClass = RC->getID();
Cost = 1;
return;
}
unsigned Opcode = Node->getMachineOpcode();
if (Opcode == TargetOpcode::REG_SEQUENCE) {
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
RegClass = RC->getID();
Cost = 1;
return;
}
unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc &Desc = TII->get(Opcode);
const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
RegClass = RC->getID();
// FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
// better way to determine it.
Cost = 1;
} else {
RegClass = TLI->getRepRegClassFor(VT)->getID();
Cost = TLI->getRepRegClassCostFor(VT);
}
}
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
DEBUG(dbgs()
<< "********** List Scheduling BB#" << BB->getNumber()
<< " '" << BB->getName() << "' **********\n");
CurCycle = 0;
IssueCount = 0;
MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
NumLiveRegs = 0;
// Allocate slots for each physical register, plus one for a special register
// to track the virtual resource of a calling sequence.
LiveRegDefs.resize(TRI->getNumRegs() + 1, nullptr);
LiveRegGens.resize(TRI->getNumRegs() + 1, nullptr);
CallSeqEndForStart.clear();
assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");
// Build the scheduling graph.
BuildSchedGraph(nullptr);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
Topo.InitDAGTopologicalSorting();
AvailableQueue->initNodes(SUnits);
HazardRec->Reset();
// Execute the actual scheduling loop.
ListScheduleBottomUp();
AvailableQueue->releaseState();
DEBUG({
dbgs() << "*** Final schedule ***\n";
dumpSchedule();
dbgs() << '\n';
});
}
//===----------------------------------------------------------------------===//
// Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
#ifndef NDEBUG
if (PredSU->NumSuccsLeft == 0) {
dbgs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
dbgs() << " has been released too many times!\n";
llvm_unreachable(nullptr);
}
#endif
--PredSU->NumSuccsLeft;
if (!forceUnitLatencies()) {
// Updating predecessor's height. This is now the cycle when the
// predecessor can be scheduled without causing a pipeline stall.
PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
}
// If all the node's successors are scheduled, this node is ready
// to be scheduled. Ignore the special EntrySU node.
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
PredSU->isAvailable = true;
unsigned Height = PredSU->getHeight();
if (Height < MinAvailableCycle)
MinAvailableCycle = Height;
if (isReady(PredSU)) {
AvailableQueue->push(PredSU);
}
// CapturePred and others may have left the node in the pending queue, avoid
// adding it twice.
else if (!PredSU->isPending) {
PredSU->isPending = true;
PendingQueue.push_back(PredSU);
}
}
}
/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
unsigned NestLevel,
const TargetInstrInfo *TII) {
SDNode *N = Outer;
for (;;) {
if (N == Inner)
return true;
// For a TokenFactor, examine each operand. There may be multiple ways
// to get to the CALLSEQ_BEGIN, but we need to find the path with the
// most nesting in order to ensure that we find the corresponding match.
if (N->getOpcode() == ISD::TokenFactor) {
for (const SDValue &Op : N->op_values())
if (IsChainDependent(Op.getNode(), Inner, NestLevel, TII))
return true;
return false;
}
// Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
if (N->isMachineOpcode()) {
if (N->getMachineOpcode() ==
(unsigned)TII->getCallFrameDestroyOpcode()) {
++NestLevel;
} else if (N->getMachineOpcode() ==
(unsigned)TII->getCallFrameSetupOpcode()) {
if (NestLevel == 0)
return false;
--NestLevel;
}
}
// Otherwise, find the chain and continue climbing.
for (const SDValue &Op : N->op_values())
if (Op.getValueType() == MVT::Other) {
N = Op.getNode();
goto found_chain_operand;
}
return false;
found_chain_operand:;
if (N->getOpcode() == ISD::EntryToken)
return false;
}
}
/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
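///
/// Illustration, climbing the chain upward from the outer CALLSEQ_END (each
/// CALLSEQ_END raises NestLevel, each CALLSEQ_BEGIN lowers it):
///
///   CALLSEQ_BEGIN     NestLevel 1 -> 0: matching BEGIN, returned
///     CALLSEQ_BEGIN   NestLevel 2 -> 1: inner call, skipped
///     CALLSEQ_END     NestLevel 1 -> 2
///   CALLSEQ_END       NestLevel 0 -> 1: starting point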
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
const TargetInstrInfo *TII) {
for (;;) {
// For a TokenFactor, examine each operand. There may be multiple ways
// to get to the CALLSEQ_BEGIN, but we need to find the path with the
// most nesting in order to ensure that we find the corresponding match.
if (N->getOpcode() == ISD::TokenFactor) {
SDNode *Best = nullptr;
unsigned BestMaxNest = MaxNest;
for (const SDValue &Op : N->op_values()) {
unsigned MyNestLevel = NestLevel;
unsigned MyMaxNest = MaxNest;
if (SDNode *New = FindCallSeqStart(Op.getNode(),
MyNestLevel, MyMaxNest, TII))
if (!Best || (MyMaxNest > BestMaxNest)) {
Best = New;
BestMaxNest = MyMaxNest;
}
}
assert(Best);
MaxNest = BestMaxNest;
return Best;
}
// Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
if (N->isMachineOpcode()) {
if (N->getMachineOpcode() ==
(unsigned)TII->getCallFrameDestroyOpcode()) {
++NestLevel;
MaxNest = std::max(MaxNest, NestLevel);
} else if (N->getMachineOpcode() ==
(unsigned)TII->getCallFrameSetupOpcode()) {
assert(NestLevel != 0);
--NestLevel;
if (NestLevel == 0)
return N;
}
}
// Otherwise, find the chain and continue climbing.
for (const SDValue &Op : N->op_values())
if (Op.getValueType() == MVT::Other) {
N = Op.getNode();
goto found_chain_operand;
}
return nullptr;
found_chain_operand:;
if (N->getOpcode() == ISD::EntryToken)
return nullptr;
}
}
/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
// Bottom up: release predecessors
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
ReleasePred(SU, &*I);
if (I->isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
"interference on register dependence");
LiveRegDefs[I->getReg()] = I->getSUnit();
if (!LiveRegGens[I->getReg()]) {
++NumLiveRegs;
LiveRegGens[I->getReg()] = SU;
}
}
}
// If we're scheduling a lowered CALLSEQ_END, find the corresponding
// CALLSEQ_BEGIN. Inject an artificial physical register dependence between
// these nodes, to prevent other calls from being interscheduled with them.
unsigned CallResource = TRI->getNumRegs();
if (!LiveRegDefs[CallResource])
for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
if (Node->isMachineOpcode() &&
Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
unsigned NestLevel = 0;
unsigned MaxNest = 0;
SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);
SUnit *Def = &SUnits[N->getNodeId()];
CallSeqEndForStart[Def] = SU;
++NumLiveRegs;
LiveRegDefs[CallResource] = Def;
LiveRegGens[CallResource] = SU;
break;
}
}
/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
if (DisableSchedCycles) {
assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
return;
}
// If the available queue is empty, it is safe to reset MinAvailableCycle.
if (AvailableQueue->empty())
MinAvailableCycle = UINT_MAX;
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
unsigned ReadyCycle = PendingQueue[i]->getHeight();
if (ReadyCycle < MinAvailableCycle)
MinAvailableCycle = ReadyCycle;
if (PendingQueue[i]->isAvailable) {
if (!isReady(PendingQueue[i]))
continue;
AvailableQueue->push(PendingQueue[i]);
}
PendingQueue[i]->isPending = false;
PendingQueue[i] = PendingQueue.back();
PendingQueue.pop_back();
--i; --e;
}
}
/// Advance the scheduler state to the specified cycle.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
if (NextCycle <= CurCycle)
return;
IssueCount = 0;
AvailableQueue->setCurCycle(NextCycle);
if (!HazardRec->isEnabled()) {
// Bypass lots of virtual calls in case of long latency.
CurCycle = NextCycle;
}
else {
for (; CurCycle != NextCycle; ++CurCycle) {
HazardRec->RecedeCycle();
}
}
// FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
// available Q to release pending nodes at least once before popping.
ReleasePending();
}
/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
if (DisableSchedCycles)
return;
// FIXME: Nodes such as CopyFromReg probably should not advance the current
// cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
// has predecessors the cycle will be advanced when they are scheduled.
// But given the crude nature of modeling latency though such nodes, we
// currently need to treat these nodes like real instructions.
// if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;
unsigned ReadyCycle = SU->getHeight();
// Bump CurCycle to account for latency. We assume the latency of other
// available instructions may be hidden by the stall (not a full pipe stall).
// This updates the hazard recognizer's cycle before reserving resources for
// this instruction.
AdvanceToCycle(ReadyCycle);
// Calls are scheduled in their preceding cycle, so don't conflict with
// hazards from instructions after the call. EmitNode will reset the
// scoreboard state before emitting the call.
if (SU->isCall)
return;
// FIXME: For resource conflicts in very long non-pipelined stages, we
// should probably skip ahead here to avoid useless scoreboard checks.
int Stalls = 0;
while (true) {
ScheduleHazardRecognizer::HazardType HT =
HazardRec->getHazardType(SU, -Stalls);
if (HT == ScheduleHazardRecognizer::NoHazard)
break;
++Stalls;
}
AdvanceToCycle(CurCycle + Stalls);
}
/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
if (!HazardRec->isEnabled())
return;
// Check for phys reg copy.
if (!SU->getNode())
return;
switch (SU->getNode()->getOpcode()) {
default:
assert(SU->getNode()->isMachineOpcode() &&
"This target-independent node should not be scheduled.");
break;
case ISD::MERGE_VALUES:
case ISD::TokenFactor:
case ISD::LIFETIME_START:
case ISD::LIFETIME_END:
case ISD::CopyToReg:
case ISD::CopyFromReg:
case ISD::EH_LABEL:
// Noops don't affect the scoreboard state. Copies are likely to be
// removed.
return;
case ISD::INLINEASM:
// For inline asm, clear the pipeline state.
HazardRec->Reset();
return;
}
if (SU->isCall) {
// Calls are scheduled with their preceding instructions. For bottom-up
// scheduling, clear the pipeline state before emitting.
HazardRec->Reset();
}
HazardRec->EmitInstruction(SU);
}
static void resetVRegCycle(SUnit *SU);
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
#ifndef NDEBUG
if (CurCycle < SU->getHeight())
DEBUG(dbgs() << " Height [" << SU->getHeight()
<< "] pipeline stall!\n");
#endif
// FIXME: Do not modify node height. It may interfere with
// backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
// node its ready cycle can aid heuristics, and after scheduling it can
// indicate the scheduled cycle.
SU->setHeightToAtLeast(CurCycle);
// Reserve resources for the scheduled instruction.
EmitNode(SU);
Sequence.push_back(SU);
AvailableQueue->scheduledNode(SU);
// If HazardRec is disabled, and each inst counts as one cycle, then
// advance CurCycle before ReleasePredecessors to avoid useless pushes to
// PendingQueue for schedulers that implement HasReadyFilter.
if (!HazardRec->isEnabled() && AvgIPC < 2)
AdvanceToCycle(CurCycle + 1);
// Update liveness of predecessors before successors to avoid treating a
// two-address node as a live range def.
ReleasePredecessors(SU);
// Release all the implicit physical register defs that are live.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
LiveRegDefs[I->getReg()] = nullptr;
LiveRegGens[I->getReg()] = nullptr;
releaseInterferences(I->getReg());
}
}
// Release the special call resource dependence, if this is the beginning
// of a call.
unsigned CallResource = TRI->getNumRegs();
if (LiveRegDefs[CallResource] == SU)
for (const SDNode *SUNode = SU->getNode(); SUNode;
SUNode = SUNode->getGluedNode()) {
if (SUNode->isMachineOpcode() &&
SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
LiveRegDefs[CallResource] = nullptr;
LiveRegGens[CallResource] = nullptr;
releaseInterferences(CallResource);
}
}
resetVRegCycle(SU);
SU->isScheduled = true;
// Conditions under which the scheduler should eagerly advance the cycle:
// (1) No available instructions
// (2) All pipelines full, so available instructions must have hazards.
//
// If HazardRec is disabled, the cycle was pre-advanced before calling
// ReleasePredecessors. In that case, IssueCount should remain 0.
//
// Check AvailableQueue after ReleasePredecessors in case of zero latency.
if (HazardRec->isEnabled() || AvgIPC > 1) {
if (SU->getNode() && SU->getNode()->isMachineOpcode())
++IssueCount;
if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
|| (!HazardRec->isEnabled() && IssueCount == AvgIPC))
AdvanceToCycle(CurCycle + 1);
}
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
if (PredSU->isAvailable) {
PredSU->isAvailable = false;
if (!PredSU->isPending)
AvailableQueue->remove(PredSU);
}
assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
++PredSU->NumSuccsLeft;
}
/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// state and the states of its predecessors to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
DEBUG(SU->dump(this));
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
CapturePred(&*I);
if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
"Physical register dependency violated?");
--NumLiveRegs;
LiveRegDefs[I->getReg()] = nullptr;
LiveRegGens[I->getReg()] = nullptr;
releaseInterferences(I->getReg());
}
}
// Reclaim the special call resource dependence, if this is the beginning
// of a call.
unsigned CallResource = TRI->getNumRegs();
for (const SDNode *SUNode = SU->getNode(); SUNode;
SUNode = SUNode->getGluedNode()) {
if (SUNode->isMachineOpcode() &&
SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
++NumLiveRegs;
LiveRegDefs[CallResource] = SU;
LiveRegGens[CallResource] = CallSeqEndForStart[SU];
}
}
// Release the special call resource dependence, if this is the end
// of a call.
if (LiveRegGens[CallResource] == SU)
for (const SDNode *SUNode = SU->getNode(); SUNode;
SUNode = SUNode->getGluedNode()) {
if (SUNode->isMachineOpcode() &&
SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
LiveRegDefs[CallResource] = nullptr;
LiveRegGens[CallResource] = nullptr;
releaseInterferences(CallResource);
}
}
for (auto &Succ : SU->Succs) {
if (Succ.isAssignedRegDep()) {
auto Reg = Succ.getReg();
if (!LiveRegDefs[Reg])
++NumLiveRegs;
// This becomes the nearest def. Note that an earlier def may still be
// pending if this is a two-address node.
LiveRegDefs[Reg] = SU;
      // Update LiveRegGens only if it was empty before this unscheduling.
      // This avoids incorrectly overwriting an entry set in a previous run.
if (!LiveRegGens[Reg]) {
// Find the successor with the lowest height.
LiveRegGens[Reg] = Succ.getSUnit();
for (auto &Succ2 : SU->Succs) {
if (Succ2.isAssignedRegDep() && Succ2.getReg() == Reg &&
Succ2.getSUnit()->getHeight() < LiveRegGens[Reg]->getHeight())
LiveRegGens[Reg] = Succ2.getSUnit();
}
}
}
}
if (SU->getHeight() < MinAvailableCycle)
MinAvailableCycle = SU->getHeight();
SU->setHeightDirty();
SU->isScheduled = false;
SU->isAvailable = true;
if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
// Don't make available until backtracking is complete.
SU->isPending = true;
PendingQueue.push_back(SU);
}
else {
AvailableQueue->push(SU);
}
AvailableQueue->unscheduledNode(SU);
}
/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
HazardRec->Reset();
unsigned LookAhead = std::min((unsigned)Sequence.size(),
HazardRec->getMaxLookAhead());
if (LookAhead == 0)
return;
std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
unsigned HazardCycle = (*I)->getHeight();
for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
SUnit *SU = *I;
for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
HazardRec->RecedeCycle();
}
EmitNode(SU);
}
}
/// BacktrackBottomUp - Backtrack scheduling by unscheduling nodes up to and
/// including BtSU, in order to make it possible to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
SUnit *OldSU = Sequence.back();
while (true) {
Sequence.pop_back();
// FIXME: use ready cycle instead of height
CurCycle = OldSU->getHeight();
UnscheduleNodeBottomUp(OldSU);
AvailableQueue->setCurCycle(CurCycle);
if (OldSU == BtSU)
break;
OldSU = Sequence.back();
}
assert(!SU->isSucc(OldSU) && "Something is wrong!");
RestoreHazardCheckerBottomUp();
ReleasePending();
++NumBacktracks;
}
static bool isOperandOf(const SUnit *SU, SDNode *N) {
for (const SDNode *SUNode = SU->getNode(); SUNode;
SUNode = SUNode->getGluedNode()) {
if (SUNode->isOperandOf(N))
return true;
}
return false;
}
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
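/// For a load-folded node this first tries TII->unfoldMemoryOperand to split
/// the node back into a separate load and operation; if the unfolded node is
/// not immediately available, it is then cloned like any other node.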
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
SDNode *N = SU->getNode();
if (!N)
return nullptr;
if (SU->getNode()->getGluedNode())
return nullptr;
SUnit *NewSU;
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
MVT VT = N->getSimpleValueType(i);
if (VT == MVT::Glue)
return nullptr;
else if (VT == MVT::Other)
TryUnfold = true;
}
for (const SDValue &Op : N->op_values()) {
MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
if (VT == MVT::Glue)
return nullptr;
}
if (TryUnfold) {
SmallVector<SDNode*, 2> NewNodes;
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
return nullptr;
    // Unfolding an x86 DEC64m operation results in a store, a dec, and a
    // load, which can't be handled here, so quit.
if (NewNodes.size() == 3)
return nullptr;
DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
N = NewNodes[1];
SDNode *LoadNode = NewNodes[0];
unsigned NumVals = N->getNumValues();
unsigned OldNumVals = SU->getNode()->getNumValues();
for (unsigned i = 0; i != NumVals; ++i)
DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
SDValue(LoadNode, 1));
    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
bool isNewLoad = true;
SUnit *LoadSU;
if (LoadNode->getNodeId() != -1) {
LoadSU = &SUnits[LoadNode->getNodeId()];
isNewLoad = false;
} else {
LoadSU = CreateNewSUnit(LoadNode);
LoadNode->setNodeId(LoadSU->NodeNum);
InitNumRegDefsLeft(LoadSU);
computeLatency(LoadSU);
}
    NewSU = CreateNewSUnit(N);
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NewSU->NodeNum);
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
NewSU->isTwoAddress = true;
break;
}
}
if (MCID.isCommutable())
NewSU->isCommutable = true;
InitNumRegDefsLeft(NewSU);
computeLatency(NewSU);
// Record all the edges to and from the old SU, by category.
SmallVector<SDep, 4> ChainPreds;
SmallVector<SDep, 4> ChainSuccs;
SmallVector<SDep, 4> LoadPreds;
SmallVector<SDep, 4> NodePreds;
SmallVector<SDep, 4> NodeSuccs;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
ChainPreds.push_back(*I);
else if (isOperandOf(I->getSUnit(), LoadNode))
LoadPreds.push_back(*I);
else
NodePreds.push_back(*I);
}
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isCtrl())
ChainSuccs.push_back(*I);
else
NodeSuccs.push_back(*I);
}
// Now assign edges to the newly-created nodes.
for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
const SDep &Pred = ChainPreds[i];
RemovePred(SU, Pred);
if (isNewLoad)
AddPred(LoadSU, Pred);
}
for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
const SDep &Pred = LoadPreds[i];
RemovePred(SU, Pred);
if (isNewLoad)
AddPred(LoadSU, Pred);
}
for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
const SDep &Pred = NodePreds[i];
RemovePred(SU, Pred);
AddPred(NewSU, Pred);
}
for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
SDep D = NodeSuccs[i];
SUnit *SuccDep = D.getSUnit();
D.setSUnit(SU);
RemovePred(SuccDep, D);
D.setSUnit(NewSU);
AddPred(SuccDep, D);
// Balance register pressure.
if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
&& !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
--NewSU->NumRegDefsLeft;
}
for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
SDep D = ChainSuccs[i];
SUnit *SuccDep = D.getSUnit();
D.setSUnit(SU);
RemovePred(SuccDep, D);
if (isNewLoad) {
D.setSUnit(LoadSU);
AddPred(SuccDep, D);
}
}
// Add a data dependency to reflect that NewSU reads the value defined
// by LoadSU.
SDep D(LoadSU, SDep::Data, 0);
D.setLatency(LoadSU->Latency);
AddPred(NewSU, D);
if (isNewLoad)
AvailableQueue->addNode(LoadSU);
AvailableQueue->addNode(NewSU);
++NumUnfolds;
if (NewSU->NumSuccsLeft == 0) {
NewSU->isAvailable = true;
return NewSU;
}
SU = NewSU;
}
DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
NewSU = CreateClone(SU);
// New SUnit has the exact same predecessors.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I)
if (!I->isArtificial())
AddPred(NewSU, *I);
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isArtificial())
continue;
SUnit *SuccSU = I->getSUnit();
if (SuccSU->isScheduled) {
SDep D = *I;
D.setSUnit(NewSU);
AddPred(SuccSU, D);
D.setSUnit(SU);
DelDeps.push_back(std::make_pair(SuccSU, D));
}
}
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
RemovePred(DelDeps[i].first, DelDeps[i].second);
AvailableQueue->updateNode(SU);
AvailableQueue->addNode(NewSU);
++NumDups;
return NewSU;
}
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
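/// The value flows SU -> CopyFromSU (SrcRC -> DestRC) -> CopyToSU (DestRC ->
/// SrcRC), and the already-scheduled successors are rewired to depend on
/// CopyToSU, breaking the physical register interference.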
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
SmallVectorImpl<SUnit*> &Copies) {
SUnit *CopyFromSU = CreateNewSUnit(nullptr);
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
SUnit *CopyToSU = CreateNewSUnit(nullptr);
CopyToSU->CopySrcRC = DestRC;
CopyToSU->CopyDstRC = SrcRC;
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isArtificial())
continue;
SUnit *SuccSU = I->getSUnit();
if (SuccSU->isScheduled) {
SDep D = *I;
D.setSUnit(CopyToSU);
AddPred(SuccSU, D);
DelDeps.push_back(std::make_pair(SuccSU, *I));
}
else {
// Avoid scheduling the def-side copy before other successors. Otherwise
// we could introduce another physreg interference on the copy and
// continue inserting copies indefinitely.
AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
}
}
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
RemovePred(DelDeps[i].first, DelDeps[i].second);
SDep FromDep(SU, SDep::Data, Reg);
FromDep.setLatency(SU->Latency);
AddPred(CopyFromSU, FromDep);
SDep ToDep(CopyFromSU, SDep::Data, 0);
ToDep.setLatency(CopyFromSU->Latency);
AddPred(CopyToSU, ToDep);
AvailableQueue->updateNode(SU);
AvailableQueue->addNode(CopyFromSU);
AvailableQueue->addNode(CopyToSU);
Copies.push_back(CopyFromSU);
Copies.push_back(CopyToSU);
++NumPRCopies;
}
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
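/// Result types are laid out as [explicit defs..., implicit defs...], so the
/// index of Reg within the implicit def list, offset by getNumDefs(), selects
/// the value type. E.g. with one explicit def and implicit defs {A, B},
/// asking for B yields result type index 2.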
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const TargetInstrInfo *TII) {
unsigned NumRes;
if (N->getOpcode() == ISD::CopyFromReg) {
// CopyFromReg has: "chain, Val, glue" so operand 1 gives the type.
NumRes = 1;
} else {
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
NumRes = MCID.getNumDefs();
for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
if (Reg == *ImpDef)
break;
++NumRes;
}
}
return N->getSimpleValueType(NumRes);
}
/// CheckForLiveRegDef - Add to LRegs any "live" registers that the specified
/// register def of the specified SUnit would clobber.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
SmallVectorImpl<unsigned> &LRegs,
const TargetRegisterInfo *TRI) {
for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {
    // Check if this alias is live.
if (!LiveRegDefs[*AliasI]) continue;
// Allow multiple uses of the same def.
if (LiveRegDefs[*AliasI] == SU) continue;
// Add Reg to the set of interfering live regs.
if (RegAdded.insert(*AliasI).second) {
LRegs.push_back(*AliasI);
}
}
}
/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
SmallVectorImpl<unsigned> &LRegs) {
// Look at all live registers. Skip Reg0 and the special CallResource.
for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
if (!LiveRegDefs[i]) continue;
if (LiveRegDefs[i] == SU) continue;
if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
if (RegAdded.insert(i).second)
LRegs.push_back(i);
}
}
/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
for (const SDValue &Op : N->op_values())
if (const auto *RegOp = dyn_cast<RegisterMaskSDNode>(Op.getNode()))
return RegOp->getRegMask();
return nullptr;
}
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
if (NumLiveRegs == 0)
return false;
SmallSet<unsigned, 4> RegAdded;
// If this node would clobber any "live" register, then it's not ready.
//
// If SU is the currently live definition of the same register that it uses,
// then we are free to schedule it.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
RegAdded, LRegs, TRI);
}
for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
if (Node->getOpcode() == ISD::INLINEASM) {
// Inline asm can clobber physical defs.
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
--NumOps; // Ignore the glue operand.
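      // Inline asm operands are encoded in groups: a flag word carrying the
      // kind and register count, followed by that many register operands.
      // Walk each group and collect any defined or clobbered physregs.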
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
++i; // Skip the ID value.
if (InlineAsm::isRegDefKind(Flags) ||
InlineAsm::isRegDefEarlyClobberKind(Flags) ||
InlineAsm::isClobberKind(Flags)) {
// Check for def of register or earlyclobber register.
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
} else
i += NumVals;
}
continue;
}
if (!Node->isMachineOpcode())
continue;
// If we're in the middle of scheduling a call, don't begin scheduling
// another call. Also, don't allow any physical registers to be live across
// the call.
if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
// Check the special calling-sequence resource.
unsigned CallResource = TRI->getNumRegs();
if (LiveRegDefs[CallResource]) {
SDNode *Gen = LiveRegGens[CallResource]->getNode();
while (SDNode *Glued = Gen->getGluedNode())
Gen = Glued;
if (!IsChainDependent(Gen, Node, 0, TII) &&
RegAdded.insert(CallResource).second)
LRegs.push_back(CallResource);
}
}
if (const uint32_t *RegMask = getNodeRegMask(Node))
CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);
const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
if (!MCID.ImplicitDefs)
continue;
for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
return !LRegs.empty();
}
void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
// Add the nodes that aren't ready back onto the available list.
for (unsigned i = Interferences.size(); i > 0; --i) {
SUnit *SU = Interferences[i-1];
LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
if (Reg) {
SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
continue;
}
SU->isPending = false;
// The interfering node may no longer be available due to backtracking.
// Furthermore, it may have been made available again, in which case it is
// now already in the AvailableQueue.
if (SU->isAvailable && !SU->NodeQueueId) {
DEBUG(dbgs() << " Repushing SU #" << SU->NodeNum << '\n');
AvailableQueue->push(SU);
}
if (i < Interferences.size())
Interferences[i-1] = Interferences.back();
Interferences.pop_back();
LRegsMap.erase(LRegsPos);
}
}
/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
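///
/// Interferences are resolved in order of increasing cost: first by
/// backtracking (unscheduling back to the interfering generator), then by
/// duplicating the expensive-to-copy def, and finally by inserting cross
/// register class copies.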
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();
while (CurSU) {
SmallVector<unsigned, 4> LRegs;
if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
break;
DEBUG(dbgs() << " Interfering reg " <<
(LRegs[0] == TRI->getNumRegs() ? "CallResource"
: TRI->getName(LRegs[0]))
<< " SU #" << CurSU->NodeNum << '\n');
std::pair<LRegsMapT::iterator, bool> LRegsPair =
LRegsMap.insert(std::make_pair(CurSU, LRegs));
if (LRegsPair.second) {
CurSU->isPending = true; // This SU is not in AvailableQueue right now.
Interferences.push_back(CurSU);
}
else {
assert(CurSU->isPending && "Interferences are pending");
// Update the interference with current live regs.
LRegsPair.first->second = LRegs;
}
CurSU = AvailableQueue->pop();
}
if (CurSU)
return CurSU;
// All candidates are delayed due to live physical reg dependencies.
// Try backtracking, code duplication, or inserting cross class copies
// to resolve it.
for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
SUnit *TrySU = Interferences[i];
SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
// Try unscheduling up to the point where it's safe to schedule
// this node.
SUnit *BtSU = nullptr;
unsigned LiveCycle = UINT_MAX;
for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
unsigned Reg = LRegs[j];
if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
BtSU = LiveRegGens[Reg];
LiveCycle = BtSU->getHeight();
}
}
if (!WillCreateCycle(TrySU, BtSU)) {
// BacktrackBottomUp mutates Interferences!
BacktrackBottomUp(TrySU, BtSU);
// Force the current node to be scheduled before the node that
// requires the physical reg dep.
if (BtSU->isAvailable) {
BtSU->isAvailable = false;
if (!BtSU->isPending)
AvailableQueue->remove(BtSU);
}
DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum << ") to SU("
<< TrySU->NodeNum << ")\n");
AddPred(TrySU, SDep(BtSU, SDep::Artificial));
// If one or more successors has been unscheduled, then the current
// node is no longer available.
if (!TrySU->isAvailable || !TrySU->NodeQueueId)
CurSU = AvailableQueue->pop();
else {
// Available and in AvailableQueue
AvailableQueue->remove(TrySU);
CurSU = TrySU;
}
// Interferences has been mutated. We must break.
break;
}
}
if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
// insert cross class copies.
// If it's not too expensive, i.e. cost != -1, issue copies.
SUnit *TrySU = Interferences[0];
SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
const TargetRegisterClass *RC =
TRI->getMinimalPhysRegClass(Reg, VT);
const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value, but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to
    // copy the value at all.
SUnit *NewDef = nullptr;
if (DestRC != RC) {
NewDef = CopyAndMoveSuccessors(LRDef);
if (!DestRC && !NewDef)
report_fatal_error("Can't handle live physical register dependency!");
}
if (!NewDef) {
// Issue copies, these can be expensive cross register class copies.
SmallVector<SUnit*, 2> Copies;
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
<< " to SU #" << Copies.front()->NodeNum << "\n");
AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
NewDef = Copies.back();
}
DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
<< " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
AddPred(NewDef, SDep(TrySU, SDep::Artificial));
TrySU->isAvailable = false;
CurSU = NewDef;
}
assert(CurSU && "Unable to resolve live physical register dependencies!");
return CurSU;
}
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
// Release any predecessors of the special Exit node.
ReleasePredecessors(&ExitSU);
// Add root to Available queue.
if (!SUnits.empty()) {
SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
RootSU->isAvailable = true;
AvailableQueue->push(RootSU);
}
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
Sequence.reserve(SUnits.size());
while (!AvailableQueue->empty() || !Interferences.empty()) {
DEBUG(dbgs() << "\nExamining Available:\n";
AvailableQueue->dump(this));
// Pick the best node to schedule taking all constraints into
// consideration.
SUnit *SU = PickNodeToScheduleBottomUp();
AdvancePastStalls(SU);
ScheduleNodeBottomUp(SU);
while (AvailableQueue->empty() && !PendingQueue.empty()) {
// Advance the cycle to free resources. Skip ahead to the next ready SU.
assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
}
}
// Reverse the order if it is bottom up.
std::reverse(Sequence.begin(), Sequence.end());
#ifndef NDEBUG
VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}
//===----------------------------------------------------------------------===//
// RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;
struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};
#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
SF &SortFunc;
reverse_sort(SF &sf) : SortFunc(sf) {}
bool operator()(SUnit* left, SUnit* right) const {
// reverse left/right rather than simply !SortFunc(left, right)
// to expose different paths in the comparison logic.
return SortFunc(right, left);
}
};
#endif // NDEBUG
/// bu_ls_rr_sort - Priority function for bottom up register pressure
/// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
enum {
IsBottomUp = true,
HasReadyFilter = false
};
RegReductionPQBase *SPQ;
bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
bool operator()(SUnit* left, SUnit* right) const;
};
// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
enum {
IsBottomUp = true,
HasReadyFilter = false
};
RegReductionPQBase *SPQ;
src_ls_rr_sort(RegReductionPQBase *spq)
: SPQ(spq) {}
bool operator()(SUnit* left, SUnit* right) const;
};
// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
enum {
IsBottomUp = true,
HasReadyFilter = false
};
RegReductionPQBase *SPQ;
hybrid_ls_rr_sort(RegReductionPQBase *spq)
: SPQ(spq) {}
bool isReady(SUnit *SU, unsigned CurCycle) const;
bool operator()(SUnit* left, SUnit* right) const;
};
// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
enum {
IsBottomUp = true,
HasReadyFilter = false
};
RegReductionPQBase *SPQ;
ilp_ls_rr_sort(RegReductionPQBase *spq)
: SPQ(spq) {}
bool isReady(SUnit *SU, unsigned CurCycle) const;
bool operator()(SUnit* left, SUnit* right) const;
};
class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
std::vector<SUnit*> Queue;
unsigned CurQueueId;
bool TracksRegPressure;
bool SrcOrder;
// SUnits - The SUnits for the current graph.
std::vector<SUnit> *SUnits;
MachineFunction &MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const TargetLowering *TLI;
ScheduleDAGRRList *scheduleDAG;
// SethiUllmanNumbers - The SethiUllman number for each node.
std::vector<unsigned> SethiUllmanNumbers;
/// RegPressure - Tracking current reg pressure per register class.
///
std::vector<unsigned> RegPressure;
/// RegLimit - Tracking the number of allocatable registers per register
/// class.
std::vector<unsigned> RegLimit;
public:
RegReductionPQBase(MachineFunction &mf,
bool hasReadyFilter,
bool tracksrp,
bool srcorder,
const TargetInstrInfo *tii,
const TargetRegisterInfo *tri,
const TargetLowering *tli)
: SchedulingPriorityQueue(hasReadyFilter),
CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(nullptr) {
if (TracksRegPressure) {
unsigned NumRC = TRI->getNumRegClasses();
RegLimit.resize(NumRC);
RegPressure.resize(NumRC);
std::fill(RegLimit.begin(), RegLimit.end(), 0);
std::fill(RegPressure.begin(), RegPressure.end(), 0);
for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
E = TRI->regclass_end(); I != E; ++I)
RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
}
}
void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
scheduleDAG = scheduleDag;
}
ScheduleHazardRecognizer* getHazardRec() {
return scheduleDAG->getHazardRec();
}
void initNodes(std::vector<SUnit> &sunits) override;
void addNode(const SUnit *SU) override;
void updateNode(const SUnit *SU) override;
void releaseState() override {
SUnits = nullptr;
SethiUllmanNumbers.clear();
std::fill(RegPressure.begin(), RegPressure.end(), 0);
}
unsigned getNodePriority(const SUnit *SU) const;
unsigned getNodeOrdering(const SUnit *SU) const {
if (!SU->getNode()) return 0;
return SU->getNode()->getIROrder();
}
bool empty() const override { return Queue.empty(); }
void push(SUnit *U) override {
assert(!U->NodeQueueId && "Node in the queue already");
U->NodeQueueId = ++CurQueueId;
Queue.push_back(U);
}
void remove(SUnit *SU) override {
assert(!Queue.empty() && "Queue is empty!");
assert(SU->NodeQueueId != 0 && "Not in queue!");
std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
SU);
if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
SU->NodeQueueId = 0;
}
bool tracksRegPressure() const override { return TracksRegPressure; }
void dumpRegPressure() const;
bool HighRegPressure(const SUnit *SU) const;
bool MayReduceRegPressure(SUnit *SU) const;
int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
void scheduledNode(SUnit *SU) override;
void unscheduledNode(SUnit *SU) override;
protected:
bool canClobber(const SUnit *SU, const SUnit *Op);
void AddPseudoTwoAddrDeps();
void PrescheduleNodesWithMultipleUses();
void CalculateSethiUllmanNumbers();
};
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
std::vector<SUnit *>::iterator Best = Q.begin();
for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
E = Q.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
SUnit *V = *Best;
if (Best != std::prev(Q.end()))
std::swap(*Best, Q.back());
Q.pop_back();
return V;
}
template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
if (DAG->StressSched) {
reverse_sort<SF> RPicker(Picker);
return popFromQueueImpl(Q, RPicker);
}
#endif
(void)DAG;
return popFromQueueImpl(Q, Picker);
}
template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
SF Picker;
public:
RegReductionPriorityQueue(MachineFunction &mf,
bool tracksrp,
bool srcorder,
const TargetInstrInfo *tii,
const TargetRegisterInfo *tri,
const TargetLowering *tli)
: RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
tii, tri, tli),
Picker(this) {}
bool isBottomUp() const override { return SF::IsBottomUp; }
bool isReady(SUnit *U) const override {
return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
}
SUnit *pop() override {
if (Queue.empty()) return nullptr;
SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
V->NodeQueueId = 0;
return V;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void dump(ScheduleDAG *DAG) const override {
// Emulate pop() without clobbering NodeQueueIds.
std::vector<SUnit*> DumpQueue = Queue;
SF DumpPicker = Picker;
while (!DumpQueue.empty()) {
SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
dbgs() << "Height " << SU->getHeight() << ": ";
SU->dump(DAG);
}
}
#endif
};
typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;
typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;
typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;
typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//
// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
bool LSchedLow = left->isScheduleLow;
bool RSchedLow = right->isScheduleLow;
if (LSchedLow != RSchedLow)
return LSchedLow < RSchedLow ? 1 : -1;
return 0;
}
/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
/// Smaller number is the higher priority.
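///
/// A worked example of the variant implemented below: a node takes the
/// maximum Sethi-Ullman number of its data predecessors, plus one for every
/// additional predecessor that ties that maximum; leaves get 1.
///
///   leaf(1)   leaf(1)
///        \     /
///        node(2)   <- max(1,1) plus one for the tie
///           |
///        node(2)   <- single pred, no tie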
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
if (SethiUllmanNumber != 0)
return SethiUllmanNumber;
unsigned Extra = 0;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl()) continue; // ignore chain preds
SUnit *PredSU = I->getSUnit();
unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
if (PredSethiUllman > SethiUllmanNumber) {
SethiUllmanNumber = PredSethiUllman;
Extra = 0;
} else if (PredSethiUllman == SethiUllmanNumber)
++Extra;
}
SethiUllmanNumber += Extra;
if (SethiUllmanNumber == 0)
SethiUllmanNumber = 1;
return SethiUllmanNumber;
}
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
SethiUllmanNumbers.assign(SUnits->size(), 0);
for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}
void RegReductionPQBase::addNode(const SUnit *SU) {
unsigned SUSize = SethiUllmanNumbers.size();
if (SUnits->size() > SUSize)
SethiUllmanNumbers.resize(SUSize*2, 0);
CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}
void RegReductionPQBase::updateNode(const SUnit *SU) {
SethiUllmanNumbers[SU->NodeNum] = 0;
CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}
// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
assert(SU->NodeNum < SethiUllmanNumbers.size());
unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
// CopyToReg should be close to its uses to facilitate coalescing and
// avoid spilling.
return 0;
if (Opc == TargetOpcode::EXTRACT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG ||
Opc == TargetOpcode::INSERT_SUBREG)
// EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
// close to their uses to facilitate coalescing.
return 0;
if (SU->NumSuccs == 0 && SU->NumPreds != 0)
// If SU does not have a register use, i.e. it doesn't produce a value
// that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so that it will be
    // scheduled right before its predecessors and doesn't lengthen their
    // live ranges.
return 0xffff;
if (SU->NumPreds == 0 && SU->NumSuccs != 0)
// If SU does not have a register def, schedule it close to its uses
// because it does not lengthen any live ranges.
return 0;
#if 1
return SethiUllmanNumbers[SU->NodeNum];
#else
unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
if (SU->isCallOp) {
// FIXME: This assumes all of the defs are used as call operands.
int NP = (int)Priority - SU->getNode()->getNumValues();
return (NP > 0) ? NP : 0;
}
return Priority;
#endif
}
//===----------------------------------------------------------------------===//
// Register Pressure Tracking
//===----------------------------------------------------------------------===//
void RegReductionPQBase::dumpRegPressure() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
E = TRI->regclass_end(); I != E; ++I) {
const TargetRegisterClass *RC = *I;
unsigned Id = RC->getID();
unsigned RP = RegPressure[Id];
if (!RP) continue;
DEBUG(dbgs() << TRI->getRegClassName(RC) << ": " << RP << " / "
<< RegLimit[Id] << '\n');
}
#endif
}
bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
if (!TLI)
return false;
for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
continue;
SUnit *PredSU = I->getSUnit();
// NumRegDefsLeft is zero when enough uses of this node have been scheduled
// to cover the number of registers defined (they are all live).
if (PredSU->NumRegDefsLeft == 0) {
continue;
}
for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
RegDefPos.IsValid(); RegDefPos.Advance()) {
unsigned RCId, Cost;
GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
return true;
}
}
return false;
}
bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
const SDNode *N = SU->getNode();
if (!N->isMachineOpcode() || !SU->NumSuccs)
return false;
unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
for (unsigned i = 0; i != NumDefs; ++i) {
MVT VT = N->getSimpleValueType(i);
if (!N->hasAnyUseOfValue(i))
continue;
unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
if (RegPressure[RCId] >= RegLimit[RCId])
return true;
}
return false;
}
// Compute the register pressure contribution of this instruction by counting
// up for uses that are not live and down for defs. Only count register classes
// that are already under high pressure. As a side effect, compute the number of
// uses of registers that are already live.
//
// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
// so could probably be factored.
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
LiveUses = 0;
int PDiff = 0;
for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
continue;
SUnit *PredSU = I->getSUnit();
// NumRegDefsLeft is zero when enough uses of this node have been scheduled
// to cover the number of registers defined (they are all live).
if (PredSU->NumRegDefsLeft == 0) {
if (PredSU->getNode()->isMachineOpcode())
++LiveUses;
continue;
}
for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
RegDefPos.IsValid(); RegDefPos.Advance()) {
MVT VT = RegDefPos.GetValue();
unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
if (RegPressure[RCId] >= RegLimit[RCId])
++PDiff;
}
}
const SDNode *N = SU->getNode();
if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
return PDiff;
unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
for (unsigned i = 0; i != NumDefs; ++i) {
MVT VT = N->getSimpleValueType(i);
if (!N->hasAnyUseOfValue(i))
continue;
unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
if (RegPressure[RCId] >= RegLimit[RCId])
--PDiff;
}
return PDiff;
}
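// Sign convention, with a hypothetical example: scheduling SU bottom-up makes
// its operands live and ends the live ranges of its own defs. If SU reads two
// values from a register class already at its limit and defines one value in
// that same saturated class, the loops above produce PDiff = +2 - 1 = +1,
// i.e. scheduling SU is expected to increase pressure.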
void RegReductionPQBase::scheduledNode(SUnit *SU) {
if (!TracksRegPressure)
return;
if (!SU->getNode())
return;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
continue;
SUnit *PredSU = I->getSUnit();
// NumRegDefsLeft is zero when enough uses of this node have been scheduled
// to cover the number of registers defined (they are all live).
if (PredSU->NumRegDefsLeft == 0) {
continue;
}
// FIXME: The ScheduleDAG currently loses information about which of a
// node's values is consumed by each dependence. Consequently, if the node
// defines multiple register classes, we don't know which to pressurize
// here. Instead the following loop consumes the register defs in an
// arbitrary order. At least it handles the common case of clustered loads
// to the same class. For precise liveness, each SDep needs to indicate the
// result number. But that tightly couples the ScheduleDAG with the
// SelectionDAG making updates tricky. A simpler hack would be to attach a
// value type or register class to SDep.
//
// The most important aspect of register tracking is balancing the increase
// here with the reduction further below. Note that this SU may use multiple
    // defs in PredSU. That can't be determined here, but we've already
// compensated by reducing NumRegDefsLeft in PredSU during
// ScheduleDAGSDNodes::AddSchedEdges.
--PredSU->NumRegDefsLeft;
unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
if (SkipRegDefs)
continue;
unsigned RCId, Cost;
GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
RegPressure[RCId] += Cost;
break;
}
}
// We should have this assert, but there may be dead SDNodes that never
// materialize as SUnits, so they don't appear to generate liveness.
//assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
int SkipRegDefs = (int)SU->NumRegDefsLeft;
for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
if (SkipRegDefs > 0)
continue;
unsigned RCId, Cost;
GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
if (RegPressure[RCId] < Cost) {
// Register pressure tracking is imprecise. This can happen. But we try
// hard not to let it happen because it likely results in poor scheduling.
DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
RegPressure[RCId] = 0;
}
else {
RegPressure[RCId] -= Cost;
}
}
dumpRegPressure();
}
void RegReductionPQBase::unscheduledNode(SUnit *SU) {
if (!TracksRegPressure)
return;
const SDNode *N = SU->getNode();
if (!N) return;
if (!N->isMachineOpcode()) {
if (N->getOpcode() != ISD::CopyToReg)
return;
} else {
unsigned Opc = N->getMachineOpcode();
if (Opc == TargetOpcode::EXTRACT_SUBREG ||
Opc == TargetOpcode::INSERT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG ||
Opc == TargetOpcode::REG_SEQUENCE ||
Opc == TargetOpcode::IMPLICIT_DEF)
return;
}
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
continue;
SUnit *PredSU = I->getSUnit();
// NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
// counts data deps.
if (PredSU->NumSuccsLeft != PredSU->Succs.size())
continue;
const SDNode *PN = PredSU->getNode();
if (!PN->isMachineOpcode()) {
if (PN->getOpcode() == ISD::CopyFromReg) {
MVT VT = PN->getSimpleValueType(0);
unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
}
continue;
}
unsigned POpc = PN->getMachineOpcode();
if (POpc == TargetOpcode::IMPLICIT_DEF)
continue;
if (POpc == TargetOpcode::EXTRACT_SUBREG ||
POpc == TargetOpcode::INSERT_SUBREG ||
POpc == TargetOpcode::SUBREG_TO_REG) {
MVT VT = PN->getSimpleValueType(0);
unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
continue;
}
unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
for (unsigned i = 0; i != NumDefs; ++i) {
MVT VT = PN->getSimpleValueType(i);
if (!PN->hasAnyUseOfValue(i))
continue;
unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
// Register pressure tracking is imprecise. This can happen.
RegPressure[RCId] = 0;
else
RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
}
}
// Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
// may transfer data dependencies to CopyToReg.
if (SU->NumSuccs && N->isMachineOpcode()) {
unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
MVT VT = N->getSimpleValueType(i);
if (VT == MVT::Glue || VT == MVT::Other)
continue;
if (!N->hasAnyUseOfValue(i))
continue;
unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
}
}
dumpRegPressure();
}
//===----------------------------------------------------------------------===//
// Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
unsigned MaxHeight = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isCtrl()) continue; // ignore chain succs
unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
if (I->getSUnit()->getNode() &&
I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
Height = closestSucc(I->getSUnit())+1;
if (Height > MaxHeight)
MaxHeight = Height;
}
return MaxHeight;
}
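// Example (hypothetical): stacked copies are collapsed, so a node whose only
// data successor chain is CopyToReg -> CopyToReg -> use at height 7 reports
// 9 (the use's height plus one per copy) rather than the first copy's own
// height.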
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
unsigned Scratches = 0;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl()) continue; // ignore chain preds
Scratches++;
}
return Scratches;
}
/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
bool RetVal = false;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl()) continue;
const SUnit *PredSU = I->getSUnit();
if (PredSU->getNode() &&
PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
unsigned Reg =
cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
RetVal = true;
continue;
}
}
return false;
}
return RetVal;
}
/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
bool RetVal = false;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isCtrl()) continue;
const SUnit *SuccSU = I->getSUnit();
if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
unsigned Reg =
cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
RetVal = true;
continue;
}
}
return false;
}
return RetVal;
}
// Set isVRegCycle for a node with only live in opers and live out uses. Also
// set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable whose operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
static void initVRegCycle(SUnit *SU) {
if (DisableSchedVRegCycle)
return;
if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
return;
DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
SU->isVRegCycle = true;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl()) continue;
I->getSUnit()->isVRegCycle = true;
}
}
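// Sketch of the intended pattern (hypothetical IR): in a single-block loop
// containing
//   %iv.next = add %iv, 1
// where the add's only data operand is a CopyFromReg of %iv and its only data
// use is a CopyToReg of %iv.next, the add and its CopyFromReg operand are both
// marked isVRegCycle. Other readers of %iv are then preferred first, so the
// add becomes the kill of %iv and the two registers can be coalesced.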
// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
// CopyFromReg operands. We should no longer penalize other uses of this VReg.
static void resetVRegCycle(SUnit *SU) {
if (!SU->isVRegCycle)
return;
for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl()) continue; // ignore chain preds
SUnit *PredSU = I->getSUnit();
if (PredSU->isVRegCycle) {
assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
"VRegCycle def must be CopyFromReg");
I->getSUnit()->isVRegCycle = 0;
}
}
}
// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
// means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
// If this SU also defines the VReg, don't hoist it as a "use".
if (SU->isVRegCycle)
return false;
for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl()) continue; // ignore chain preds
if (I->getSUnit()->isVRegCycle &&
I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
return true;
}
}
return false;
}
// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
if ((int)SPQ->getCurCycle() < Height) return true;
if (SPQ->getHazardRec()->getHazardType(SU, 0)
!= ScheduleHazardRecognizer::NoHazard)
return true;
return false;
}
// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
RegReductionPQBase *SPQ) {
// Scheduling an instruction that uses a VReg whose postincrement has not yet
// been scheduled will induce a copy. Model this as an extra cycle of latency.
int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
int LHeight = (int)left->getHeight() + LPenalty;
int RHeight = (int)right->getHeight() + RPenalty;
bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
BUHasStall(left, LHeight, SPQ);
bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
BUHasStall(right, RHeight, SPQ);
  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If scheduling both of the nodes will cause pipeline stalls, sort them
  // according to their height.
if (LStall) {
if (!RStall)
return 1;
if (LHeight != RHeight)
return LHeight > RHeight ? 1 : -1;
} else if (RStall)
return -1;
// If either node is scheduling for latency, sort them by height/depth
// and latency.
if (!checkPref || (left->SchedulingPref == Sched::ILP ||
right->SchedulingPref == Sched::ILP)) {
    // If neither instruction stalls (!LStall && !RStall) and the
    // HazardRecognizer is enabled (grouping instructions by cycle), then the
    // height is already covered, so only depth matters. We also reach this
    // point if both stall but have the same height.
if (!SPQ->getHazardRec()->isEnabled()) {
if (LHeight != RHeight)
return LHeight > RHeight ? 1 : -1;
}
int LDepth = left->getDepth() - LPenalty;
int RDepth = right->getDepth() - RPenalty;
if (LDepth != RDepth) {
DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
<< ") depth " << LDepth << " vs SU (" << right->NodeNum
<< ") depth " << RDepth << "\n");
return LDepth < RDepth ? 1 : -1;
}
if (left->Latency != right->Latency)
return left->Latency > right->Latency ? 1 : -1;
}
return 0;
}
static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
// Schedule physical register definitions close to their use. This is
// motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
// long as shortening physreg live ranges is generally good, we can defer
// creating a subtarget hook.
if (!DisableSchedPhysRegJoin) {
bool LHasPhysReg = left->hasPhysRegDefs;
bool RHasPhysReg = right->hasPhysRegDefs;
if (LHasPhysReg != RHasPhysReg) {
#ifndef NDEBUG
static const char *const PhysRegMsg[] = { " has no physreg",
" defines a physreg" };
#endif
DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
<< PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
<< PhysRegMsg[RHasPhysReg] << "\n");
return LHasPhysReg < RHasPhysReg;
}
}
  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
unsigned LPriority = SPQ->getNodePriority(left);
unsigned RPriority = SPQ->getNodePriority(right);
// Be really careful about hoisting call operands above previous calls.
  // Only allow it if it would reduce register pressure.
if (left->isCall && right->isCallOp) {
unsigned RNumVals = right->getNode()->getNumValues();
RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
}
if (right->isCall && left->isCallOp) {
unsigned LNumVals = left->getNode()->getNumValues();
LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
}
if (LPriority != RPriority)
return LPriority > RPriority;
  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
  // the same, then keep source order.
if (left->isCall || right->isCall) {
unsigned LOrder = SPQ->getNodeOrdering(left);
unsigned ROrder = SPQ->getNodeOrdering(right);
    // Prefer the node with the lower non-zero order number; an order number
    // of zero means no preference.
if ((LOrder || ROrder) && LOrder != ROrder)
return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
}
// Try schedule def + use closer when Sethi-Ullman numbers are the same.
// e.g.
// t1 = op t2, c1
// t3 = op t4, c2
//
// and the following instructions are both ready.
// t2 = op c3
// t4 = op c4
//
// Then schedule t2 = op first.
// i.e.
// t4 = op c4
// t2 = op c3
// t1 = op t2, c1
// t3 = op t4, c2
//
// This creates more short live intervals.
unsigned LDist = closestSucc(left);
unsigned RDist = closestSucc(right);
if (LDist != RDist)
return LDist < RDist;
  // How many registers become live when the node is scheduled.
unsigned LScratch = calcMaxScratches(left);
unsigned RScratch = calcMaxScratches(right);
if (LScratch != RScratch)
return LScratch > RScratch;
// Comparing latency against a call makes little sense unless the node
// is register pressure-neutral.
if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
return (left->NodeQueueId > right->NodeQueueId);
// Do not compare latencies when one or both of the nodes are calls.
if (!DisableSchedCycles &&
!(left->isCall || right->isCall)) {
int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
if (result != 0)
return result > 0;
}
else {
if (left->getHeight() != right->getHeight())
return left->getHeight() > right->getHeight();
if (left->getDepth() != right->getDepth())
return left->getDepth() < right->getDepth();
}
assert(left->NodeQueueId && right->NodeQueueId &&
"NodeQueueId cannot be zero");
return (left->NodeQueueId > right->NodeQueueId);
}
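// Taken together, BURRSort breaks ties in this order (under the default
// flags): physreg-defining nodes are kept close to their uses, then
// Sethi-Ullman priority, then source order when calls are involved, then
// distance to the closest successor, then the scratch (live-use) count, then
// latency (or raw height/depth around calls), and finally NodeQueueId as the
// deterministic fallback.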
// Bottom up
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
if (int res = checkSpecialNodes(left, right))
return res > 0;
return BURRSort(left, right, SPQ);
}
// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
if (int res = checkSpecialNodes(left, right))
return res > 0;
unsigned LOrder = SPQ->getNodeOrdering(left);
unsigned ROrder = SPQ->getNodeOrdering(right);
  // Prefer the node with the lower non-zero order number; an order number
  // of zero means no preference.
if ((LOrder || ROrder) && LOrder != ROrder)
return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
return BURRSort(left, right, SPQ);
}
// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
static const unsigned ReadyDelay = 3;
if (SPQ->MayReduceRegPressure(SU)) return true;
if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
!= ScheduleHazardRecognizer::NoHazard)
return false;
return true;
}
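// Example with hypothetical numbers: given ReadyDelay = 3, an SU of height 10
// that neither reduces register pressure nor clears the hazard check is held
// out of the available queue until CurCycle reaches 7, using the intervening
// cycles to hide its latency rather than growing the available queue.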
// Return true if right should be scheduled with higher priority than left.
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
if (int res = checkSpecialNodes(left, right))
return res > 0;
if (left->isCall || right->isCall)
// No way to compute latency of calls.
return BURRSort(left, right, SPQ);
bool LHigh = SPQ->HighRegPressure(left);
bool RHigh = SPQ->HighRegPressure(right);
// Avoid causing spills. If register pressure is high, schedule for
// register pressure reduction.
if (LHigh && !RHigh) {
DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
<< right->NodeNum << ")\n");
return true;
}
else if (!LHigh && RHigh) {
DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
<< left->NodeNum << ")\n");
return false;
}
if (!LHigh && !RHigh) {
int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
if (result != 0)
return result > 0;
}
return BURRSort(left, right, SPQ);
}
// Schedule as many instructions in each cycle as possible. So don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
if (SU->getHeight() > CurCycle) return false;
if (SPQ->getHazardRec()->getHazardType(SU, 0)
!= ScheduleHazardRecognizer::NoHazard)
return false;
return true;
}
static bool canEnableCoalescing(SUnit *SU) {
unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
// CopyToReg should be close to its uses to facilitate coalescing and
// avoid spilling.
return true;
if (Opc == TargetOpcode::EXTRACT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG ||
Opc == TargetOpcode::INSERT_SUBREG)
// EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
// close to their uses to facilitate coalescing.
return true;
if (SU->NumPreds == 0 && SU->NumSuccs != 0)
// If SU does not have a register def, schedule it close to its uses
// because it does not lengthen any live ranges.
return true;
return false;
}
// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
if (int res = checkSpecialNodes(left, right))
return res > 0;
if (left->isCall || right->isCall)
// No way to compute latency of calls.
return BURRSort(left, right, SPQ);
unsigned LLiveUses = 0, RLiveUses = 0;
int LPDiff = 0, RPDiff = 0;
if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
}
if (!DisableSchedRegPressure && LPDiff != RPDiff) {
DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
<< " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
return LPDiff > RPDiff;
}
if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
bool LReduce = canEnableCoalescing(left);
bool RReduce = canEnableCoalescing(right);
if (LReduce && !RReduce) return false;
if (RReduce && !LReduce) return true;
}
if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
<< " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
return LLiveUses < RLiveUses;
}
if (!DisableSchedStalls) {
bool LStall = BUHasStall(left, left->getHeight(), SPQ);
bool RStall = BUHasStall(right, right->getHeight(), SPQ);
if (LStall != RStall)
return left->getHeight() > right->getHeight();
}
if (!DisableSchedCriticalPath) {
int spread = (int)left->getDepth() - (int)right->getDepth();
if (std::abs(spread) > MaxReorderWindow) {
DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
<< left->getDepth() << " != SU(" << right->NodeNum << "): "
<< right->getDepth() << "\n");
return left->getDepth() < right->getDepth();
}
}
if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
int spread = (int)left->getHeight() - (int)right->getHeight();
if (std::abs(spread) > MaxReorderWindow)
return left->getHeight() > right->getHeight();
}
return BURRSort(left, right, SPQ);
}
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
SUnits = &sunits;
// Add pseudo dependency edges for two-address nodes.
if (!Disable2AddrHack)
AddPseudoTwoAddrDeps();
// Reroute edges to nodes with multiple uses.
if (!TracksRegPressure && !SrcOrder)
PrescheduleNodesWithMultipleUses();
// Calculate node priorities.
CalculateSethiUllmanNumbers();
// For single block loops, mark nodes that look like canonical IV increments.
if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
initVRegCycle(&sunits[i]);
}
}
}
//===----------------------------------------------------------------------===//
// Preschedule for Register Pressure
//===----------------------------------------------------------------------===//
bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
if (SU->isTwoAddress) {
unsigned Opc = SU->getNode()->getMachineOpcode();
const MCInstrDesc &MCID = TII->get(Opc);
unsigned NumRes = MCID.getNumDefs();
unsigned NumOps = MCID.getNumOperands() - NumRes;
for (unsigned i = 0; i != NumOps; ++i) {
if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
SDNode *DU = SU->getNode()->getOperand(i).getNode();
if (DU->getNodeId() != -1 &&
Op->OrigNode == &(*SUnits)[DU->getNodeId()])
return true;
}
}
}
return false;
}
/// canClobberReachingPhysRegUse - True if SU would clobber one of its
/// successors' explicit physregs whose definition can reach DepSU,
/// i.e. DepSU should not be scheduled above SU.
static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
ScheduleDAGRRList *scheduleDAG,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
const uint16_t *ImpDefs
= TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
const uint32_t *RegMask = getNodeRegMask(SU->getNode());
  if (!ImpDefs && !RegMask)
return false;
for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
SI != SE; ++SI) {
SUnit *SuccSU = SI->getSUnit();
for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
PE = SuccSU->Preds.end(); PI != PE; ++PI) {
if (!PI->isAssignedRegDep())
continue;
if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
return true;
if (ImpDefs)
for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
// Return true if SU clobbers this physical register use and the
        // definition of the register is reachable from DepSU. IsReachable queries
// a topological forward sort of the DAG (following the successors).
if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
return true;
}
}
return false;
}
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
SDNode *N = SuccSU->getNode();
unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
assert(ImpDefs && "Caller should check hasPhysRegDefs");
for (const SDNode *SUNode = SU->getNode(); SUNode;
SUNode = SUNode->getGluedNode()) {
if (!SUNode->isMachineOpcode())
continue;
const uint16_t *SUImpDefs =
TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
const uint32_t *SURegMask = getNodeRegMask(SUNode);
if (!SUImpDefs && !SURegMask)
continue;
for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
MVT VT = N->getSimpleValueType(i);
if (VT == MVT::Glue || VT == MVT::Other)
continue;
if (!N->hasAnyUseOfValue(i))
continue;
unsigned Reg = ImpDefs[i - NumDefs];
if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
return true;
if (!SUImpDefs)
continue;
for (;*SUImpDefs; ++SUImpDefs) {
unsigned SUReg = *SUImpDefs;
if (TRI->regsOverlap(Reg, SUReg))
return true;
}
}
}
return false;
}
/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
/// N
/// / |
/// / |
/// U store
/// |
/// ...
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
/// N
/// ||
/// ||
/// store
/// |
/// U
/// |
/// ...
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
///
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
// Visit all the nodes in topological order, working top-down.
for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
SUnit *SU = &(*SUnits)[i];
// For now, only look at nodes with no data successors, such as stores.
// These are especially important, due to the heuristics in
// getNodePriority for nodes with no data successors.
if (SU->NumSuccs != 0)
continue;
// For now, only look at nodes with exactly one data predecessor.
if (SU->NumPreds != 1)
continue;
// Avoid prescheduling copies to virtual registers, which don't behave
// like other nodes from the perspective of scheduling heuristics.
if (SDNode *N = SU->getNode())
if (N->getOpcode() == ISD::CopyToReg &&
TargetRegisterInfo::isVirtualRegister
(cast<RegisterSDNode>(N->getOperand(1))->getReg()))
continue;
// Locate the single data predecessor.
SUnit *PredSU = nullptr;
for (SUnit::const_pred_iterator II = SU->Preds.begin(),
EE = SU->Preds.end(); II != EE; ++II)
if (!II->isCtrl()) {
PredSU = II->getSUnit();
break;
}
assert(PredSU);
// Don't rewrite edges that carry physregs, because that requires additional
// support infrastructure.
if (PredSU->hasPhysRegDefs)
continue;
// Short-circuit the case where SU is PredSU's only data successor.
if (PredSU->NumSuccs == 1)
continue;
// Avoid prescheduling to copies from virtual registers, which don't behave
// like other nodes from the perspective of scheduling heuristics.
if (SDNode *N = SU->getNode())
if (N->getOpcode() == ISD::CopyFromReg &&
TargetRegisterInfo::isVirtualRegister
(cast<RegisterSDNode>(N->getOperand(1))->getReg()))
continue;
// Perform checks on the successors of PredSU.
for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
EE = PredSU->Succs.end(); II != EE; ++II) {
SUnit *PredSuccSU = II->getSUnit();
if (PredSuccSU == SU) continue;
// If PredSU has another successor with no data successors, for
// now don't attempt to choose either over the other.
if (PredSuccSU->NumSuccs == 0)
goto outer_loop_continue;
// Don't break physical register dependencies.
if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
goto outer_loop_continue;
// Don't introduce graph cycles.
if (scheduleDAG->IsReachable(SU, PredSuccSU))
goto outer_loop_continue;
}
// Ok, the transformation is safe and the heuristics suggest it is
// profitable. Update the graph.
DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
<< " next to PredSU #" << PredSU->NodeNum
<< " to guide scheduling in the presence of multiple uses\n");
for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
SDep Edge = PredSU->Succs[i];
assert(!Edge.isAssignedRegDep());
SUnit *SuccSU = Edge.getSUnit();
if (SuccSU != SU) {
Edge.setSUnit(PredSU);
scheduleDAG->RemovePred(SuccSU, Edge);
scheduleDAG->AddPred(SU, Edge);
Edge.setSUnit(SU);
scheduleDAG->AddPred(SuccSU, Edge);
--i;
}
}
outer_loop_continue:;
}
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
SUnit *SU = &(*SUnits)[i];
if (!SU->isTwoAddress)
continue;
SDNode *Node = SU->getNode();
if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
continue;
bool isLiveOut = hasOnlyLiveOutUses(SU);
unsigned Opc = Node->getMachineOpcode();
const MCInstrDesc &MCID = TII->get(Opc);
unsigned NumRes = MCID.getNumDefs();
unsigned NumOps = MCID.getNumOperands() - NumRes;
for (unsigned j = 0; j != NumOps; ++j) {
if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
continue;
SDNode *DU = SU->getNode()->getOperand(j).getNode();
if (DU->getNodeId() == -1)
continue;
const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
if (!DUSU) continue;
for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
E = DUSU->Succs.end(); I != E; ++I) {
if (I->isCtrl()) continue;
SUnit *SuccSU = I->getSUnit();
if (SuccSU == SU)
continue;
// Be conservative. Ignore if nodes aren't at roughly the same
// depth and height.
if (SuccSU->getHeight() < SU->getHeight() &&
(SU->getHeight() - SuccSU->getHeight()) > 1)
continue;
// Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
// constrains whatever is using the copy, instead of the copy
// itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
while (SuccSU->Succs.size() == 1 &&
SuccSU->getNode()->isMachineOpcode() &&
SuccSU->getNode()->getMachineOpcode() ==
TargetOpcode::COPY_TO_REGCLASS)
SuccSU = SuccSU->Succs.front().getSUnit();
// Don't constrain non-instruction nodes.
if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
continue;
// Don't constrain nodes with physical register defs if the
// predecessor can clobber them.
if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
continue;
}
// Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
// these may be coalesced away. We want them close to their uses.
unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
SuccOpc == TargetOpcode::INSERT_SUBREG ||
SuccOpc == TargetOpcode::SUBREG_TO_REG)
continue;
if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
(!canClobber(SuccSU, DUSU) ||
(isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
(!SU->isCommutable && SuccSU->isCommutable)) &&
!scheduleDAG->IsReachable(SuccSU, SU)) {
DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
<< SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial));
}
}
}
}
}
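// Illustration (hypothetical): given
//   A: t1 = <two-address op> t0, x   ; t1 tied to the t0 operand
//   B: ... = <op> t0                 ; another reader of t0
// the artificial edge added above makes B a predecessor of A, so B is emitted
// before A clobbers t0 through its tied operand and no copy of t0 is needed
// to preserve the value for B.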
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
const TargetInstrInfo *TII = STI.getInstrInfo();
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
BURegReductionPriorityQueue *PQ =
new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
const TargetInstrInfo *TII = STI.getInstrInfo();
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
SrcRegReductionPriorityQueue *PQ =
new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
const TargetInstrInfo *TII = STI.getInstrInfo();
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
const TargetLowering *TLI = IS->TLI;
HybridBURRPriorityQueue *PQ =
new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
const TargetInstrInfo *TII = STI.getInstrInfo();
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
const TargetLowering *TLI = IS->TLI;
ILPBURRPriorityQueue *PQ =
new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
}
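// In upstream LLVM these four factories are registered as the -pre-RA-sched
// options list-burr, source, list-hybrid, and list-ilp respectively; that
// pairing is stated here as background and assumed, not verified against
// this tree.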
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h | //===---- ScheduleDAGSDNodes.h - SDNode Scheduling --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleDAGSDNodes class, which implements
// scheduling for an SDNode-based dependency graph.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SCHEDULEDAGSDNODES_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SCHEDULEDAGSDNODES_H
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/ScheduleDAG.h"
namespace llvm {
/// ScheduleDAGSDNodes - A ScheduleDAG for scheduling SDNode-based DAGs.
///
/// Edges between SUnits are initially based on edges in the SelectionDAG,
/// and additional edges can be added by the schedulers as heuristics.
/// SDNodes such as Constants, Registers, and a few others that are not
/// interesting to schedulers are not allocated SUnits.
///
/// SDNodes with MVT::Glue operands are grouped along with the flagged
/// nodes into a single SUnit so that they are scheduled together.
///
/// SDNode-based scheduling graphs do not use SDep::Anti or SDep::Output
/// edges. Physical register dependence information is not carried in
/// the DAG and must be handled explicitly by schedulers.
///
class ScheduleDAGSDNodes : public ScheduleDAG {
public:
MachineBasicBlock *BB;
SelectionDAG *DAG; // DAG of the current basic block
const InstrItineraryData *InstrItins;
/// The schedule. Null SUnit*'s represent noop instructions.
std::vector<SUnit*> Sequence;
explicit ScheduleDAGSDNodes(MachineFunction &mf);
~ScheduleDAGSDNodes() override {}
/// Run - perform scheduling.
///
void Run(SelectionDAG *dag, MachineBasicBlock *bb);
/// isPassiveNode - Return true if the node is a non-scheduled leaf.
///
static bool isPassiveNode(SDNode *Node) {
if (isa<ConstantSDNode>(Node)) return true;
if (isa<ConstantFPSDNode>(Node)) return true;
if (isa<RegisterSDNode>(Node)) return true;
if (isa<RegisterMaskSDNode>(Node)) return true;
if (isa<GlobalAddressSDNode>(Node)) return true;
if (isa<BasicBlockSDNode>(Node)) return true;
if (isa<FrameIndexSDNode>(Node)) return true;
if (isa<ConstantPoolSDNode>(Node)) return true;
if (isa<TargetIndexSDNode>(Node)) return true;
if (isa<JumpTableSDNode>(Node)) return true;
if (isa<ExternalSymbolSDNode>(Node)) return true;
if (isa<MCSymbolSDNode>(Node)) return true;
if (isa<BlockAddressSDNode>(Node)) return true;
if (Node->getOpcode() == ISD::EntryToken ||
isa<MDNodeSDNode>(Node)) return true;
return false;
}
/// NewSUnit - Creates a new SUnit and return a ptr to it.
///
SUnit *newSUnit(SDNode *N);
/// Clone - Creates a clone of the specified SUnit. It does not copy the
/// predecessors / successors info nor the temporary scheduling states.
///
SUnit *Clone(SUnit *N);
  /// BuildSchedGraph - Build the SUnit graph from the selection DAG that we
  /// are given as input. This SUnit graph is similar to the SelectionDAG, but
  /// excludes nodes that aren't interesting to scheduling, and represents
  /// flagged-together nodes with a single SUnit.
void BuildSchedGraph(AliasAnalysis *AA);
/// InitVRegCycleFlag - Set isVRegCycle if this node's single use is
/// CopyToReg and its only active data operands are CopyFromReg within a
/// single block loop.
///
void InitVRegCycleFlag(SUnit *SU);
/// InitNumRegDefsLeft - Determine the # of regs defined by this node.
///
void InitNumRegDefsLeft(SUnit *SU);
/// computeLatency - Compute node latency.
///
virtual void computeLatency(SUnit *SU);
virtual void computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const;
/// Schedule - Order nodes according to selected style, filling
/// in the Sequence member.
///
virtual void Schedule() = 0;
/// VerifyScheduledSequence - Verify that all SUnits are scheduled and
/// consistent with the Sequence of scheduled instructions.
void VerifyScheduledSequence(bool isBottomUp);
/// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock
/// according to the order specified in Sequence.
///
virtual MachineBasicBlock*
EmitSchedule(MachineBasicBlock::iterator &InsertPos);
void dumpNode(const SUnit *SU) const override;
void dumpSchedule() const;
std::string getGraphNodeLabel(const SUnit *SU) const override;
std::string getDAGName() const override;
virtual void getCustomGraphFeatures(GraphWriter<ScheduleDAG*> &GW) const;
/// RegDefIter - In place iteration over the values defined by an
/// SUnit. This does not need copies of the iterator or any other STLisms.
/// The iterator creates itself, rather than being provided by the SchedDAG.
class RegDefIter {
const ScheduleDAGSDNodes *SchedDAG;
const SDNode *Node;
unsigned DefIdx;
unsigned NodeNumDefs;
MVT ValueType;
public:
RegDefIter(const SUnit *SU, const ScheduleDAGSDNodes *SD);
bool IsValid() const { return Node != nullptr; }
MVT GetValue() const {
assert(IsValid() && "bad iterator");
return ValueType;
}
const SDNode *GetNode() const {
return Node;
}
unsigned GetIdx() const {
return DefIdx-1;
}
void Advance();
private:
void InitNodeNumDefs();
};
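  // Typical use, as seen in the list schedulers (sketch):
  //   for (ScheduleDAGSDNodes::RegDefIter I(SU, DAG); I.IsValid(); I.Advance())
  //     MVT VT = I.GetValue(); // type of the next register value defined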
protected:
/// ForceUnitLatencies - Return true if all scheduling edges should be given
/// a latency value of one. The default is to return false; schedulers may
/// override this as needed.
virtual bool forceUnitLatencies() const { return false; }
private:
/// ClusterNeighboringLoads - Cluster loads from "near" addresses into
/// combined SUnits.
void ClusterNeighboringLoads(SDNode *Node);
/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ClusterNodes();
/// BuildSchedUnits, AddSchedEdges - Helper functions for BuildSchedGraph.
void BuildSchedUnits();
void AddSchedEdges();
void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
MachineBasicBlock::iterator InsertPos);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h | //===-- llvm/CodeGen/SDNodeDbgValue.h - SelectionDAG dbg_value --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the SDDbgValue class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SDNODEDBGVALUE_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SDNODEDBGVALUE_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
class MDNode;
class SDNode;
class Value;
/// SDDbgValue - Holds the information from a dbg_value node through SDISel.
/// We do not use SDValue here to avoid including its header.
class SDDbgValue {
public:
enum DbgValueKind {
SDNODE = 0, // value is the result of an expression
CONST = 1, // value is a constant
FRAMEIX = 2 // value is contents of a stack location
};
private:
union {
struct {
SDNode *Node; // valid for expressions
unsigned ResNo; // valid for expressions
} s;
const Value *Const; // valid for constants
unsigned FrameIx; // valid for stack objects
} u;
MDNode *Var;
MDNode *Expr;
uint64_t Offset;
DebugLoc DL;
unsigned Order;
enum DbgValueKind kind;
bool IsIndirect;
bool Invalid = false;
public:
// Constructor for non-constants.
SDDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, unsigned R, bool indir,
uint64_t off, DebugLoc dl, unsigned O)
: Var(Var), Expr(Expr), Offset(off), DL(dl), Order(O), IsIndirect(indir) {
kind = SDNODE;
u.s.Node = N;
u.s.ResNo = R;
}
// Constructor for constants.
SDDbgValue(MDNode *Var, MDNode *Expr, const Value *C, uint64_t off,
DebugLoc dl, unsigned O)
: Var(Var), Expr(Expr), Offset(off), DL(dl), Order(O), IsIndirect(false) {
kind = CONST;
u.Const = C;
}
// Constructor for frame indices.
SDDbgValue(MDNode *Var, MDNode *Expr, unsigned FI, uint64_t off, DebugLoc dl,
unsigned O)
: Var(Var), Expr(Expr), Offset(off), DL(dl), Order(O), IsIndirect(false) {
kind = FRAMEIX;
u.FrameIx = FI;
}
// Returns the kind.
DbgValueKind getKind() const { return kind; }
// Returns the MDNode pointer for the variable.
MDNode *getVariable() const { return Var; }
// Returns the MDNode pointer for the expression.
MDNode *getExpression() const { return Expr; }
// Returns the SDNode* for a register ref
SDNode *getSDNode() const { assert (kind==SDNODE); return u.s.Node; }
// Returns the ResNo for a register ref
unsigned getResNo() const { assert (kind==SDNODE); return u.s.ResNo; }
// Returns the Value* for a constant
const Value *getConst() const { assert (kind==CONST); return u.Const; }
// Returns the FrameIx for a stack object
unsigned getFrameIx() const { assert (kind==FRAMEIX); return u.FrameIx; }
// Returns whether this is an indirect value.
bool isIndirect() const { return IsIndirect; }
// Returns the offset.
uint64_t getOffset() const { return Offset; }
// Returns the DebugLoc.
DebugLoc getDebugLoc() const { return DL; }
// Returns the SDNodeOrder. This is the order of the preceding node in the
// input.
unsigned getOrder() const { return Order; }
// setIsInvalidated / isInvalidated - Setter / getter of the "Invalidated"
// property. A SDDbgValue is invalid if the SDNode that produces the value is
// deleted.
void setIsInvalidated() { Invalid = true; }
bool isInvalidated() const { return Invalid; }
};
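// Callers are expected to dispatch on getKind() before reading the union; a
// minimal sketch (not part of the original interface):
//   switch (DV.getKind()) {
//   case SDDbgValue::SDNODE:  /* DV.getSDNode(), DV.getResNo() */ break;
//   case SDDbgValue::CONST:   /* DV.getConst() */ break;
//   case SDDbgValue::FRAMEIX: /* DV.getFrameIx() */ break;
//   }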
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/MIRParser/MILexer.h | //===- MILexer.h - Lexer for machine instructions -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the function that lexes the machine instruction source
// string.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_MIRPARSER_MILEXER_H
#define LLVM_LIB_CODEGEN_MIRPARSER_MILEXER_H
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/STLExtras.h"
#include <functional>
namespace llvm {
class Twine;
/// A token produced by the machine instruction lexer.
struct MIToken {
enum TokenKind {
// Markers
Eof,
Error,
// Tokens with no info.
comma,
equal,
underscore,
colon,
// Keywords
kw_implicit,
kw_implicit_define,
kw_dead,
kw_killed,
kw_undef,
// Identifier tokens
Identifier,
NamedRegister,
MachineBasicBlock,
NamedGlobalValue,
GlobalValue,
// Other tokens
IntegerLiteral,
VirtualRegister
};
private:
TokenKind Kind;
unsigned StringOffset;
StringRef Range;
APSInt IntVal;
public:
MIToken(TokenKind Kind, StringRef Range, unsigned StringOffset = 0)
: Kind(Kind), StringOffset(StringOffset), Range(Range) {}
MIToken(TokenKind Kind, StringRef Range, const APSInt &IntVal,
unsigned StringOffset = 0)
: Kind(Kind), StringOffset(StringOffset), Range(Range), IntVal(IntVal) {}
TokenKind kind() const { return Kind; }
bool isError() const { return Kind == Error; }
bool isRegister() const {
return Kind == NamedRegister || Kind == underscore ||
Kind == VirtualRegister;
}
bool isRegisterFlag() const {
return Kind == kw_implicit || Kind == kw_implicit_define ||
Kind == kw_dead || Kind == kw_killed || Kind == kw_undef;
}
bool is(TokenKind K) const { return Kind == K; }
bool isNot(TokenKind K) const { return Kind != K; }
StringRef::iterator location() const { return Range.begin(); }
StringRef stringValue() const { return Range.drop_front(StringOffset); }
const APSInt &integerValue() const { return IntVal; }
bool hasIntegerValue() const {
return Kind == IntegerLiteral || Kind == MachineBasicBlock ||
Kind == GlobalValue || Kind == VirtualRegister;
}
};
/// Consume a single machine instruction token in the given source and return
/// the remaining source string.
StringRef lexMIToken(
StringRef Source, MIToken &Token,
function_ref<void(StringRef::iterator, const Twine &)> ErrorCallback);
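// Intended usage, inferred from the interface (a sketch, not normative):
//   MIToken Tok(MIToken::Error, Source);  // placeholder initial token
//   do {
//     Source = lexMIToken(Source, Tok, ErrorCallback);
//     // ... consume Tok ...
//   } while (!Tok.is(MIToken::Eof) && !Tok.isError());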
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/MIRParser/CMakeLists.txt | add_llvm_library(LLVMMIRParser
MILexer.cpp
MIParser.cpp
MIRParser.cpp
)
add_dependencies(LLVMMIRParser intrinsics_gen)
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/MIRParser/MIRParser.cpp | //===- MIRParser.cpp - MIR serialization format parser implementation -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the class that parses the optional LLVM IR and machine
// functions that are stored in MIR files.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "MIParser.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/AsmParser/SlotMapping.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/YAMLTraits.h"
#include <memory>
using namespace llvm;
namespace llvm {
/// This class implements the parsing of LLVM IR that's embedded inside a MIR
/// file.
class MIRParserImpl {
SourceMgr SM;
StringRef Filename;
LLVMContext &Context;
StringMap<std::unique_ptr<yaml::MachineFunction>> Functions;
SlotMapping IRSlots;
/// Maps from register class names to register classes.
StringMap<const TargetRegisterClass *> Names2RegClasses;
public:
MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents, StringRef Filename,
LLVMContext &Context);
void reportDiagnostic(const SMDiagnostic &Diag);
/// Report an error with the given message at unknown location.
///
/// Always returns true.
bool error(const Twine &Message);
/// Report an error with the given message at the given location.
///
/// Always returns true.
bool error(SMLoc Loc, const Twine &Message);
/// Report a given error with the location translated from the location in an
/// embedded string literal to a location in the MIR file.
///
/// Always returns true.
bool error(const SMDiagnostic &Error, SMRange SourceRange);
/// Try to parse the optional LLVM module and the machine functions in the MIR
/// file.
///
/// Return null if an error occurred.
std::unique_ptr<Module> parse();
/// Parse the machine function in the current YAML document.
///
/// \param NoLLVMIR - set to true when the MIR file doesn't have LLVM IR.
/// A dummy IR function is created and inserted into the given module when
/// this parameter is true.
///
/// Return true if an error occurred.
bool parseMachineFunction(yaml::Input &In, Module &M, bool NoLLVMIR);
/// Initialize the machine function to the state that's described in the MIR
/// file.
///
  /// Return true if an error occurred.
  bool initializeMachineFunction(MachineFunction &MF);
  /// Initialize the machine basic block using its YAML representation.
///
/// Return true if an error occurred.
bool initializeMachineBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB,
const yaml::MachineBasicBlock &YamlMBB,
const PerFunctionMIParsingState &PFS);
bool
initializeRegisterInfo(const MachineFunction &MF,
MachineRegisterInfo &RegInfo,
const yaml::MachineFunction &YamlMF,
DenseMap<unsigned, unsigned> &VirtualRegisterSlots);
bool initializeFrameInfo(MachineFrameInfo &MFI,
const yaml::MachineFunction &YamlMF);
private:
/// Return a MIR diagnostic converted from an MI string diagnostic.
SMDiagnostic diagFromMIStringDiag(const SMDiagnostic &Error,
SMRange SourceRange);
/// Return a MIR diagnostic converted from an LLVM assembly diagnostic.
SMDiagnostic diagFromLLVMAssemblyDiag(const SMDiagnostic &Error,
SMRange SourceRange);
/// Create an empty function with the given name.
void createDummyFunction(StringRef Name, Module &M);
void initNames2RegClasses(const MachineFunction &MF);
/// Check if the given identifier is a name of a register class.
///
/// Return null if the name isn't a register class.
const TargetRegisterClass *getRegClass(const MachineFunction &MF,
StringRef Name);
};
} // end namespace llvm
MIRParserImpl::MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents,
StringRef Filename, LLVMContext &Context)
: SM(), Filename(Filename), Context(Context) {
SM.AddNewSourceBuffer(std::move(Contents), SMLoc());
}
bool MIRParserImpl::error(const Twine &Message) {
Context.diagnose(DiagnosticInfoMIRParser(
DS_Error, SMDiagnostic(Filename, SourceMgr::DK_Error, Message.str())));
return true;
}
bool MIRParserImpl::error(SMLoc Loc, const Twine &Message) {
Context.diagnose(DiagnosticInfoMIRParser(
DS_Error, SM.GetMessage(Loc, SourceMgr::DK_Error, Message)));
return true;
}
bool MIRParserImpl::error(const SMDiagnostic &Error, SMRange SourceRange) {
assert(Error.getKind() == SourceMgr::DK_Error && "Expected an error");
reportDiagnostic(diagFromMIStringDiag(Error, SourceRange));
return true;
}
void MIRParserImpl::reportDiagnostic(const SMDiagnostic &Diag) {
DiagnosticSeverity Kind;
switch (Diag.getKind()) {
case SourceMgr::DK_Error:
Kind = DS_Error;
break;
case SourceMgr::DK_Warning:
Kind = DS_Warning;
break;
case SourceMgr::DK_Note:
Kind = DS_Note;
break;
}
Context.diagnose(DiagnosticInfoMIRParser(Kind, Diag));
}
static void handleYAMLDiag(const SMDiagnostic &Diag, void *Context) {
reinterpret_cast<MIRParserImpl *>(Context)->reportDiagnostic(Diag);
}
std::unique_ptr<Module> MIRParserImpl::parse() {
yaml::Input In(SM.getMemoryBuffer(SM.getMainFileID())->getBuffer(),
/*Ctxt=*/nullptr, handleYAMLDiag, this);
In.setContext(&In);
if (!In.setCurrentDocument()) {
if (In.error())
return nullptr;
// Create an empty module when the MIR file is empty.
return llvm::make_unique<Module>(Filename, Context);
}
std::unique_ptr<Module> M;
bool NoLLVMIR = false;
  // Parse the block scalar manually so that we can return a unique pointer
  // without having to go through the YAML traits.
if (const auto *BSN =
dyn_cast_or_null<yaml::BlockScalarNode>(In.getCurrentNode())) {
SMDiagnostic Error;
M = parseAssembly(MemoryBufferRef(BSN->getValue(), Filename), Error,
Context, &IRSlots);
if (!M) {
reportDiagnostic(diagFromLLVMAssemblyDiag(Error, BSN->getSourceRange()));
return M;
}
In.nextDocument();
if (!In.setCurrentDocument())
return M;
} else {
    // Create a new, empty module.
M = llvm::make_unique<Module>(Filename, Context);
NoLLVMIR = true;
}
// Parse the machine functions.
do {
if (parseMachineFunction(In, *M, NoLLVMIR))
return nullptr;
In.nextDocument();
} while (In.setCurrentDocument());
return M;
}
bool MIRParserImpl::parseMachineFunction(yaml::Input &In, Module &M,
bool NoLLVMIR) {
auto MF = llvm::make_unique<yaml::MachineFunction>();
yaml::yamlize(In, *MF, false);
if (In.error())
return true;
auto FunctionName = MF->Name;
if (Functions.find(FunctionName) != Functions.end())
return error(Twine("redefinition of machine function '") + FunctionName +
"'");
Functions.insert(std::make_pair(FunctionName, std::move(MF)));
if (NoLLVMIR)
createDummyFunction(FunctionName, M);
else if (!M.getFunction(FunctionName))
return error(Twine("function '") + FunctionName +
"' isn't defined in the provided LLVM IR");
return false;
}
void MIRParserImpl::createDummyFunction(StringRef Name, Module &M) {
auto &Context = M.getContext();
Function *F = cast<Function>(M.getOrInsertFunction(
Name, FunctionType::get(Type::getVoidTy(Context), false)));
BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
new UnreachableInst(Context, BB);
}
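// For reference, the dummy function created above is equivalent to this LLVM
// IR (a sketch; the actual symbol comes from the MIR document's name field):
//
//   define void @name() {
//   entry:
//     unreachable
//   }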
bool MIRParserImpl::initializeMachineFunction(MachineFunction &MF) {
auto It = Functions.find(MF.getName());
if (It == Functions.end())
return error(Twine("no machine function information for function '") +
MF.getName() + "' in the MIR file");
// TODO: Recreate the machine function.
const yaml::MachineFunction &YamlMF = *It->getValue();
if (YamlMF.Alignment)
MF.setAlignment(YamlMF.Alignment);
MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
MF.setHasInlineAsm(YamlMF.HasInlineAsm);
PerFunctionMIParsingState PFS;
if (initializeRegisterInfo(MF, MF.getRegInfo(), YamlMF,
PFS.VirtualRegisterSlots))
return true;
if (initializeFrameInfo(*MF.getFrameInfo(), YamlMF))
return true;
const auto &F = *MF.getFunction();
for (const auto &YamlMBB : YamlMF.BasicBlocks) {
const BasicBlock *BB = nullptr;
const yaml::StringValue &Name = YamlMBB.Name;
if (!Name.Value.empty()) {
BB = dyn_cast_or_null<BasicBlock>(
F.getValueSymbolTable().lookup(Name.Value));
if (!BB)
return error(Name.SourceRange.Start,
Twine("basic block '") + Name.Value +
"' is not defined in the function '" + MF.getName() +
"'");
}
auto *MBB = MF.CreateMachineBasicBlock(BB);
MF.insert(MF.end(), MBB);
bool WasInserted =
PFS.MBBSlots.insert(std::make_pair(YamlMBB.ID, MBB)).second;
if (!WasInserted)
return error(Twine("redefinition of machine basic block with id #") +
Twine(YamlMBB.ID));
}
if (YamlMF.BasicBlocks.empty())
return error(Twine("machine function '") + Twine(MF.getName()) +
"' requires at least one machine basic block in its body");
// Initialize the machine basic blocks after creating them all so that the
// machine instructions parser can resolve the MBB references.
unsigned I = 0;
for (const auto &YamlMBB : YamlMF.BasicBlocks) {
if (initializeMachineBasicBlock(MF, *MF.getBlockNumbered(I++), YamlMBB,
PFS))
return true;
}
return false;
}
bool MIRParserImpl::initializeMachineBasicBlock(
MachineFunction &MF, MachineBasicBlock &MBB,
const yaml::MachineBasicBlock &YamlMBB,
const PerFunctionMIParsingState &PFS) {
MBB.setAlignment(YamlMBB.Alignment);
if (YamlMBB.AddressTaken)
MBB.setHasAddressTaken();
MBB.setIsLandingPad(YamlMBB.IsLandingPad);
SMDiagnostic Error;
// Parse the successors.
for (const auto &MBBSource : YamlMBB.Successors) {
MachineBasicBlock *SuccMBB = nullptr;
if (parseMBBReference(SuccMBB, SM, MF, MBBSource.Value, PFS, IRSlots,
Error))
return error(Error, MBBSource.SourceRange);
// TODO: Report an error when adding the same successor more than once.
MBB.addSuccessor(SuccMBB);
}
// Parse the liveins.
for (const auto &LiveInSource : YamlMBB.LiveIns) {
unsigned Reg = 0;
if (parseNamedRegisterReference(Reg, SM, MF, LiveInSource.Value, PFS,
IRSlots, Error))
return error(Error, LiveInSource.SourceRange);
MBB.addLiveIn(Reg);
}
// Parse the instructions.
for (const auto &MISource : YamlMBB.Instructions) {
MachineInstr *MI = nullptr;
if (parseMachineInstr(MI, SM, MF, MISource.Value, PFS, IRSlots, Error))
return error(Error, MISource.SourceRange);
MBB.insert(MBB.end(), MI);
}
return false;
}
bool MIRParserImpl::initializeRegisterInfo(
const MachineFunction &MF, MachineRegisterInfo &RegInfo,
const yaml::MachineFunction &YamlMF,
DenseMap<unsigned, unsigned> &VirtualRegisterSlots) {
assert(RegInfo.isSSA());
if (!YamlMF.IsSSA)
RegInfo.leaveSSA();
assert(RegInfo.tracksLiveness());
if (!YamlMF.TracksRegLiveness)
RegInfo.invalidateLiveness();
RegInfo.enableSubRegLiveness(YamlMF.TracksSubRegLiveness);
// Parse the virtual register information.
for (const auto &VReg : YamlMF.VirtualRegisters) {
const auto *RC = getRegClass(MF, VReg.Class.Value);
if (!RC)
return error(VReg.Class.SourceRange.Start,
Twine("use of undefined register class '") +
VReg.Class.Value + "'");
unsigned Reg = RegInfo.createVirtualRegister(RC);
// TODO: Report an error when the same virtual register with the same ID is
// redefined.
VirtualRegisterSlots.insert(std::make_pair(VReg.ID, Reg));
}
return false;
}
bool MIRParserImpl::initializeFrameInfo(MachineFrameInfo &MFI,
const yaml::MachineFunction &YamlMF) {
const yaml::MachineFrameInfo &YamlMFI = YamlMF.FrameInfo;
MFI.setFrameAddressIsTaken(YamlMFI.IsFrameAddressTaken);
MFI.setReturnAddressIsTaken(YamlMFI.IsReturnAddressTaken);
MFI.setHasStackMap(YamlMFI.HasStackMap);
MFI.setHasPatchPoint(YamlMFI.HasPatchPoint);
MFI.setStackSize(YamlMFI.StackSize);
MFI.setOffsetAdjustment(YamlMFI.OffsetAdjustment);
if (YamlMFI.MaxAlignment)
MFI.ensureMaxAlignment(YamlMFI.MaxAlignment);
MFI.setAdjustsStack(YamlMFI.AdjustsStack);
MFI.setHasCalls(YamlMFI.HasCalls);
MFI.setMaxCallFrameSize(YamlMFI.MaxCallFrameSize);
MFI.setHasOpaqueSPAdjustment(YamlMFI.HasOpaqueSPAdjustment);
MFI.setHasVAStart(YamlMFI.HasVAStart);
MFI.setHasMustTailInVarArgFunc(YamlMFI.HasMustTailInVarArgFunc);
// Initialize the fixed frame objects.
for (const auto &Object : YamlMF.FixedStackObjects) {
int ObjectIdx;
if (Object.Type != yaml::FixedMachineStackObject::SpillSlot)
ObjectIdx = MFI.CreateFixedObject(Object.Size, Object.Offset,
Object.IsImmutable, Object.IsAliased);
else
ObjectIdx = MFI.CreateFixedSpillStackObject(Object.Size, Object.Offset);
MFI.setObjectAlignment(ObjectIdx, Object.Alignment);
// TODO: Store the mapping between fixed object IDs and object indices to
// parse fixed stack object references correctly.
}
// Initialize the ordinary frame objects.
for (const auto &Object : YamlMF.StackObjects) {
int ObjectIdx;
if (Object.Type == yaml::MachineStackObject::VariableSized)
ObjectIdx =
MFI.CreateVariableSizedObject(Object.Alignment, /*Alloca=*/nullptr);
else
ObjectIdx = MFI.CreateStackObject(
Object.Size, Object.Alignment,
Object.Type == yaml::MachineStackObject::SpillSlot);
MFI.setObjectOffset(ObjectIdx, Object.Offset);
// TODO: Store the mapping between object IDs and object indices to parse
// stack object references correctly.
}
return false;
}
SMDiagnostic MIRParserImpl::diagFromMIStringDiag(const SMDiagnostic &Error,
SMRange SourceRange) {
assert(SourceRange.isValid() && "Invalid source range");
SMLoc Loc = SourceRange.Start;
bool HasQuote = Loc.getPointer() < SourceRange.End.getPointer() &&
*Loc.getPointer() == '\'';
// Translate the location of the error from the location in the MI string to
// the corresponding location in the MIR file.
Loc = Loc.getFromPointer(Loc.getPointer() + Error.getColumnNo() +
(HasQuote ? 1 : 0));
// TODO: Translate any source ranges as well.
return SM.GetMessage(Loc, Error.getKind(), Error.getMessage(), None,
Error.getFixIts());
}
SMDiagnostic MIRParserImpl::diagFromLLVMAssemblyDiag(const SMDiagnostic &Error,
SMRange SourceRange) {
assert(SourceRange.isValid());
  // Translate the location of the error from the location in the LLVM IR
  // string to the corresponding location in the MIR file.
auto LineAndColumn = SM.getLineAndColumn(SourceRange.Start);
unsigned Line = LineAndColumn.first + Error.getLineNo() - 1;
unsigned Column = Error.getColumnNo();
StringRef LineStr = Error.getLineContents();
SMLoc Loc = Error.getLoc();
// Get the full line and adjust the column number by taking the indentation of
// LLVM IR into account.
for (line_iterator L(*SM.getMemoryBuffer(SM.getMainFileID()), false), E;
L != E; ++L) {
if (L.line_number() == Line) {
LineStr = *L;
Loc = SMLoc::getFromPointer(LineStr.data());
auto Indent = LineStr.find(Error.getLineContents());
if (Indent != StringRef::npos)
Column += Indent;
break;
}
}
return SMDiagnostic(SM, Loc, Filename, Line, Column, Error.getKind(),
Error.getMessage(), LineStr, Error.getRanges(),
Error.getFixIts());
}
void MIRParserImpl::initNames2RegClasses(const MachineFunction &MF) {
if (!Names2RegClasses.empty())
return;
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; ++I) {
const auto *RC = TRI->getRegClass(I);
Names2RegClasses.insert(
std::make_pair(StringRef(TRI->getRegClassName(RC)).lower(), RC));
}
}
const TargetRegisterClass *MIRParserImpl::getRegClass(const MachineFunction &MF,
StringRef Name) {
initNames2RegClasses(MF);
auto RegClassInfo = Names2RegClasses.find(Name);
if (RegClassInfo == Names2RegClasses.end())
return nullptr;
return RegClassInfo->getValue();
}
MIRParser::MIRParser(std::unique_ptr<MIRParserImpl> Impl)
: Impl(std::move(Impl)) {}
MIRParser::~MIRParser() {}
std::unique_ptr<Module> MIRParser::parseLLVMModule() { return Impl->parse(); }
bool MIRParser::initializeMachineFunction(MachineFunction &MF) {
return Impl->initializeMachineFunction(MF);
}
std::unique_ptr<MIRParser> llvm::createMIRParserFromFile(StringRef Filename,
SMDiagnostic &Error,
LLVMContext &Context) {
auto FileOrErr = MemoryBuffer::getFile(Filename);
if (std::error_code EC = FileOrErr.getError()) {
Error = SMDiagnostic(Filename, SourceMgr::DK_Error,
"Could not open input file: " + EC.message());
return nullptr;
}
return createMIRParser(std::move(FileOrErr.get()), Context);
}
std::unique_ptr<MIRParser>
llvm::createMIRParser(std::unique_ptr<MemoryBuffer> Contents,
LLVMContext &Context) {
auto Filename = Contents->getBufferIdentifier();
return llvm::make_unique<MIRParser>(
llvm::make_unique<MIRParserImpl>(std::move(Contents), Filename, Context));
}
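// Illustrative driver sketch (editor's addition, not part of this file): how
// a tool might use the factory functions above. The file name "input.mir"
// and the tool name are assumptions.
//
//   LLVMContext Context;
//   SMDiagnostic Error;
//   if (std::unique_ptr<MIRParser> Parser =
//           createMIRParserFromFile("input.mir", Error, Context)) {
//     std::unique_ptr<Module> M = Parser->parseLLVMModule();
//     // A machine function pass would later call
//     // Parser->initializeMachineFunction(MF) for each parsed function.
//   } else {
//     Error.print("mir-tool", errs());
//   }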
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/MIRParser/LLVMBuild.txt | ;===- ./lib/CodeGen/MIRParser/LLVMBuild.txt --------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[component_0]
type = Library
name = MIRParser
parent = CodeGen
required_libraries = Core Support Target AsmParser CodeGen
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/MIRParser/MIParser.cpp | //===- MIParser.cpp - Machine instructions parser implementation ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the parsing of machine instructions.
//
//===----------------------------------------------------------------------===//
#include "MIParser.h"
#include "MILexer.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/AsmParser/SlotMapping.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
namespace {
/// A wrapper struct around the 'MachineOperand' struct that includes a source
/// range.
struct MachineOperandWithLocation {
MachineOperand Operand;
StringRef::iterator Begin;
StringRef::iterator End;
MachineOperandWithLocation(const MachineOperand &Operand,
StringRef::iterator Begin, StringRef::iterator End)
: Operand(Operand), Begin(Begin), End(End) {}
};
class MIParser {
SourceMgr &SM;
MachineFunction &MF;
SMDiagnostic &Error;
StringRef Source, CurrentSource;
MIToken Token;
const PerFunctionMIParsingState &PFS;
/// Maps from indices to unnamed global values and metadata nodes.
const SlotMapping &IRSlots;
/// Maps from instruction names to op codes.
StringMap<unsigned> Names2InstrOpCodes;
/// Maps from register names to registers.
StringMap<unsigned> Names2Regs;
/// Maps from register mask names to register masks.
StringMap<const uint32_t *> Names2RegMasks;
/// Maps from subregister names to subregister indices.
StringMap<unsigned> Names2SubRegIndices;
public:
MIParser(SourceMgr &SM, MachineFunction &MF, SMDiagnostic &Error,
StringRef Source, const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots);
void lex();
/// Report an error at the current location with the given message.
///
  /// This function always returns true.
bool error(const Twine &Msg);
/// Report an error at the given location with the given message.
///
  /// This function always returns true.
bool error(StringRef::iterator Loc, const Twine &Msg);
bool parse(MachineInstr *&MI);
bool parseMBB(MachineBasicBlock *&MBB);
bool parseNamedRegister(unsigned &Reg);
bool parseRegister(unsigned &Reg);
bool parseRegisterFlag(unsigned &Flags);
bool parseSubRegisterIndex(unsigned &SubReg);
bool parseRegisterOperand(MachineOperand &Dest, bool IsDef = false);
bool parseImmediateOperand(MachineOperand &Dest);
bool parseMBBReference(MachineBasicBlock *&MBB);
bool parseMBBOperand(MachineOperand &Dest);
bool parseGlobalAddressOperand(MachineOperand &Dest);
bool parseMachineOperand(MachineOperand &Dest);
private:
/// Convert the integer literal in the current token into an unsigned integer.
///
/// Return true if an error occurred.
bool getUnsigned(unsigned &Result);
void initNames2InstrOpCodes();
/// Try to convert an instruction name to an opcode. Return true if the
/// instruction name is invalid.
bool parseInstrName(StringRef InstrName, unsigned &OpCode);
bool parseInstruction(unsigned &OpCode);
bool verifyImplicitOperands(ArrayRef<MachineOperandWithLocation> Operands,
const MCInstrDesc &MCID);
void initNames2Regs();
/// Try to convert a register name to a register number. Return true if the
/// register name is invalid.
bool getRegisterByName(StringRef RegName, unsigned &Reg);
void initNames2RegMasks();
/// Check if the given identifier is a name of a register mask.
///
/// Return null if the identifier isn't a register mask.
const uint32_t *getRegMask(StringRef Identifier);
void initNames2SubRegIndices();
/// Check if the given identifier is a name of a subregister index.
///
  /// Return 0 if the name isn't a subregister index.
unsigned getSubRegIndex(StringRef Name);
};
} // end anonymous namespace
MIParser::MIParser(SourceMgr &SM, MachineFunction &MF, SMDiagnostic &Error,
StringRef Source, const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots)
: SM(SM), MF(MF), Error(Error), Source(Source), CurrentSource(Source),
Token(MIToken::Error, StringRef()), PFS(PFS), IRSlots(IRSlots) {}
void MIParser::lex() {
CurrentSource = lexMIToken(
CurrentSource, Token,
[this](StringRef::iterator Loc, const Twine &Msg) { error(Loc, Msg); });
}
bool MIParser::error(const Twine &Msg) { return error(Token.location(), Msg); }
bool MIParser::error(StringRef::iterator Loc, const Twine &Msg) {
assert(Loc >= Source.data() && Loc <= (Source.data() + Source.size()));
Error = SMDiagnostic(
SM, SMLoc(),
SM.getMemoryBuffer(SM.getMainFileID())->getBufferIdentifier(), 1,
Loc - Source.data(), SourceMgr::DK_Error, Msg.str(), Source, None, None);
return true;
}
bool MIParser::parse(MachineInstr *&MI) {
lex();
// Parse any register operands before '='
// TODO: Allow parsing of multiple operands before '='
MachineOperand MO = MachineOperand::CreateImm(0);
SmallVector<MachineOperandWithLocation, 8> Operands;
if (Token.isRegister() || Token.isRegisterFlag()) {
auto Loc = Token.location();
if (parseRegisterOperand(MO, /*IsDef=*/true))
return true;
Operands.push_back(MachineOperandWithLocation(MO, Loc, Token.location()));
if (Token.isNot(MIToken::equal))
return error("expected '='");
lex();
}
unsigned OpCode;
if (Token.isError() || parseInstruction(OpCode))
return true;
// TODO: Parse the instruction flags and memory operands.
// Parse the remaining machine operands.
while (Token.isNot(MIToken::Eof)) {
auto Loc = Token.location();
if (parseMachineOperand(MO))
return true;
Operands.push_back(MachineOperandWithLocation(MO, Loc, Token.location()));
if (Token.is(MIToken::Eof))
break;
if (Token.isNot(MIToken::comma))
return error("expected ',' before the next machine operand");
lex();
}
const auto &MCID = MF.getSubtarget().getInstrInfo()->get(OpCode);
if (!MCID.isVariadic()) {
// FIXME: Move the implicit operand verification to the machine verifier.
if (verifyImplicitOperands(Operands, MCID))
return true;
}
// TODO: Check for extraneous machine operands.
MI = MF.CreateMachineInstr(MCID, DebugLoc(), /*NoImplicit=*/true);
for (const auto &Operand : Operands)
MI->addOperand(MF, Operand.Operand);
return false;
}
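// Sketch of inputs the loop above accepts (using X86 names purely as
// placeholders, not a claim about any particular target):
//   "RETQ implicit %eax"                  (no defs before '=')
//   "%eax = MOV32r0 implicit-def %eflags" (one register def, then operands)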
bool MIParser::parseMBB(MachineBasicBlock *&MBB) {
lex();
if (Token.isNot(MIToken::MachineBasicBlock))
return error("expected a machine basic block reference");
if (parseMBBReference(MBB))
return true;
lex();
if (Token.isNot(MIToken::Eof))
return error(
"expected end of string after the machine basic block reference");
return false;
}
bool MIParser::parseNamedRegister(unsigned &Reg) {
lex();
if (Token.isNot(MIToken::NamedRegister))
return error("expected a named register");
if (parseRegister(Reg))
    return true;
lex();
if (Token.isNot(MIToken::Eof))
return error("expected end of string after the register reference");
return false;
}
static const char *printImplicitRegisterFlag(const MachineOperand &MO) {
assert(MO.isImplicit());
return MO.isDef() ? "implicit-def" : "implicit";
}
static std::string getRegisterName(const TargetRegisterInfo *TRI,
unsigned Reg) {
assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "expected phys reg");
return StringRef(TRI->getName(Reg)).lower();
}
bool MIParser::verifyImplicitOperands(
ArrayRef<MachineOperandWithLocation> Operands, const MCInstrDesc &MCID) {
if (MCID.isCall())
// We can't verify call instructions as they can contain arbitrary implicit
// register and register mask operands.
return false;
// Gather all the expected implicit operands.
SmallVector<MachineOperand, 4> ImplicitOperands;
if (MCID.ImplicitDefs)
for (const uint16_t *ImpDefs = MCID.getImplicitDefs(); *ImpDefs; ++ImpDefs)
ImplicitOperands.push_back(
MachineOperand::CreateReg(*ImpDefs, true, true));
if (MCID.ImplicitUses)
for (const uint16_t *ImpUses = MCID.getImplicitUses(); *ImpUses; ++ImpUses)
ImplicitOperands.push_back(
MachineOperand::CreateReg(*ImpUses, false, true));
const auto *TRI = MF.getSubtarget().getRegisterInfo();
assert(TRI && "Expected target register info");
size_t I = ImplicitOperands.size(), J = Operands.size();
while (I) {
--I;
if (J) {
--J;
const auto &ImplicitOperand = ImplicitOperands[I];
const auto &Operand = Operands[J].Operand;
if (ImplicitOperand.isIdenticalTo(Operand))
continue;
if (Operand.isReg() && Operand.isImplicit()) {
return error(Operands[J].Begin,
Twine("expected an implicit register operand '") +
printImplicitRegisterFlag(ImplicitOperand) + " %" +
getRegisterName(TRI, ImplicitOperand.getReg()) + "'");
}
}
    // TODO: Fix source location when Operands[J].End is right before '=', i.e:
    // instead of reporting an error at this location:
// %eax = MOV32r0
// ^
// report the error at the following location:
// %eax = MOV32r0
// ^
return error(J < Operands.size() ? Operands[J].End : Token.location(),
Twine("missing implicit register operand '") +
printImplicitRegisterFlag(ImplicitOperands[I]) + " %" +
getRegisterName(TRI, ImplicitOperands[I].getReg()) + "'");
}
return false;
}
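// Sketch of the verification above: if the MCInstrDesc for MOV32r0 lists an
// implicit def of EFLAGS, then "%eax = MOV32r0" is rejected with
// "missing implicit register operand 'implicit-def %eflags'", while
// "%eax = MOV32r0 implicit-def %eflags" passes.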
bool MIParser::parseInstruction(unsigned &OpCode) {
if (Token.isNot(MIToken::Identifier))
return error("expected a machine instruction");
StringRef InstrName = Token.stringValue();
if (parseInstrName(InstrName, OpCode))
return error(Twine("unknown machine instruction name '") + InstrName + "'");
lex();
return false;
}
bool MIParser::parseRegister(unsigned &Reg) {
switch (Token.kind()) {
case MIToken::underscore:
Reg = 0;
break;
case MIToken::NamedRegister: {
StringRef Name = Token.stringValue();
if (getRegisterByName(Name, Reg))
return error(Twine("unknown register name '") + Name + "'");
break;
}
case MIToken::VirtualRegister: {
unsigned ID;
if (getUnsigned(ID))
return true;
const auto RegInfo = PFS.VirtualRegisterSlots.find(ID);
if (RegInfo == PFS.VirtualRegisterSlots.end())
return error(Twine("use of undefined virtual register '%") + Twine(ID) +
"'");
Reg = RegInfo->second;
break;
}
// TODO: Parse other register kinds.
default:
llvm_unreachable("The current token should be a register");
}
return false;
}
bool MIParser::parseRegisterFlag(unsigned &Flags) {
switch (Token.kind()) {
case MIToken::kw_implicit:
Flags |= RegState::Implicit;
break;
case MIToken::kw_implicit_define:
Flags |= RegState::ImplicitDefine;
break;
case MIToken::kw_dead:
Flags |= RegState::Dead;
break;
case MIToken::kw_killed:
Flags |= RegState::Kill;
break;
case MIToken::kw_undef:
Flags |= RegState::Undef;
break;
// TODO: report an error when we specify the same flag more than once.
// TODO: parse the other register flags.
default:
llvm_unreachable("The current token should be a register flag");
}
lex();
return false;
}
bool MIParser::parseSubRegisterIndex(unsigned &SubReg) {
assert(Token.is(MIToken::colon));
lex();
if (Token.isNot(MIToken::Identifier))
return error("expected a subregister index after ':'");
auto Name = Token.stringValue();
SubReg = getSubRegIndex(Name);
if (!SubReg)
return error(Twine("use of unknown subregister index '") + Name + "'");
lex();
return false;
}
bool MIParser::parseRegisterOperand(MachineOperand &Dest, bool IsDef) {
unsigned Reg;
unsigned Flags = IsDef ? RegState::Define : 0;
while (Token.isRegisterFlag()) {
if (parseRegisterFlag(Flags))
return true;
}
if (!Token.isRegister())
return error("expected a register after register flags");
if (parseRegister(Reg))
return true;
lex();
unsigned SubReg = 0;
if (Token.is(MIToken::colon)) {
if (parseSubRegisterIndex(SubReg))
return true;
}
Dest = MachineOperand::CreateReg(
Reg, Flags & RegState::Define, Flags & RegState::Implicit,
Flags & RegState::Kill, Flags & RegState::Dead, Flags & RegState::Undef,
/*isEarlyClobber=*/false, SubReg);
return false;
}
bool MIParser::parseImmediateOperand(MachineOperand &Dest) {
assert(Token.is(MIToken::IntegerLiteral));
const APSInt &Int = Token.integerValue();
if (Int.getMinSignedBits() > 64)
// TODO: Replace this with an error when we can parse CIMM Machine Operands.
llvm_unreachable("Can't parse large integer literals yet!");
Dest = MachineOperand::CreateImm(Int.getExtValue());
lex();
return false;
}
bool MIParser::getUnsigned(unsigned &Result) {
assert(Token.hasIntegerValue() && "Expected a token with an integer value");
const uint64_t Limit = uint64_t(std::numeric_limits<unsigned>::max()) + 1;
uint64_t Val64 = Token.integerValue().getLimitedValue(Limit);
if (Val64 == Limit)
return error("expected 32-bit integer (too large)");
Result = Val64;
return false;
}
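// Note on the Limit trick above: getLimitedValue(Limit) saturates at Limit,
// so any literal of Limit or more comes back as exactly Limit and is
// rejected; e.g. "4294967295" parses, while "4294967296" reports the error.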
bool MIParser::parseMBBReference(MachineBasicBlock *&MBB) {
assert(Token.is(MIToken::MachineBasicBlock));
unsigned Number;
if (getUnsigned(Number))
return true;
auto MBBInfo = PFS.MBBSlots.find(Number);
if (MBBInfo == PFS.MBBSlots.end())
return error(Twine("use of undefined machine basic block #") +
Twine(Number));
MBB = MBBInfo->second;
if (!Token.stringValue().empty() && Token.stringValue() != MBB->getName())
return error(Twine("the name of machine basic block #") + Twine(Number) +
" isn't '" + Token.stringValue() + "'");
return false;
}
bool MIParser::parseMBBOperand(MachineOperand &Dest) {
MachineBasicBlock *MBB;
if (parseMBBReference(MBB))
return true;
Dest = MachineOperand::CreateMBB(MBB);
lex();
return false;
}
bool MIParser::parseGlobalAddressOperand(MachineOperand &Dest) {
switch (Token.kind()) {
case MIToken::NamedGlobalValue: {
auto Name = Token.stringValue();
const Module *M = MF.getFunction()->getParent();
if (const auto *GV = M->getNamedValue(Name)) {
Dest = MachineOperand::CreateGA(GV, /*Offset=*/0);
break;
}
return error(Twine("use of undefined global value '@") + Name + "'");
}
case MIToken::GlobalValue: {
unsigned GVIdx;
if (getUnsigned(GVIdx))
return true;
if (GVIdx >= IRSlots.GlobalValues.size())
return error(Twine("use of undefined global value '@") + Twine(GVIdx) +
"'");
Dest = MachineOperand::CreateGA(IRSlots.GlobalValues[GVIdx],
/*Offset=*/0);
break;
}
default:
llvm_unreachable("The current token should be a global value");
}
// TODO: Parse offset and target flags.
lex();
return false;
}
bool MIParser::parseMachineOperand(MachineOperand &Dest) {
switch (Token.kind()) {
case MIToken::kw_implicit:
case MIToken::kw_implicit_define:
case MIToken::kw_dead:
case MIToken::kw_killed:
case MIToken::kw_undef:
case MIToken::underscore:
case MIToken::NamedRegister:
case MIToken::VirtualRegister:
return parseRegisterOperand(Dest);
case MIToken::IntegerLiteral:
return parseImmediateOperand(Dest);
case MIToken::MachineBasicBlock:
return parseMBBOperand(Dest);
case MIToken::GlobalValue:
case MIToken::NamedGlobalValue:
return parseGlobalAddressOperand(Dest);
case MIToken::Error:
return true;
case MIToken::Identifier:
if (const auto *RegMask = getRegMask(Token.stringValue())) {
Dest = MachineOperand::CreateRegMask(RegMask);
lex();
break;
}
// fallthrough
default:
// TODO: parse the other machine operands.
return error("expected a machine operand");
}
return false;
}
void MIParser::initNames2InstrOpCodes() {
if (!Names2InstrOpCodes.empty())
return;
const auto *TII = MF.getSubtarget().getInstrInfo();
assert(TII && "Expected target instruction info");
for (unsigned I = 0, E = TII->getNumOpcodes(); I < E; ++I)
Names2InstrOpCodes.insert(std::make_pair(StringRef(TII->getName(I)), I));
}
bool MIParser::parseInstrName(StringRef InstrName, unsigned &OpCode) {
initNames2InstrOpCodes();
auto InstrInfo = Names2InstrOpCodes.find(InstrName);
if (InstrInfo == Names2InstrOpCodes.end())
return true;
OpCode = InstrInfo->getValue();
return false;
}
void MIParser::initNames2Regs() {
if (!Names2Regs.empty())
return;
  // The '%noreg' register is register 0.
Names2Regs.insert(std::make_pair("noreg", 0));
const auto *TRI = MF.getSubtarget().getRegisterInfo();
assert(TRI && "Expected target register info");
for (unsigned I = 0, E = TRI->getNumRegs(); I < E; ++I) {
bool WasInserted =
Names2Regs.insert(std::make_pair(StringRef(TRI->getName(I)).lower(), I))
.second;
(void)WasInserted;
assert(WasInserted && "Expected registers to be unique case-insensitively");
}
}
bool MIParser::getRegisterByName(StringRef RegName, unsigned &Reg) {
initNames2Regs();
auto RegInfo = Names2Regs.find(RegName);
if (RegInfo == Names2Regs.end())
return true;
Reg = RegInfo->getValue();
return false;
}
void MIParser::initNames2RegMasks() {
if (!Names2RegMasks.empty())
return;
const auto *TRI = MF.getSubtarget().getRegisterInfo();
assert(TRI && "Expected target register info");
ArrayRef<const uint32_t *> RegMasks = TRI->getRegMasks();
ArrayRef<const char *> RegMaskNames = TRI->getRegMaskNames();
assert(RegMasks.size() == RegMaskNames.size());
for (size_t I = 0, E = RegMasks.size(); I < E; ++I)
Names2RegMasks.insert(
std::make_pair(StringRef(RegMaskNames[I]).lower(), RegMasks[I]));
}
const uint32_t *MIParser::getRegMask(StringRef Identifier) {
initNames2RegMasks();
auto RegMaskInfo = Names2RegMasks.find(Identifier);
if (RegMaskInfo == Names2RegMasks.end())
return nullptr;
return RegMaskInfo->getValue();
}
void MIParser::initNames2SubRegIndices() {
if (!Names2SubRegIndices.empty())
return;
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
for (unsigned I = 1, E = TRI->getNumSubRegIndices(); I < E; ++I)
Names2SubRegIndices.insert(
std::make_pair(StringRef(TRI->getSubRegIndexName(I)).lower(), I));
}
unsigned MIParser::getSubRegIndex(StringRef Name) {
initNames2SubRegIndices();
auto SubRegInfo = Names2SubRegIndices.find(Name);
if (SubRegInfo == Names2SubRegIndices.end())
return 0;
return SubRegInfo->getValue();
}
bool llvm::parseMachineInstr(MachineInstr *&MI, SourceMgr &SM,
MachineFunction &MF, StringRef Src,
const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots, SMDiagnostic &Error) {
return MIParser(SM, MF, Error, Src, PFS, IRSlots).parse(MI);
}
bool llvm::parseMBBReference(MachineBasicBlock *&MBB, SourceMgr &SM,
MachineFunction &MF, StringRef Src,
const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots, SMDiagnostic &Error) {
return MIParser(SM, MF, Error, Src, PFS, IRSlots).parseMBB(MBB);
}
bool llvm::parseNamedRegisterReference(unsigned &Reg, SourceMgr &SM,
MachineFunction &MF, StringRef Src,
const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots,
SMDiagnostic &Error) {
return MIParser(SM, MF, Error, Src, PFS, IRSlots).parseNamedRegister(Reg);
}
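// Usage sketch (editor's addition): these free functions are the entry
// points the MIR file parser calls; SM, MF, PFS and IRSlots are assumed to
// be set up by that caller, and "%bb.1.exit" is a made-up reference.
//
//   MachineBasicBlock *Succ = nullptr;
//   SMDiagnostic Diag;
//   if (llvm::parseMBBReference(Succ, SM, MF, "%bb.1.exit", PFS, IRSlots,
//                               Diag))
//     ; // Diag now describes the failure, e.g. an undefined block id.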
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/MIRParser/MIParser.h | //===- MIParser.h - Machine Instructions Parser ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the function that parses the machine instructions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_MIRPARSER_MIPARSER_H
#define LLVM_LIB_CODEGEN_MIRPARSER_MIPARSER_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
namespace llvm {
class MachineBasicBlock;
class MachineInstr;
class MachineFunction;
struct SlotMapping;
class SMDiagnostic;
class SourceMgr;
struct PerFunctionMIParsingState {
DenseMap<unsigned, MachineBasicBlock *> MBBSlots;
DenseMap<unsigned, unsigned> VirtualRegisterSlots;
};
bool parseMachineInstr(MachineInstr *&MI, SourceMgr &SM, MachineFunction &MF,
StringRef Src, const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots, SMDiagnostic &Error);
bool parseMBBReference(MachineBasicBlock *&MBB, SourceMgr &SM,
MachineFunction &MF, StringRef Src,
const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots, SMDiagnostic &Error);
bool parseNamedRegisterReference(unsigned &Reg, SourceMgr &SM,
MachineFunction &MF, StringRef Src,
const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots,
SMDiagnostic &Error);
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/MIRParser/MILexer.cpp | //===- MILexer.cpp - Machine instructions lexer implementation ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the lexing of machine instructions.
//
//===----------------------------------------------------------------------===//
#include "MILexer.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include <cctype>
using namespace llvm;
namespace {
/// This class provides a way to iterate and get characters from the source
/// string.
class Cursor {
const char *Ptr;
const char *End;
public:
Cursor(NoneType) : Ptr(nullptr), End(nullptr) {}
explicit Cursor(StringRef Str) {
Ptr = Str.data();
End = Ptr + Str.size();
}
bool isEOF() const { return Ptr == End; }
char peek(int I = 0) const { return End - Ptr <= I ? 0 : Ptr[I]; }
void advance(unsigned I = 1) { Ptr += I; }
StringRef remaining() const { return StringRef(Ptr, End - Ptr); }
StringRef upto(Cursor C) const {
assert(C.Ptr >= Ptr && C.Ptr <= End);
return StringRef(Ptr, C.Ptr - Ptr);
}
StringRef::iterator location() const { return Ptr; }
operator bool() const { return Ptr != nullptr; }
};
} // end anonymous namespace
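// Cursor semantics sketch: for Cursor C(StringRef("ab")), C.peek() == 'a',
// C.peek(1) == 'b', and C.peek(2) == 0 because reads past the end clamp to
// the null character; advance() consumes characters from the front.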
/// Skip the leading whitespace characters and return the updated cursor.
static Cursor skipWhitespace(Cursor C) {
while (isspace(C.peek()))
C.advance();
return C;
}
static bool isIdentifierChar(char C) {
return isalpha(C) || isdigit(C) || C == '_' || C == '-' || C == '.';
}
static MIToken::TokenKind getIdentifierKind(StringRef Identifier) {
return StringSwitch<MIToken::TokenKind>(Identifier)
.Case("_", MIToken::underscore)
.Case("implicit", MIToken::kw_implicit)
.Case("implicit-def", MIToken::kw_implicit_define)
.Case("dead", MIToken::kw_dead)
.Case("killed", MIToken::kw_killed)
.Case("undef", MIToken::kw_undef)
.Default(MIToken::Identifier);
}
static Cursor maybeLexIdentifier(Cursor C, MIToken &Token) {
if (!isalpha(C.peek()) && C.peek() != '_')
return None;
auto Range = C;
while (isIdentifierChar(C.peek()))
C.advance();
auto Identifier = Range.upto(C);
Token = MIToken(getIdentifierKind(Identifier), Identifier);
return C;
}
static Cursor maybeLexMachineBasicBlock(
Cursor C, MIToken &Token,
function_ref<void(StringRef::iterator Loc, const Twine &)> ErrorCallback) {
if (!C.remaining().startswith("%bb."))
return None;
auto Range = C;
C.advance(4); // Skip '%bb.'
if (!isdigit(C.peek())) {
Token = MIToken(MIToken::Error, C.remaining());
ErrorCallback(C.location(), "expected a number after '%bb.'");
return C;
}
auto NumberRange = C;
while (isdigit(C.peek()))
C.advance();
StringRef Number = NumberRange.upto(C);
unsigned StringOffset = 4 + Number.size(); // Drop '%bb.<id>'
if (C.peek() == '.') {
C.advance(); // Skip '.'
++StringOffset;
while (isIdentifierChar(C.peek()))
C.advance();
}
Token = MIToken(MIToken::MachineBasicBlock, Range.upto(C), APSInt(Number),
StringOffset);
return C;
}
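// Examples of block references the function above lexes (sketch):
//   "%bb.0"       -> id 0, empty block name
//   "%bb.1.entry" -> id 1, name "entry" (StringOffset skips over "%bb.1.")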
static Cursor lexVirtualRegister(Cursor C, MIToken &Token) {
auto Range = C;
C.advance(); // Skip '%'
auto NumberRange = C;
while (isdigit(C.peek()))
C.advance();
Token = MIToken(MIToken::VirtualRegister, Range.upto(C),
APSInt(NumberRange.upto(C)));
return C;
}
static Cursor maybeLexRegister(Cursor C, MIToken &Token) {
if (C.peek() != '%')
return None;
if (isdigit(C.peek(1)))
return lexVirtualRegister(C, Token);
auto Range = C;
C.advance(); // Skip '%'
while (isIdentifierChar(C.peek()))
C.advance();
Token = MIToken(MIToken::NamedRegister, Range.upto(C),
/*StringOffset=*/1); // Drop the '%'
return C;
}
static Cursor maybeLexGlobalValue(Cursor C, MIToken &Token) {
if (C.peek() != '@')
return None;
auto Range = C;
C.advance(); // Skip the '@'
// TODO: add support for quoted names.
if (!isdigit(C.peek())) {
while (isIdentifierChar(C.peek()))
C.advance();
Token = MIToken(MIToken::NamedGlobalValue, Range.upto(C),
/*StringOffset=*/1); // Drop the '@'
return C;
}
auto NumberRange = C;
while (isdigit(C.peek()))
C.advance();
Token =
MIToken(MIToken::GlobalValue, Range.upto(C), APSInt(NumberRange.upto(C)));
return C;
}
static Cursor maybeLexIntegerLiteral(Cursor C, MIToken &Token) {
if (!isdigit(C.peek()) && (C.peek() != '-' || !isdigit(C.peek(1))))
return None;
auto Range = C;
C.advance();
while (isdigit(C.peek()))
C.advance();
StringRef StrVal = Range.upto(C);
Token = MIToken(MIToken::IntegerLiteral, StrVal, APSInt(StrVal));
return C;
}
static MIToken::TokenKind symbolToken(char C) {
switch (C) {
case ',':
return MIToken::comma;
case '=':
return MIToken::equal;
case ':':
return MIToken::colon;
default:
return MIToken::Error;
}
}
static Cursor maybeLexSymbol(Cursor C, MIToken &Token) {
auto Kind = symbolToken(C.peek());
if (Kind == MIToken::Error)
return None;
auto Range = C;
C.advance();
Token = MIToken(Kind, Range.upto(C));
return C;
}
StringRef llvm::lexMIToken(
StringRef Source, MIToken &Token,
function_ref<void(StringRef::iterator Loc, const Twine &)> ErrorCallback) {
auto C = skipWhitespace(Cursor(Source));
if (C.isEOF()) {
Token = MIToken(MIToken::Eof, C.remaining());
return C.remaining();
}
if (Cursor R = maybeLexIdentifier(C, Token))
return R.remaining();
if (Cursor R = maybeLexMachineBasicBlock(C, Token, ErrorCallback))
return R.remaining();
if (Cursor R = maybeLexRegister(C, Token))
return R.remaining();
if (Cursor R = maybeLexGlobalValue(C, Token))
return R.remaining();
if (Cursor R = maybeLexIntegerLiteral(C, Token))
return R.remaining();
if (Cursor R = maybeLexSymbol(C, Token))
return R.remaining();
Token = MIToken(MIToken::Error, C.remaining());
ErrorCallback(C.location(),
Twine("unexpected character '") + Twine(C.peek()) + "'");
return C.remaining();
}
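// Tokenization sketch for the driver above: lexing "%eax = MOV32ri 42"
// yields NamedRegister("eax"), equal, Identifier("MOV32ri"),
// IntegerLiteral(42), and Eof once the remaining source is empty.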
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp | //===-- OcamlGCPrinter.cpp - Ocaml frametable emitter ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements printing the assembly code for an Ocaml frametable.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCs.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/GCMetadataPrinter.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
using namespace llvm;
namespace {
class OcamlGCMetadataPrinter : public GCMetadataPrinter {
public:
void beginAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) override;
void finishAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) override;
};
}
static GCMetadataPrinterRegistry::Add<OcamlGCMetadataPrinter>
Y("ocaml", "ocaml 3.10-compatible collector");
void llvm::linkOcamlGCPrinter() {}
static void EmitCamlGlobal(const Module &M, AsmPrinter &AP, const char *Id) {
const std::string &MId = M.getModuleIdentifier();
std::string SymName;
SymName += "caml";
size_t Letter = SymName.size();
SymName.append(MId.begin(), std::find(MId.begin(), MId.end(), '.'));
SymName += "__";
SymName += Id;
// Capitalize the first letter of the module name.
SymName[Letter] = toupper(SymName[Letter]);
SmallString<128> TmpStr;
Mangler::getNameWithPrefix(TmpStr, SymName, M.getDataLayout());
MCSymbol *Sym = AP.OutContext.getOrCreateSymbol(TmpStr);
AP.OutStreamer->EmitSymbolAttribute(Sym, MCSA_Global);
AP.OutStreamer->EmitLabel(Sym);
}
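// Sketch: for a module identifier "foo.ll" and Id "data_begin", the code
// above builds the symbol "camlFoo__data_begin" (modulo any target prefix
// applied by Mangler::getNameWithPrefix).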
void OcamlGCMetadataPrinter::beginAssembly(Module &M, GCModuleInfo &Info,
AsmPrinter &AP) {
AP.OutStreamer->SwitchSection(AP.getObjFileLowering().getTextSection());
EmitCamlGlobal(M, AP, "code_begin");
AP.OutStreamer->SwitchSection(AP.getObjFileLowering().getDataSection());
EmitCamlGlobal(M, AP, "data_begin");
}
/// finishAssembly - Print the frametable. The ocaml frametable format is thus:
///
/// extern "C" struct align(sizeof(intptr_t)) {
/// uint16_t NumDescriptors;
/// struct align(sizeof(intptr_t)) {
/// void *ReturnAddress;
/// uint16_t FrameSize;
/// uint16_t NumLiveOffsets;
/// uint16_t LiveOffsets[NumLiveOffsets];
/// } Descriptors[NumDescriptors];
/// } caml${module}__frametable;
///
/// Note that this precludes programs from using stack frames larger than 64K
/// (FrameSize and LiveOffsets would overflow). FrameTablePrinter will abort if
/// either condition is detected in a function that uses the GC.
///
void OcamlGCMetadataPrinter::finishAssembly(Module &M, GCModuleInfo &Info,
AsmPrinter &AP) {
unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
AP.OutStreamer->SwitchSection(AP.getObjFileLowering().getTextSection());
EmitCamlGlobal(M, AP, "code_end");
AP.OutStreamer->SwitchSection(AP.getObjFileLowering().getDataSection());
EmitCamlGlobal(M, AP, "data_end");
// FIXME: Why does ocaml emit this??
AP.OutStreamer->EmitIntValue(0, IntPtrSize);
AP.OutStreamer->SwitchSection(AP.getObjFileLowering().getDataSection());
EmitCamlGlobal(M, AP, "frametable");
int NumDescriptors = 0;
for (GCModuleInfo::FuncInfoVec::iterator I = Info.funcinfo_begin(),
IE = Info.funcinfo_end();
I != IE; ++I) {
GCFunctionInfo &FI = **I;
if (FI.getStrategy().getName() != getStrategy().getName())
// this function is managed by some other GC
continue;
for (GCFunctionInfo::iterator J = FI.begin(), JE = FI.end(); J != JE; ++J) {
NumDescriptors++;
}
}
if (NumDescriptors >= 1 << 16) {
// Very rude!
report_fatal_error(" Too much descriptor for ocaml GC");
}
AP.EmitInt16(NumDescriptors);
AP.EmitAlignment(IntPtrSize == 4 ? 2 : 3);
for (GCModuleInfo::FuncInfoVec::iterator I = Info.funcinfo_begin(),
IE = Info.funcinfo_end();
I != IE; ++I) {
GCFunctionInfo &FI = **I;
if (FI.getStrategy().getName() != getStrategy().getName())
// this function is managed by some other GC
continue;
uint64_t FrameSize = FI.getFrameSize();
if (FrameSize >= 1 << 16) {
// Very rude!
report_fatal_error("Function '" + FI.getFunction().getName() +
"' is too large for the ocaml GC! "
"Frame size " +
Twine(FrameSize) + ">= 65536.\n"
"(" +
Twine(uintptr_t(&FI)) + ")");
}
AP.OutStreamer->AddComment("live roots for " +
Twine(FI.getFunction().getName()));
AP.OutStreamer->AddBlankLine();
for (GCFunctionInfo::iterator J = FI.begin(), JE = FI.end(); J != JE; ++J) {
size_t LiveCount = FI.live_size(J);
if (LiveCount >= 1 << 16) {
// Very rude!
report_fatal_error("Function '" + FI.getFunction().getName() +
"' is too large for the ocaml GC! "
"Live root count " +
Twine(LiveCount) + " >= 65536.");
}
AP.OutStreamer->EmitSymbolValue(J->Label, IntPtrSize);
AP.EmitInt16(FrameSize);
AP.EmitInt16(LiveCount);
for (GCFunctionInfo::live_iterator K = FI.live_begin(J),
KE = FI.live_end(J);
K != KE; ++K) {
if (K->StackOffset >= 1 << 16) {
// Very rude!
report_fatal_error(
"GC root stack offset is outside of fixed stack frame and out "
"of range for ocaml GC!");
}
AP.EmitInt16(K->StackOffset);
}
AP.EmitAlignment(IntPtrSize == 4 ? 2 : 3);
}
}
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp | //===-- ErlangGCPrinter.cpp - Erlang/OTP frametable emitter -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the compiler plugin that is used in order to emit
// garbage collection information in a convenient layout for parsing and
// loading in the Erlang/OTP runtime.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/GCMetadataPrinter.h"
#include "llvm/CodeGen/GCs.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
namespace {
class ErlangGCPrinter : public GCMetadataPrinter {
public:
void finishAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) override;
};
}
static GCMetadataPrinterRegistry::Add<ErlangGCPrinter>
X("erlang", "erlang-compatible garbage collector");
void llvm::linkErlangGCPrinter() {}
void ErlangGCPrinter::finishAssembly(Module &M, GCModuleInfo &Info,
AsmPrinter &AP) {
MCStreamer &OS = *AP.OutStreamer;
unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
// Put this in a custom .note section.
OS.SwitchSection(
AP.getObjFileLowering().getContext().getELFSection(".note.gc",
ELF::SHT_PROGBITS, 0));
// For each function...
for (GCModuleInfo::FuncInfoVec::iterator FI = Info.funcinfo_begin(),
IE = Info.funcinfo_end();
FI != IE; ++FI) {
GCFunctionInfo &MD = **FI;
if (MD.getStrategy().getName() != getStrategy().getName())
// this function is managed by some other GC
continue;
/** A compact GC layout. Emit this data structure:
*
* struct {
* int16_t PointCount;
* void *SafePointAddress[PointCount];
* int16_t StackFrameSize; (in words)
* int16_t StackArity;
* int16_t LiveCount;
* int16_t LiveOffsets[LiveCount];
* } __gcmap_<FUNCTIONNAME>;
**/
// Align to address width.
AP.EmitAlignment(IntPtrSize == 4 ? 2 : 3);
// Emit PointCount.
OS.AddComment("safe point count");
AP.EmitInt16(MD.size());
// And each safe point...
for (GCFunctionInfo::iterator PI = MD.begin(), PE = MD.end(); PI != PE;
++PI) {
// Emit the address of the safe point.
OS.AddComment("safe point address");
MCSymbol *Label = PI->Label;
AP.EmitLabelPlusOffset(Label /*Hi*/, 0 /*Offset*/, 4 /*Size*/);
}
    // Stack information never changes across safe points! Only print info
    // from the first call-site.
GCFunctionInfo::iterator PI = MD.begin();
// Emit the stack frame size.
OS.AddComment("stack frame size (in words)");
AP.EmitInt16(MD.getFrameSize() / IntPtrSize);
// Emit stack arity, i.e. the number of stacked arguments.
unsigned RegisteredArgs = IntPtrSize == 4 ? 5 : 6;
unsigned StackArity = MD.getFunction().arg_size() > RegisteredArgs
? MD.getFunction().arg_size() - RegisteredArgs
: 0;
OS.AddComment("stack arity");
AP.EmitInt16(StackArity);
// Emit the number of live roots in the function.
OS.AddComment("live root count");
AP.EmitInt16(MD.live_size(PI));
// And for each live root...
for (GCFunctionInfo::live_iterator LI = MD.live_begin(PI),
LE = MD.live_end(PI);
LI != LE; ++LI) {
// Emit live root's offset within the stack frame.
OS.AddComment("stack index (offset / wordsize)");
AP.EmitInt16(LI->StackOffset / IntPtrSize);
}
}
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/AddressPool.cpp | //===-- llvm/CodeGen/AddressPool.cpp - Dwarf Debug Framework ---*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "AddressPool.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
using namespace llvm;
class MCExpr;
unsigned AddressPool::getIndex(const MCSymbol *Sym, bool TLS) {
HasBeenUsed = true;
auto IterBool =
Pool.insert(std::make_pair(Sym, AddressPoolEntry(Pool.size(), TLS)));
return IterBool.first->second.Number;
}
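// Note: the insert above does not overwrite an existing entry, so indices
// are stable across repeated queries; e.g. getIndex(A) == 0,
// getIndex(B) == 1, and a second getIndex(A) still returns 0.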
// Emit addresses into the section given.
void AddressPool::emit(AsmPrinter &Asm, MCSection *AddrSection) {
if (Pool.empty())
return;
// Start the dwarf addr section.
Asm.OutStreamer->SwitchSection(AddrSection);
// Order the address pool entries by ID
SmallVector<const MCExpr *, 64> Entries(Pool.size());
for (const auto &I : Pool)
Entries[I.second.Number] =
I.second.TLS
? Asm.getObjFileLowering().getDebugThreadLocalSymbol(I.first)
: MCSymbolRefExpr::create(I.first, Asm.OutContext);
for (const MCExpr *Entry : Entries)
Asm.OutStreamer->EmitValue(Entry, Asm.getDataLayout().getPointerSize());
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h | //===-- lib/CodeGen/AsmPrinter/AsmPrinterHandler.h -------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a generic interface for AsmPrinter handlers,
// like debug and EH info emitters.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_ASMPRINTERHANDLER_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_ASMPRINTERHANDLER_H
#include "llvm/Support/DataTypes.h"
namespace llvm {
class MachineFunction;
class MachineInstr;
class MCSymbol;
/// \brief Collects and handles AsmPrinter objects required to build debug
/// or EH information.
class AsmPrinterHandler {
public:
virtual ~AsmPrinterHandler();
/// \brief For symbols that have a size designated (e.g. common symbols),
/// this tracks that size.
virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0;
/// \brief Emit all sections that should come after the content.
virtual void endModule() = 0;
/// \brief Gather pre-function debug information.
/// Every beginFunction(MF) call should be followed by an endFunction(MF)
/// call.
virtual void beginFunction(const MachineFunction *MF) = 0;
  /// \brief Emit any end-of-function markers (like .cfi_endproc). This is
  /// called before endFunction and cannot switch sections.
virtual void markFunctionEnd();
/// \brief Gather post-function debug information.
/// Please note that some AsmPrinter implementations may not call
/// beginFunction at all.
virtual void endFunction(const MachineFunction *MF) = 0;
/// \brief Process beginning of an instruction.
virtual void beginInstruction(const MachineInstr *MI) = 0;
/// \brief Process end of an instruction.
virtual void endInstruction() = 0;
};
} // End of namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp | //===-- AsmPrinterInlineAsm.cpp - AsmPrinter Inline Asm Handling ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the inline assembler pieces of the AsmPrinter class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
namespace {
struct SrcMgrDiagInfo {
const MDNode *LocInfo;
LLVMContext::InlineAsmDiagHandlerTy DiagHandler;
void *DiagContext;
};
}
/// srcMgrDiagHandler - This callback is invoked when the SourceMgr for an
/// inline asm has an error in it. diagInfo is a pointer to the SrcMgrDiagInfo
/// struct above.
static void srcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) {
SrcMgrDiagInfo *DiagInfo = static_cast<SrcMgrDiagInfo *>(diagInfo);
assert(DiagInfo && "Diagnostic context not passed down?");
// If the inline asm had metadata associated with it, pull out a location
// cookie corresponding to which line the error occurred on.
unsigned LocCookie = 0;
if (const MDNode *LocInfo = DiagInfo->LocInfo) {
unsigned ErrorLine = Diag.getLineNo()-1;
if (ErrorLine >= LocInfo->getNumOperands())
ErrorLine = 0;
if (LocInfo->getNumOperands() != 0)
if (const ConstantInt *CI =
mdconst::dyn_extract<ConstantInt>(LocInfo->getOperand(ErrorLine)))
LocCookie = CI->getZExtValue();
}
DiagInfo->DiagHandler(Diag, DiagInfo->DiagContext, LocCookie);
}
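// Sketch of where the cookie comes from: inline asm carries !srcloc
// metadata in the IR, e.g.
//   call void asm sideeffect "bogus", ""(), !srcloc !0
//   !0 = !{i32 42}
// so an error on the asm's first line hands cookie 42 to the handler.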
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
void AsmPrinter::EmitInlineAsm(StringRef Str, const MCSubtargetInfo &STI,
const MCTargetOptions &MCOptions,
const MDNode *LocMDNode,
InlineAsm::AsmDialect Dialect) const {
assert(!Str.empty() && "Can't emit empty inline asm block");
// Remember if the buffer is nul terminated or not so we can avoid a copy.
bool isNullTerminated = Str.back() == 0;
if (isNullTerminated)
Str = Str.substr(0, Str.size()-1);
// If the output streamer does not have mature MC support or the integrated
// assembler has been disabled, just emit the blob textually.
// Otherwise parse the asm and emit it via MC support.
// This is useful in case the asm parser doesn't handle something but the
// system assembler does.
const MCAsmInfo *MCAI = TM.getMCAsmInfo();
assert(MCAI && "No MCAsmInfo");
if (!MCAI->useIntegratedAssembler() &&
!OutStreamer->isIntegratedAssemblerRequired()) {
emitInlineAsmStart();
OutStreamer->EmitRawText(Str);
emitInlineAsmEnd(STI, nullptr);
return;
}
SourceMgr SrcMgr;
SrcMgrDiagInfo DiagInfo;
// If the current LLVMContext has an inline asm handler, set it in SourceMgr.
LLVMContext &LLVMCtx = MMI->getModule()->getContext();
bool HasDiagHandler = false;
if (LLVMCtx.getInlineAsmDiagnosticHandler() != nullptr) {
// If the source manager has an issue, we arrange for srcMgrDiagHandler
// to be invoked, getting DiagInfo passed into it.
DiagInfo.LocInfo = LocMDNode;
DiagInfo.DiagHandler = LLVMCtx.getInlineAsmDiagnosticHandler();
DiagInfo.DiagContext = LLVMCtx.getInlineAsmDiagnosticContext();
SrcMgr.setDiagHandler(srcMgrDiagHandler, &DiagInfo);
HasDiagHandler = true;
}
std::unique_ptr<MemoryBuffer> Buffer;
if (isNullTerminated)
Buffer = MemoryBuffer::getMemBuffer(Str, "<inline asm>");
else
Buffer = MemoryBuffer::getMemBufferCopy(Str, "<inline asm>");
// Tell SrcMgr about this buffer, it takes ownership of the buffer.
SrcMgr.AddNewSourceBuffer(std::move(Buffer), SMLoc());
std::unique_ptr<MCAsmParser> Parser(
createMCAsmParser(SrcMgr, OutContext, *OutStreamer, *MAI));
// Create a temporary copy of the original STI because the parser may modify
// it. For example, when switching between arm and thumb mode. If the target
// needs to emit code to return to the original state it can do so in
// emitInlineAsmEnd().
MCSubtargetInfo TmpSTI = STI;
// We create a new MCInstrInfo here since we might be at the module level
// and not have a MachineFunction to initialize the TargetInstrInfo from and
// we only need MCInstrInfo for asm parsing. We create one unconditionally
// because it's not subtarget dependent.
std::unique_ptr<MCInstrInfo> MII(TM.getTarget().createMCInstrInfo());
std::unique_ptr<MCTargetAsmParser> TAP(TM.getTarget().createMCAsmParser(
TmpSTI, *Parser, *MII, MCOptions));
if (!TAP)
report_fatal_error("Inline asm not supported by this streamer because"
" we don't have an asm parser for this target\n");
Parser->setAssemblerDialect(Dialect);
Parser->setTargetParser(*TAP.get());
if (MF) {
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
TAP->SetFrameRegister(TRI->getFrameRegister(*MF));
}
emitInlineAsmStart();
// Don't implicitly switch to the text section before the asm.
int Res = Parser->Run(/*NoInitialTextSection*/ true,
/*NoFinalize*/ true);
emitInlineAsmEnd(STI, &TmpSTI);
if (Res && !HasDiagHandler)
report_fatal_error("Error parsing inline asm\n");
}
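// Editorial sketch (not part of the original source): the nul-termination
// check above lets an already-terminated blob be wrapped without a copy,
// assuming only the llvm/Support/MemoryBuffer.h API. `Asm` here is a
// hypothetical input:
//
//   StringRef Asm = ...;
//   std::unique_ptr<MemoryBuffer> Buf =
//       (!Asm.empty() && Asm.back() == 0)
//           ? MemoryBuffer::getMemBuffer(Asm.drop_back(), "<inline asm>")
//           : MemoryBuffer::getMemBufferCopy(Asm, "<inline asm>");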
static void EmitMSInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
MachineModuleInfo *MMI, int InlineAsmVariant,
AsmPrinter *AP, unsigned LocCookie,
raw_ostream &OS) {
// Switch to the inline assembly variant.
OS << "\t.intel_syntax\n\t";
const char *LastEmitted = AsmStr; // One past the last character emitted.
unsigned NumOperands = MI->getNumOperands();
while (*LastEmitted) {
switch (*LastEmitted) {
default: {
// Not a special case, emit the string section literally.
const char *LiteralEnd = LastEmitted+1;
while (*LiteralEnd && *LiteralEnd != '{' && *LiteralEnd != '|' &&
*LiteralEnd != '}' && *LiteralEnd != '$' && *LiteralEnd != '\n')
++LiteralEnd;
OS.write(LastEmitted, LiteralEnd-LastEmitted);
LastEmitted = LiteralEnd;
break;
}
case '\n':
++LastEmitted; // Consume newline character.
OS << '\n'; // Emit the newline; it starts a new asm statement.
break;
case '$': {
++LastEmitted; // Consume '$' character.
bool Done = true;
// Handle escapes.
switch (*LastEmitted) {
default: Done = false; break;
case '$':
++LastEmitted; // Consume second '$' character.
break;
}
if (Done) break;
const char *IDStart = LastEmitted;
const char *IDEnd = IDStart;
while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd;
unsigned Val;
if (StringRef(IDStart, IDEnd-IDStart).getAsInteger(10, Val))
report_fatal_error("Bad $ operand number in inline asm string: '" +
Twine(AsmStr) + "'");
LastEmitted = IDEnd;
if (Val >= NumOperands-1)
report_fatal_error("Invalid $ operand number in inline asm string: '" +
Twine(AsmStr) + "'");
// Okay, we finally have a value number. Ask the target to print this
// operand!
unsigned OpNo = InlineAsm::MIOp_FirstOperand;
bool Error = false;
// Scan to find the machine operand number for the operand.
for (; Val; --Val) {
if (OpNo >= MI->getNumOperands()) break;
unsigned OpFlags = MI->getOperand(OpNo).getImm();
OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1;
}
// The instruction may have location metadata attached at its very end,
// but we should never see metadata at any other operand position while
// processing. If we do, it's an error.
if (OpNo >= MI->getNumOperands() ||
MI->getOperand(OpNo).isMetadata()) {
Error = true;
} else {
unsigned OpFlags = MI->getOperand(OpNo).getImm();
++OpNo; // Skip over the ID number.
if (InlineAsm::isMemKind(OpFlags)) {
Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
/*Modifier*/ nullptr, OS);
} else {
Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant,
/*Modifier*/ nullptr, OS);
}
}
if (Error) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "invalid operand in inline asm: '" << AsmStr << "'";
MMI->getModule()->getContext().emitError(LocCookie, Msg.str());
}
break;
}
}
}
OS << "\n\t.att_syntax\n" << (char)0; // null terminate string.
}
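// Editorial sketch: the "$<N>" resolution above can be read as this
// standalone helper (the name is invented; it assumes the INLINEASM operand
// layout of an asm-string operand followed by flag/value groups):
//
//   static unsigned skipToInlineAsmOperand(const MachineInstr &MI,
//                                          unsigned Val) {
//     unsigned OpNo = InlineAsm::MIOp_FirstOperand;
//     for (; Val && OpNo < MI.getNumOperands(); --Val)
//       OpNo += InlineAsm::getNumOperandRegisters(
//                   MI.getOperand(OpNo).getImm()) + 1;
//     return OpNo; // Flag word of the requested operand, if in range.
//   }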
static void EmitGCCInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
MachineModuleInfo *MMI, int InlineAsmVariant,
int AsmPrinterVariant, AsmPrinter *AP,
unsigned LocCookie, raw_ostream &OS) {
int CurVariant = -1; // The number of the {.|.|.} region we are in.
const char *LastEmitted = AsmStr; // One past the last character emitted.
unsigned NumOperands = MI->getNumOperands();
OS << '\t';
while (*LastEmitted) {
switch (*LastEmitted) {
default: {
// Not a special case, emit the string section literally.
const char *LiteralEnd = LastEmitted+1;
while (*LiteralEnd && *LiteralEnd != '{' && *LiteralEnd != '|' &&
*LiteralEnd != '}' && *LiteralEnd != '$' && *LiteralEnd != '\n')
++LiteralEnd;
if (CurVariant == -1 || CurVariant == AsmPrinterVariant)
OS.write(LastEmitted, LiteralEnd-LastEmitted);
LastEmitted = LiteralEnd;
break;
}
case '\n':
++LastEmitted; // Consume newline character.
OS << '\n'; // Emit the newline; it starts a new asm statement.
break;
case '$': {
++LastEmitted; // Consume '$' character.
bool Done = true;
// Handle escapes.
switch (*LastEmitted) {
default: Done = false; break;
case '$': // $$ -> $
if (CurVariant == -1 || CurVariant == AsmPrinterVariant)
OS << '$';
++LastEmitted; // Consume second '$' character.
break;
case '(': // $( -> same as GCC's { character.
++LastEmitted; // Consume '(' character.
if (CurVariant != -1)
report_fatal_error("Nested variants found in inline asm string: '" +
Twine(AsmStr) + "'");
CurVariant = 0; // We're in the first variant now.
break;
case '|':
++LastEmitted; // consume '|' character.
if (CurVariant == -1)
OS << '|'; // this is gcc's behavior for | outside a variant
else
++CurVariant; // We're in the next variant.
break;
case ')': // $) -> same as GCC's } char.
++LastEmitted; // consume ')' character.
if (CurVariant == -1)
OS << '}'; // this is gcc's behavior for } outside a variant
else
CurVariant = -1;
break;
}
if (Done) break;
bool HasCurlyBraces = false;
if (*LastEmitted == '{') { // ${variable}
++LastEmitted; // Consume '{' character.
HasCurlyBraces = true;
}
// If we have ${:foo}, then this is not a real operand reference, it is a
// "magic" string reference, just like in .td files. Arrange to call
// PrintSpecial.
if (HasCurlyBraces && *LastEmitted == ':') {
++LastEmitted;
const char *StrStart = LastEmitted;
const char *StrEnd = strchr(StrStart, '}');
if (!StrEnd)
report_fatal_error("Unterminated ${:foo} operand in inline asm"
" string: '" + Twine(AsmStr) + "'");
std::string Val(StrStart, StrEnd);
AP->PrintSpecial(MI, OS, Val.c_str());
LastEmitted = StrEnd+1;
break;
}
const char *IDStart = LastEmitted;
const char *IDEnd = IDStart;
while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd;
unsigned Val;
if (StringRef(IDStart, IDEnd-IDStart).getAsInteger(10, Val))
report_fatal_error("Bad $ operand number in inline asm string: '" +
Twine(AsmStr) + "'");
LastEmitted = IDEnd;
char Modifier[2] = { 0, 0 };
if (HasCurlyBraces) {
// If we have curly braces, check for a modifier character. This
// supports syntax like ${0:u}, which corresponds to "%u0" in GCC asm.
if (*LastEmitted == ':') {
++LastEmitted; // Consume ':' character.
if (*LastEmitted == 0)
report_fatal_error("Bad ${:} expression in inline asm string: '" +
Twine(AsmStr) + "'");
Modifier[0] = *LastEmitted;
++LastEmitted; // Consume modifier character.
}
if (*LastEmitted != '}')
report_fatal_error("Bad ${} expression in inline asm string: '" +
Twine(AsmStr) + "'");
++LastEmitted; // Consume '}' character.
}
if (Val >= NumOperands-1)
report_fatal_error("Invalid $ operand number in inline asm string: '" +
Twine(AsmStr) + "'");
// Okay, we finally have a value number. Ask the target to print this
// operand!
if (CurVariant == -1 || CurVariant == AsmPrinterVariant) {
unsigned OpNo = InlineAsm::MIOp_FirstOperand;
bool Error = false;
// Scan to find the machine operand number for the operand.
for (; Val; --Val) {
if (OpNo >= MI->getNumOperands()) break;
unsigned OpFlags = MI->getOperand(OpNo).getImm();
OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1;
}
// The instruction may have location metadata attached at its very end,
// but we should never see metadata at any other operand position while
// processing. If we do, it's an error.
if (OpNo >= MI->getNumOperands() ||
MI->getOperand(OpNo).isMetadata()) {
Error = true;
} else {
unsigned OpFlags = MI->getOperand(OpNo).getImm();
++OpNo; // Skip over the ID number.
if (Modifier[0] == 'l') { // Labels are target independent.
// FIXME: What if the operand isn't an MBB, report error?
const MCSymbol *Sym = MI->getOperand(OpNo).getMBB()->getSymbol();
Sym->print(OS, AP->MAI);
} else {
if (InlineAsm::isMemKind(OpFlags)) {
Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
Modifier[0] ? Modifier : nullptr,
OS);
} else {
Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant,
Modifier[0] ? Modifier : nullptr, OS);
}
}
}
if (Error) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "invalid operand in inline asm: '" << AsmStr << "'";
MMI->getModule()->getContext().emitError(LocCookie, Msg.str());
}
}
break;
}
}
}
OS << '\n' << (char)0; // null terminate string.
}
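// Editorial example: with AsmPrinterVariant == 0, the variant syntax handled
// above turns
//   "mov $(eax$|ax$), $0"
// into "mov eax, <operand 0>", while variant 1 would pick "ax"; "$$" always
// collapses to a literal '$' regardless of the variant in effect.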
/// EmitInlineAsm - This method formats and emits the specified machine
/// instruction that is an inline asm.
void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
assert(MI->isInlineAsm() && "EmitInlineAsm only works on inline asms");
// Count the number of register definitions to find the asm string.
unsigned NumDefs = 0;
for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
++NumDefs)
assert(NumDefs != MI->getNumOperands()-2 && "No asm string?");
assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
// Disassemble the AsmStr, printing out the literal pieces, the operands, etc.
const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
// If this asmstr is empty, just print the #APP/#NOAPP markers.
// These are useful for seeing where empty asm blocks wound up.
if (AsmStr[0] == 0) {
OutStreamer->emitRawComment(MAI->getInlineAsmStart());
OutStreamer->emitRawComment(MAI->getInlineAsmEnd());
return;
}
// Emit the #APP start marker. This has to happen even if verbose-asm isn't
// enabled, so we use emitRawComment.
OutStreamer->emitRawComment(MAI->getInlineAsmStart());
// Get the !srcloc metadata node if we have it, and decode the loc cookie from
// it.
unsigned LocCookie = 0;
const MDNode *LocMD = nullptr;
for (unsigned i = MI->getNumOperands(); i != 0; --i) {
if (MI->getOperand(i-1).isMetadata() &&
(LocMD = MI->getOperand(i-1).getMetadata()) &&
LocMD->getNumOperands() != 0) {
if (const ConstantInt *CI =
mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
LocCookie = CI->getZExtValue();
break;
}
}
}
// Emit the inline asm to a temporary string so we can emit it through
// EmitInlineAsm.
SmallString<256> StringData;
raw_svector_ostream OS(StringData);
// The variant of the current asmprinter.
int AsmPrinterVariant = MAI->getAssemblerDialect();
InlineAsm::AsmDialect InlineAsmVariant = MI->getInlineAsmDialect();
AsmPrinter *AP = const_cast<AsmPrinter*>(this);
if (InlineAsmVariant == InlineAsm::AD_ATT)
EmitGCCInlineAsmStr(AsmStr, MI, MMI, InlineAsmVariant, AsmPrinterVariant,
AP, LocCookie, OS);
else
EmitMSInlineAsmStr(AsmStr, MI, MMI, InlineAsmVariant, AP, LocCookie, OS);
// Reset SanitizeAddress based on the function's attribute.
MCTargetOptions MCOptions = TM.Options.MCOptions;
MCOptions.SanitizeAddress =
MF->getFunction()->hasFnAttribute(Attribute::SanitizeAddress);
EmitInlineAsm(OS.str(), getSubtargetInfo(), MCOptions, LocMD,
MI->getInlineAsmDialect());
// Emit the #NOAPP end marker. This has to happen even if verbose-asm isn't
// enabled, so we use emitRawComment.
OutStreamer->emitRawComment(MAI->getInlineAsmEnd());
}
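// Editorial example: the !srcloc scan above matches IR such as
//   call void asm sideeffect "bogus instruction", ""(), !srcloc !0
//   !0 = !{i32 47}
// where !0's leading i32 becomes LocCookie (47), letting later diagnostics
// point back at the originating source construct.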
/// PrintSpecial - Print information related to the specified machine instr
/// that is independent of the operand, and may be independent of the instr
/// itself. This can be useful for portably encoding the comment character
/// or other bits of target-specific knowledge into the asmstrings. The
/// syntax used is ${:comment}. Targets can override this to add support
/// for their own strange codes.
void AsmPrinter::PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
const char *Code) const {
const DataLayout *DL = TM.getDataLayout();
if (!strcmp(Code, "private")) {
OS << DL->getPrivateGlobalPrefix();
} else if (!strcmp(Code, "comment")) {
OS << MAI->getCommentString();
} else if (!strcmp(Code, "uid")) {
// Comparing the address of MI isn't sufficient, because MachineInstrs may
// be allocated at the same address across functions.
// If this is a new instruction, or the same one in a new function, bump
// the counter.
if (LastMI != MI || LastFn != getFunctionNumber()) {
++Counter;
LastMI = MI;
LastFn = getFunctionNumber();
}
OS << Counter;
} else {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "Unknown special formatter '" << Code
<< "' for machine instr: " << *MI;
report_fatal_error(Msg.str());
}
}
/// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
/// instruction, using the specified assembler variant. Targets should
/// override this to format as appropriate.
bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O) {
// Does this asm operand have a single letter operand modifier?
if (ExtraCode && ExtraCode[0]) {
if (ExtraCode[1] != 0) return true; // Unknown modifier.
const MachineOperand &MO = MI->getOperand(OpNo);
switch (ExtraCode[0]) {
default:
return true; // Unknown modifier.
case 'c': // Substitute immediate value without immediate syntax
if (MO.getType() != MachineOperand::MO_Immediate)
return true;
O << MO.getImm();
return false;
case 'n': // Negate the immediate constant.
if (MO.getType() != MachineOperand::MO_Immediate)
return true;
O << -MO.getImm();
return false;
}
}
return true;
}
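// Editorial example: these generic modifiers surface in GCC-style inline asm
// as "%c<N>" and "%n<N>"; e.g. with an immediate operand 42,
//   asm("foo %c0" :: "i"(42)); // prints "foo 42" (no immediate prefix)
//   asm("foo %n0" :: "i"(42)); // prints "foo -42"
// Any other single-letter (or longer) modifier falls through and returns
// true, signalling an unknown modifier.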
bool AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant,
const char *ExtraCode, raw_ostream &O) {
// Target doesn't support this yet!
return true;
}
void AsmPrinter::emitInlineAsmStart() const {}
void AsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
const MCSubtargetInfo *EndInfo) const {}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h | //===-- llvm/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h ----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DBGVALUEHISTORYCALCULATOR_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_DBGVALUEHISTORYCALCULATOR_H
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
namespace llvm {
class MachineFunction;
class MachineInstr;
class DILocalVariable;
class DILocation;
class TargetRegisterInfo;
// For each user variable, keep a list of instruction ranges where this variable
// is accessible. The variables are listed in order of appearance.
class DbgValueHistoryMap {
// Each instruction range starts with a DBG_VALUE instruction, specifying the
// location of a variable, which is assumed to be valid until the end of the
// range. If end is not specified, location is valid until the start
// instruction of the next instruction range, or until the end of the
// function.
public:
typedef std::pair<const MachineInstr *, const MachineInstr *> InstrRange;
typedef SmallVector<InstrRange, 4> InstrRanges;
typedef std::pair<const DILocalVariable *, const DILocation *>
InlinedVariable;
typedef MapVector<InlinedVariable, InstrRanges> InstrRangesMap;
private:
InstrRangesMap VarInstrRanges;
public:
void startInstrRange(InlinedVariable Var, const MachineInstr &MI);
void endInstrRange(InlinedVariable Var, const MachineInstr &MI);
// Returns the register currently describing @Var. If @Var is currently
// inaccessible or is not described by a register, returns 0.
unsigned getRegisterForVar(InlinedVariable Var) const;
bool empty() const { return VarInstrRanges.empty(); }
void clear() { VarInstrRanges.clear(); }
InstrRangesMap::const_iterator begin() const { return VarInstrRanges.begin(); }
InstrRangesMap::const_iterator end() const { return VarInstrRanges.end(); }
};
void calculateDbgValueHistory(const MachineFunction *MF,
const TargetRegisterInfo *TRI,
DbgValueHistoryMap &Result);
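// Editorial usage sketch (assuming, per the comment on InstrRange above,
// that a null second element means the location stays valid to the end of
// the enclosing range or function):
//
//   DbgValueHistoryMap Map;
//   calculateDbgValueHistory(MF, TRI, Map);
//   for (const auto &VarRanges : Map)        // (InlinedVariable, InstrRanges)
//     for (const auto &R : VarRanges.second) // (begin DBG_VALUE, end or null)
//       if (const MachineInstr *End = R.second)
//         (void)End; // Range is closed by this instruction.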
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp | //===-- AsmPrinterDwarf.cpp - AsmPrinter Dwarf Support --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Dwarf emissions parts of AsmPrinter.
//
//===----------------------------------------------------------------------===//
#include "ByteStreamer.h"
#include "DwarfDebug.h"
#include "DwarfExpression.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/DIE.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
//===----------------------------------------------------------------------===//
// Dwarf Emission Helper Routines
//===----------------------------------------------------------------------===//
/// EmitSLEB128 - emit the specified signed leb128 value.
void AsmPrinter::EmitSLEB128(int64_t Value, const char *Desc) const {
if (isVerbose() && Desc)
OutStreamer->AddComment(Desc);
OutStreamer->EmitSLEB128IntValue(Value);
}
/// EmitULEB128 - emit the specified unsigned leb128 value.
void AsmPrinter::EmitULEB128(uint64_t Value, const char *Desc,
unsigned PadTo) const {
if (isVerbose() && Desc)
OutStreamer->AddComment(Desc);
OutStreamer->EmitULEB128IntValue(Value, PadTo);
}
/// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
void AsmPrinter::EmitCFAByte(unsigned Val) const {
if (isVerbose()) {
if (Val >= dwarf::DW_CFA_offset && Val < dwarf::DW_CFA_offset + 64)
OutStreamer->AddComment("DW_CFA_offset + Reg (" +
Twine(Val - dwarf::DW_CFA_offset) + ")");
else
OutStreamer->AddComment(dwarf::CallFrameString(Val));
}
OutStreamer->EmitIntValue(Val, 1);
}
static const char *DecodeDWARFEncoding(unsigned Encoding) {
switch (Encoding) {
case dwarf::DW_EH_PE_absptr:
return "absptr";
case dwarf::DW_EH_PE_omit:
return "omit";
case dwarf::DW_EH_PE_pcrel:
return "pcrel";
case dwarf::DW_EH_PE_udata4:
return "udata4";
case dwarf::DW_EH_PE_udata8:
return "udata8";
case dwarf::DW_EH_PE_sdata4:
return "sdata4";
case dwarf::DW_EH_PE_sdata8:
return "sdata8";
case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata4:
return "pcrel udata4";
case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4:
return "pcrel sdata4";
case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8:
return "pcrel udata8";
case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata8:
return "pcrel sdata8";
case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata4:
return "indirect pcrel udata4";
case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4:
return "indirect pcrel sdata4";
case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8:
return "indirect pcrel udata8";
case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata8:
return "indirect pcrel sdata8";
}
return "<unknown encoding>";
}
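// Editorial example: encodings compose by bitwise OR, so
//   dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
// decodes to "indirect pcrel sdata4" here, and GetSizeOfEncodedValue below
// reports 4 bytes for it, since only the low three bits select the data
// size.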
/// EmitEncodingByte - Emit a .byte 42 directive that corresponds to an
/// encoding. If verbose assembly output is enabled, we output comments
/// describing the encoding. Desc is an optional string saying what the
/// encoding is specifying (e.g. "LSDA").
void AsmPrinter::EmitEncodingByte(unsigned Val, const char *Desc) const {
if (isVerbose()) {
if (Desc)
OutStreamer->AddComment(Twine(Desc) + " Encoding = " +
Twine(DecodeDWARFEncoding(Val)));
else
OutStreamer->AddComment(Twine("Encoding = ") + DecodeDWARFEncoding(Val));
}
OutStreamer->EmitIntValue(Val, 1);
}
/// GetSizeOfEncodedValue - Return the size of the encoding in bytes.
unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const {
if (Encoding == dwarf::DW_EH_PE_omit)
return 0;
switch (Encoding & 0x07) {
default:
llvm_unreachable("Invalid encoded value.");
case dwarf::DW_EH_PE_absptr:
return TM.getDataLayout()->getPointerSize();
case dwarf::DW_EH_PE_udata2:
return 2;
case dwarf::DW_EH_PE_udata4:
return 4;
case dwarf::DW_EH_PE_udata8:
return 8;
}
}
void AsmPrinter::EmitTTypeReference(const GlobalValue *GV,
unsigned Encoding) const {
if (GV) {
const TargetLoweringObjectFile &TLOF = getObjFileLowering();
const MCExpr *Exp =
TLOF.getTTypeGlobalReference(GV, Encoding, *Mang, TM, MMI,
*OutStreamer);
OutStreamer->EmitValue(Exp, GetSizeOfEncodedValue(Encoding));
} else
OutStreamer->EmitIntValue(0, GetSizeOfEncodedValue(Encoding));
}
void AsmPrinter::emitDwarfSymbolReference(const MCSymbol *Label,
bool ForceOffset) const {
if (!ForceOffset) {
// On COFF targets, we have to emit the special .secrel32 directive.
if (MAI->needsDwarfSectionOffsetDirective()) {
OutStreamer->EmitCOFFSecRel32(Label);
return;
}
// If the format uses relocations with dwarf, refer to the symbol directly.
if (MAI->doesDwarfUseRelocationsAcrossSections()) {
OutStreamer->EmitSymbolValue(Label, 4);
return;
}
}
// Otherwise, emit it as a label difference from the start of the section.
EmitLabelDifference(Label, Label->getSection().getBeginSymbol(), 4);
}
void AsmPrinter::emitDwarfStringOffset(DwarfStringPoolEntryRef S) const {
if (MAI->doesDwarfUseRelocationsAcrossSections()) {
emitDwarfSymbolReference(S.getSymbol());
return;
}
// Just emit the offset directly; no need for symbol math.
EmitInt32(S.getOffset());
}
/// EmitDwarfRegOp - Emit dwarf register operation.
void AsmPrinter::EmitDwarfRegOp(ByteStreamer &Streamer,
const MachineLocation &MLoc) const {
DebugLocDwarfExpression Expr(*MF->getSubtarget().getRegisterInfo(),
getDwarfDebug()->getDwarfVersion(), Streamer);
const MCRegisterInfo *MRI = MMI->getContext().getRegisterInfo();
int Reg = MRI->getDwarfRegNum(MLoc.getReg(), false);
if (Reg < 0) {
// We assume that pointers are always in an addressable register.
if (MLoc.isIndirect())
// FIXME: We have no reasonable way of handling errors in here. The
// caller might be in the middle of a dwarf expression. We should
// probably assert that Reg >= 0 once debug info generation is more
// mature.
return Expr.EmitOp(dwarf::DW_OP_nop,
"nop (could not find a dwarf register number)");
// Attempt to find a valid super- or sub-register.
if (!Expr.AddMachineRegPiece(MLoc.getReg()))
Expr.EmitOp(dwarf::DW_OP_nop,
"nop (could not find a dwarf register number)");
return;
}
if (MLoc.isIndirect())
Expr.AddRegIndirect(Reg, MLoc.getOffset());
else
Expr.AddReg(Reg);
}
//===----------------------------------------------------------------------===//
// Dwarf Lowering Routines
//===----------------------------------------------------------------------===//
void AsmPrinter::emitCFIInstruction(const MCCFIInstruction &Inst) const {
switch (Inst.getOperation()) {
default:
llvm_unreachable("Unexpected instruction");
case MCCFIInstruction::OpDefCfaOffset:
OutStreamer->EmitCFIDefCfaOffset(Inst.getOffset());
break;
case MCCFIInstruction::OpDefCfa:
OutStreamer->EmitCFIDefCfa(Inst.getRegister(), Inst.getOffset());
break;
case MCCFIInstruction::OpDefCfaRegister:
OutStreamer->EmitCFIDefCfaRegister(Inst.getRegister());
break;
case MCCFIInstruction::OpOffset:
OutStreamer->EmitCFIOffset(Inst.getRegister(), Inst.getOffset());
break;
case MCCFIInstruction::OpRegister:
OutStreamer->EmitCFIRegister(Inst.getRegister(), Inst.getRegister2());
break;
case MCCFIInstruction::OpWindowSave:
OutStreamer->EmitCFIWindowSave();
break;
case MCCFIInstruction::OpSameValue:
OutStreamer->EmitCFISameValue(Inst.getRegister());
break;
}
}
void AsmPrinter::emitDwarfDIE(const DIE &Die) const {
// Emit the code (index) for the abbreviation.
if (isVerbose())
OutStreamer->AddComment("Abbrev [" + Twine(Die.getAbbrevNumber()) + "] 0x" +
Twine::utohexstr(Die.getOffset()) + ":0x" +
Twine::utohexstr(Die.getSize()) + " " +
dwarf::TagString(Die.getTag()));
EmitULEB128(Die.getAbbrevNumber());
// Emit the DIE attribute values.
for (const auto &V : Die.values()) {
dwarf::Attribute Attr = V.getAttribute();
assert(V.getForm() && "Too many attributes for DIE (check abbreviation)");
if (isVerbose()) {
OutStreamer->AddComment(dwarf::AttributeString(Attr));
if (Attr == dwarf::DW_AT_accessibility)
OutStreamer->AddComment(
dwarf::AccessibilityString(V.getDIEInteger().getValue()));
}
// Emit an attribute using the defined form.
V.EmitValue(this);
}
// Emit the DIE children if any.
if (Die.hasChildren()) {
for (auto &Child : Die.children())
emitDwarfDIE(Child);
OutStreamer->AddComment("End Of Children Mark");
EmitInt8(0);
}
}
void
AsmPrinter::emitDwarfAbbrevs(const std::vector<DIEAbbrev *>& Abbrevs) const {
// For each abbreviation.
for (const DIEAbbrev *Abbrev : Abbrevs) {
// Emit the abbreviation's code (base-1 index).
EmitULEB128(Abbrev->getNumber(), "Abbreviation Code");
// Emit the abbreviations data.
Abbrev->Emit(this);
}
// Mark end of abbreviations.
EmitULEB128(0, "EOM(3)");
}
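// Editorial sketch of the resulting .debug_abbrev stream: a sequence of
// (ULEB128 code, abbreviation data) pairs closed by a zero code, e.g.
//   ULEB128 1           ; Abbreviation Code
//   <abbrev #1 data>    ; tag, children flag, (attr, form)* pairs, 0 0
//   ULEB128 2           ; Abbreviation Code
//   <abbrev #2 data>
//   ULEB128 0           ; EOM(3)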
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfExpression.h | //===-- llvm/CodeGen/DwarfExpression.h - Dwarf Compile Unit ---*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing DWARF expressions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFEXPRESSION_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFEXPRESSION_H
#include "llvm/IR/DebugInfo.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
class AsmPrinter;
class ByteStreamer;
class TargetRegisterInfo;
class DwarfUnit;
class DIELoc;
/// Base class containing the logic for constructing DWARF expressions
/// independently of whether they are emitted into a DIE or into a .debug_loc
/// entry.
class DwarfExpression {
protected:
// Various convenience accessors that extract things out of AsmPrinter.
const TargetRegisterInfo &TRI;
unsigned DwarfVersion;
public:
DwarfExpression(const TargetRegisterInfo &TRI,
unsigned DwarfVersion)
: TRI(TRI), DwarfVersion(DwarfVersion) {}
virtual ~DwarfExpression() {}
/// Output a dwarf operand and an optional assembler comment.
virtual void EmitOp(uint8_t Op, const char *Comment = nullptr) = 0;
/// Emit a raw signed value.
virtual void EmitSigned(int64_t Value) = 0;
/// Emit a raw unsigned value.
virtual void EmitUnsigned(uint64_t Value) = 0;
/// Return whether the given machine register is the frame register in the
/// current function.
virtual bool isFrameRegister(unsigned MachineReg) = 0;
/// Emit a dwarf register operation.
void AddReg(int DwarfReg, const char *Comment = nullptr);
/// Emit a (double-)indirect dwarf register operation.
void AddRegIndirect(int DwarfReg, int Offset, bool Deref = false);
/// Emit a dwarf register operation for describing
/// - a small value occupying only part of a register or
/// - a register representing only part of a value.
void AddOpPiece(unsigned SizeInBits, unsigned OffsetInBits = 0);
/// Emit a shift-right dwarf expression.
void AddShr(unsigned ShiftBy);
/// Emit an indirect dwarf register operation for the given machine register.
/// \return false if no DWARF register exists for MachineReg.
bool AddMachineRegIndirect(unsigned MachineReg, int Offset = 0);
/// \brief Emit a partial DWARF register operation.
/// \param MachineReg the register
/// \param PieceSizeInBits size and
/// \param PieceOffsetInBits offset of the piece in bits, if this is one
/// piece of an aggregate value.
///
/// If size and offset are zero, an operation for the entire
/// register is emitted: Some targets do not provide a DWARF
/// register number for every register. If this is the case, this
/// function will attempt to emit a DWARF register by emitting a
/// piece of a super-register or by piecing together multiple
/// subregisters that alias the register.
///
/// \return false if no DWARF register exists for MachineReg.
bool AddMachineRegPiece(unsigned MachineReg, unsigned PieceSizeInBits = 0,
unsigned PieceOffsetInBits = 0);
/// Emit a signed constant.
void AddSignedConstant(int Value);
/// Emit an unsigned constant.
void AddUnsignedConstant(unsigned Value);
/// \brief Emit an entire expression on top of a machine register location.
///
/// \param PieceOffsetInBits If this is one piece out of a fragmented
/// location, this is the offset of the piece inside the entire variable.
/// \return false if no DWARF register exists for MachineReg.
bool AddMachineRegExpression(const DIExpression *Expr, unsigned MachineReg,
unsigned PieceOffsetInBits = 0);
/// Emit the operations remaining in the DIExpression, starting at iterator I.
/// \param PieceOffsetInBits If this is one piece out of a fragmented
/// location, this is the offset of the piece inside the entire variable.
void AddExpression(DIExpression::expr_op_iterator I,
DIExpression::expr_op_iterator E,
unsigned PieceOffsetInBits = 0);
};
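/// Editorial sketch of a hypothetical third implementation that dumps an
/// expression to a raw_ostream; it relies only on the interface above (the
/// class name and `OS` member are invented, and format_hex is assumed from
/// llvm/Support/Format.h):
///
///   class RawOstreamDwarfExpression : public DwarfExpression {
///     raw_ostream &OS;
///   public:
///     RawOstreamDwarfExpression(const TargetRegisterInfo &TRI,
///                               unsigned DwarfVersion, raw_ostream &OS)
///         : DwarfExpression(TRI, DwarfVersion), OS(OS) {}
///     void EmitOp(uint8_t Op, const char *Comment = nullptr) override {
///       OS << "op " << format_hex(Op, 4);
///       if (Comment)
///         OS << " ; " << Comment;
///       OS << '\n';
///     }
///     void EmitSigned(int64_t Value) override { OS << Value << '\n'; }
///     void EmitUnsigned(uint64_t Value) override { OS << Value << '\n'; }
///     bool isFrameRegister(unsigned) override { return false; }
///   };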
/// DwarfExpression implementation for .debug_loc entries.
class DebugLocDwarfExpression : public DwarfExpression {
ByteStreamer &BS;
public:
DebugLocDwarfExpression(const TargetRegisterInfo &TRI,
unsigned DwarfVersion, ByteStreamer &BS)
: DwarfExpression(TRI, DwarfVersion), BS(BS) {}
void EmitOp(uint8_t Op, const char *Comment = nullptr) override;
void EmitSigned(int64_t Value) override;
void EmitUnsigned(uint64_t Value) override;
bool isFrameRegister(unsigned MachineReg) override;
};
/// DwarfExpression implementation for singular DW_AT_location.
class DIEDwarfExpression : public DwarfExpression {
const AsmPrinter &AP;
DwarfUnit &DU;
DIELoc &DIE;
public:
DIEDwarfExpression(const AsmPrinter &AP, DwarfUnit &DU, DIELoc &DIE);
void EmitOp(uint8_t Op, const char *Comment = nullptr) override;
void EmitSigned(int64_t Value) override;
void EmitUnsigned(uint64_t Value) override;
bool isFrameRegister(unsigned MachineReg) override;
};
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfFile.cpp | //===-- llvm/CodeGen/DwarfFile.cpp - Dwarf Debug Framework ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "DwarfFile.h"
#include "DwarfDebug.h"
#include "DwarfUnit.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
namespace llvm {
DwarfFile::DwarfFile(AsmPrinter *AP, StringRef Pref, BumpPtrAllocator &DA)
: Asm(AP), StrPool(DA, *Asm, Pref) {}
DwarfFile::~DwarfFile() {
for (DIEAbbrev *Abbrev : Abbreviations)
Abbrev->~DIEAbbrev();
}
// Define a unique number for the abbreviation.
//
DIEAbbrev &DwarfFile::assignAbbrevNumber(DIE &Die) {
FoldingSetNodeID ID;
DIEAbbrev Abbrev = Die.generateAbbrev();
Abbrev.Profile(ID);
void *InsertPos;
if (DIEAbbrev *Existing =
AbbreviationsSet.FindNodeOrInsertPos(ID, InsertPos)) {
Die.setAbbrevNumber(Existing->getNumber());
return *Existing;
}
// Move the abbreviation to the heap and assign a number.
DIEAbbrev *New = new (AbbrevAllocator) DIEAbbrev(std::move(Abbrev));
Abbreviations.push_back(New);
New->setNumber(Abbreviations.size());
Die.setAbbrevNumber(Abbreviations.size());
// Store it for lookup.
AbbreviationsSet.InsertNode(New, InsertPos);
return *New;
}
void DwarfFile::addUnit(std::unique_ptr<DwarfUnit> U) {
CUs.push_back(std::move(U));
}
// Emit the various dwarf units to the unit section USection with
// the abbreviations going into ASection.
void DwarfFile::emitUnits(bool UseOffsets) {
for (const auto &TheU : CUs) {
DIE &Die = TheU->getUnitDie();
MCSection *USection = TheU->getSection();
Asm->OutStreamer->SwitchSection(USection);
TheU->emitHeader(UseOffsets);
Asm->emitDwarfDIE(Die);
}
}
// Compute the size and offset for each DIE.
void DwarfFile::computeSizeAndOffsets() {
// Offset from the first CU in the debug info section is 0 initially.
unsigned SecOffset = 0;
// Iterate over each compile unit and set the size and offsets for each
// DIE within each compile unit. All offsets are CU relative.
for (const auto &TheU : CUs) {
TheU->setDebugInfoOffset(SecOffset);
// CU-relative offset is reset to 0 here.
unsigned Offset = sizeof(int32_t) + // Length of Unit Info
TheU->getHeaderSize(); // Unit-specific headers
// EndOffset here is CU-relative, after laying out
// all of the CU DIE.
unsigned EndOffset = computeSizeAndOffset(TheU->getUnitDie(), Offset);
SecOffset += EndOffset;
}
}
// Compute the size and offset of a DIE. The offset is relative to start of the
// CU. It returns the offset after laying out the DIE.
unsigned DwarfFile::computeSizeAndOffset(DIE &Die, unsigned Offset) {
// Record the abbreviation.
const DIEAbbrev &Abbrev = assignAbbrevNumber(Die);
// Set DIE offset
Die.setOffset(Offset);
// Start the size with the size of abbreviation code.
Offset += getULEB128Size(Die.getAbbrevNumber());
// Size the DIE attribute values.
for (const auto &V : Die.values())
// Size attribute value.
Offset += V.SizeOf(Asm);
// Size the DIE children if any.
if (Die.hasChildren()) {
(void)Abbrev;
assert(Abbrev.hasChildren() && "Children flag not set");
for (auto &Child : Die.children())
Offset = computeSizeAndOffset(Child, Offset);
// End of children marker.
Offset += sizeof(int8_t);
}
Die.setSize(Offset - Die.getOffset());
return Offset;
}
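// Editorial worked example: for a unit DIE with abbrev code 1 (one ULEB128
// byte) and a single 4-byte attribute, plus one child with a 1-byte
// attribute, the recursion accumulates
//   Offset = 4 (length word) + getHeaderSize()  // from computeSizeAndOffsets
//          + 1 + 4                              // unit DIE
//          + 1 + 1                              // child DIE
//          + 1                                  // end-of-children marker
// and each Die.getSize() covers exactly its own slice of that total.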
void DwarfFile::emitAbbrevs(MCSection *Section) {
// Check to see if it is worth the effort.
if (!Abbreviations.empty()) {
// Start the debug abbrev section.
Asm->OutStreamer->SwitchSection(Section);
Asm->emitDwarfAbbrevs(Abbreviations);
}
}
// Emit strings into a string section.
void DwarfFile::emitStrings(MCSection *StrSection, MCSection *OffsetSection) {
StrPool.emit(*Asm, StrSection, OffsetSection);
}
bool DwarfFile::addScopeVariable(LexicalScope *LS, DbgVariable *Var) {
SmallVectorImpl<DbgVariable *> &Vars = ScopeVariables[LS];
const DILocalVariable *DV = Var->getVariable();
// Variables with positive arg numbers are parameters.
if (unsigned ArgNum = DV->getArg()) {
// Keep all parameters in order at the start of the variable list to ensure
// function types are correct (no out-of-order parameters)
//
// This could be improved by only doing it for optimized builds (unoptimized
// builds have the right order to begin with), searching from the back (this
// would catch the unoptimized case quickly), or doing a binary search
// rather than linear search.
auto I = Vars.begin();
while (I != Vars.end()) {
unsigned CurNum = (*I)->getVariable()->getArg();
// A local (non-parameter) variable has been found, insert immediately
// before it.
if (CurNum == 0)
break;
// A later indexed parameter has been found, insert immediately before it.
if (CurNum > ArgNum)
break;
if (CurNum == ArgNum) {
(*I)->addMMIEntry(*Var);
return false;
}
++I;
}
Vars.insert(I, Var);
return true;
}
Vars.push_back(Var);
return true;
}
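// Editorial ordering example: inserting variables with arg numbers 2, 1, 0
// (a local), then 1 again yields
//   [arg2] -> [arg1, arg2] -> [arg1, arg2, local]
// with the duplicate arg-1 entry merged via addMMIEntry and the function
// returning false for it.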
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp | //===-- llvm/CodeGen/DwarfCompileUnit.cpp - Dwarf Compile Unit ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "DwarfCompileUnit.h"
#include "DwarfExpression.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
namespace llvm {
DwarfCompileUnit::DwarfCompileUnit(unsigned UID, const DICompileUnit *Node,
AsmPrinter *A, DwarfDebug *DW,
DwarfFile *DWU)
: DwarfUnit(UID, dwarf::DW_TAG_compile_unit, Node, A, DW, DWU),
Skeleton(nullptr), BaseAddress(nullptr) {
insertDIE(Node, &getUnitDie());
}
/// addLabelAddress - Add a dwarf label attribute data and value using
/// DW_FORM_addr or DW_FORM_GNU_addr_index.
///
void DwarfCompileUnit::addLabelAddress(DIE &Die, dwarf::Attribute Attribute,
const MCSymbol *Label) {
// Don't use the address pool in non-fission or in the skeleton unit itself.
// FIXME: Once GDB supports this, it's probably worthwhile using the address
// pool from the skeleton - maybe even in non-fission (possibly fewer
// relocations by sharing them in the pool, but we have other ideas about how
// to reduce the number of relocations as well/instead).
if (!DD->useSplitDwarf() || !Skeleton)
return addLocalLabelAddress(Die, Attribute, Label);
if (Label)
DD->addArangeLabel(SymbolCU(this, Label));
unsigned idx = DD->getAddressPool().getIndex(Label);
Die.addValue(DIEValueAllocator, Attribute, dwarf::DW_FORM_GNU_addr_index,
DIEInteger(idx));
}
void DwarfCompileUnit::addLocalLabelAddress(DIE &Die,
dwarf::Attribute Attribute,
const MCSymbol *Label) {
if (Label)
DD->addArangeLabel(SymbolCU(this, Label));
if (Label)
Die.addValue(DIEValueAllocator, Attribute, dwarf::DW_FORM_addr,
DIELabel(Label));
else
Die.addValue(DIEValueAllocator, Attribute, dwarf::DW_FORM_addr,
DIEInteger(0));
}
unsigned DwarfCompileUnit::getOrCreateSourceID(StringRef FileName,
StringRef DirName) {
// If we print assembly, we can't separate .file entries according to
// compile units. Thus all files will belong to the default compile unit.
// FIXME: add a better feature test than hasRawTextSupport. Even better,
// extend .file to support this.
return Asm->OutStreamer->EmitDwarfFileDirective(
0, DirName, FileName,
Asm->OutStreamer->hasRawTextSupport() ? 0 : getUniqueID());
}
// Return the constant expression if the value is a GEP used to access a
// merged global constant, e.g.
// i8* getelementptr ({ i8, i8, i8, i8 }* @_MergedGlobals, i32 0, i32 0)
static const ConstantExpr *getMergedGlobalExpr(const Value *V) {
const ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(V);
if (!CE || CE->getNumOperands() != 3 ||
CE->getOpcode() != Instruction::GetElementPtr)
return nullptr;
// First operand points to a global struct.
Value *Ptr = CE->getOperand(0);
if (!isa<GlobalValue>(Ptr) ||
!isa<StructType>(cast<PointerType>(Ptr->getType())->getElementType()))
return nullptr;
// Second operand is zero.
const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CE->getOperand(1));
if (!CI || !CI->isZero())
return nullptr;
// Third operand is offset.
if (!isa<ConstantInt>(CE->getOperand(2)))
return nullptr;
return CE;
}
/// getOrCreateGlobalVariableDIE - get or create global variable DIE.
DIE *DwarfCompileUnit::getOrCreateGlobalVariableDIE(
const DIGlobalVariable *GV) {
// Check for pre-existence.
if (DIE *Die = getDIE(GV))
return Die;
assert(GV);
auto *GVContext = GV->getScope();
auto *GTy = DD->resolve(GV->getType());
// Construct the context before querying for the existence of the DIE in
// case such construction creates the DIE.
DIE *ContextDIE = getOrCreateContextDIE(GVContext);
// Add to map.
DIE *VariableDIE = &createAndAddDIE(GV->getTag(), *ContextDIE, GV);
DIScope *DeclContext;
if (auto *SDMDecl = GV->getStaticDataMemberDeclaration()) {
DeclContext = resolve(SDMDecl->getScope());
assert(SDMDecl->isStaticMember() && "Expected static member decl");
assert(GV->isDefinition());
// We need the declaration DIE that is in the static member's class.
DIE *VariableSpecDIE = getOrCreateStaticMemberDIE(SDMDecl);
addDIEEntry(*VariableDIE, dwarf::DW_AT_specification, *VariableSpecDIE);
} else {
DeclContext = GV->getScope();
// Add name and type.
addString(*VariableDIE, dwarf::DW_AT_name, GV->getDisplayName());
addType(*VariableDIE, GTy);
// Add scoping info.
if (!GV->isLocalToUnit())
addFlag(*VariableDIE, dwarf::DW_AT_external);
// Add line number info.
addSourceLine(*VariableDIE, GV);
}
if (!GV->isDefinition())
addFlag(*VariableDIE, dwarf::DW_AT_declaration);
else
addGlobalName(GV->getName(), *VariableDIE, DeclContext);
// Add location.
bool addToAccelTable = false;
if (auto *Global = dyn_cast_or_null<GlobalVariable>(GV->getVariable())) {
addToAccelTable = true;
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
const MCSymbol *Sym = Asm->getSymbol(Global);
if (Global->isThreadLocal()) {
// FIXME: Make this work with -gsplit-dwarf.
unsigned PointerSize = Asm->getDataLayout().getPointerSize();
assert((PointerSize == 4 || PointerSize == 8) &&
"Add support for other sizes if necessary");
// Based on GCC's support for TLS:
if (!DD->useSplitDwarf()) {
// 1) Start with a const4u/const8u of the appropriate pointer size
addUInt(*Loc, dwarf::DW_FORM_data1,
PointerSize == 4 ? dwarf::DW_OP_const4u : dwarf::DW_OP_const8u);
// 2) containing the (relocated) offset of the TLS variable
// within the module's TLS block.
addExpr(*Loc, dwarf::DW_FORM_udata,
Asm->getObjFileLowering().getDebugThreadLocalSymbol(Sym));
} else {
addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_const_index);
addUInt(*Loc, dwarf::DW_FORM_udata,
DD->getAddressPool().getIndex(Sym, /* TLS */ true));
}
// 3) followed by an OP to make the debugger do a TLS lookup.
addUInt(*Loc, dwarf::DW_FORM_data1,
DD->useGNUTLSOpcode() ? dwarf::DW_OP_GNU_push_tls_address
: dwarf::DW_OP_form_tls_address);
} else {
DD->addArangeLabel(SymbolCU(this, Sym));
addOpAddress(*Loc, Sym);
}
addBlock(*VariableDIE, dwarf::DW_AT_location, Loc);
addLinkageName(*VariableDIE, GV->getLinkageName());
} else if (const ConstantInt *CI =
dyn_cast_or_null<ConstantInt>(GV->getVariable())) {
addConstantValue(*VariableDIE, CI, GTy);
} else if (const ConstantExpr *CE = getMergedGlobalExpr(GV->getVariable())) {
addToAccelTable = true;
// GV is a merged global.
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
Value *Ptr = CE->getOperand(0);
MCSymbol *Sym = Asm->getSymbol(cast<GlobalValue>(Ptr));
DD->addArangeLabel(SymbolCU(this, Sym));
addOpAddress(*Loc, Sym);
addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
SmallVector<Value *, 3> Idx(CE->op_begin() + 1, CE->op_end());
addUInt(*Loc, dwarf::DW_FORM_udata,
Asm->getDataLayout().getIndexedOffset(Ptr->getType(), Idx));
addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
addBlock(*VariableDIE, dwarf::DW_AT_location, Loc);
}
if (addToAccelTable) {
DD->addAccelName(GV->getName(), *VariableDIE);
// If the linkage name is different than the name, go ahead and output
// that as well into the name table.
if (GV->getLinkageName() != "" && GV->getName() != GV->getLinkageName())
DD->addAccelName(GV->getLinkageName(), *VariableDIE);
}
return VariableDIE;
}
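// Editorial example: for a thread-local global without split dwarf on a
// 64-bit target, the location block built above is, in DWARF terms,
//   DW_OP_const8u <TLS offset symbol>, DW_OP_form_tls_address
// (DW_OP_GNU_push_tls_address when useGNUTLSOpcode() is set, for
// pre-standard consumers), which tells the debugger to perform a TLS lookup
// at that offset.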
void DwarfCompileUnit::addRange(RangeSpan Range) {
bool SameAsPrevCU = this == DD->getPrevCU();
DD->setPrevCU(this);
// If we have no current ranges, just add the range and return. Otherwise,
// check the current section and CU against the previous section and CU we
// emitted into (and that the subprogram was contained within). If these are
// the same, extend our current range; otherwise add this as a new range.
if (CURanges.empty() || !SameAsPrevCU ||
(&CURanges.back().getEnd()->getSection() !=
&Range.getEnd()->getSection())) {
CURanges.push_back(Range);
return;
}
CURanges.back().setEnd(Range.getEnd());
}
DIE::value_iterator
DwarfCompileUnit::addSectionLabel(DIE &Die, dwarf::Attribute Attribute,
const MCSymbol *Label, const MCSymbol *Sec) {
if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
return addLabel(Die, Attribute,
DD->getDwarfVersion() >= 4 ? dwarf::DW_FORM_sec_offset
: dwarf::DW_FORM_data4,
Label);
return addSectionDelta(Die, Attribute, Label, Sec);
}
void DwarfCompileUnit::initStmtList() {
// Define start line table label for each Compile Unit.
MCSymbol *LineTableStartSym =
Asm->OutStreamer->getDwarfLineTableSymbol(getUniqueID());
// DW_AT_stmt_list is an offset of line number information for this
// compile unit in debug_line section. For split dwarf this is
// left in the skeleton CU and so not included.
// The line table entries are not always emitted in assembly, so it
// is not okay to use line_table_start here.
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
StmtListValue =
addSectionLabel(UnitDie, dwarf::DW_AT_stmt_list, LineTableStartSym,
TLOF.getDwarfLineSection()->getBeginSymbol());
}
void DwarfCompileUnit::applyStmtList(DIE &D) {
D.addValue(DIEValueAllocator, *StmtListValue);
}
void DwarfCompileUnit::attachLowHighPC(DIE &D, const MCSymbol *Begin,
const MCSymbol *End) {
assert(Begin && "Begin label should not be null!");
assert(End && "End label should not be null!");
assert(Begin->isDefined() && "Invalid starting label");
assert(End->isDefined() && "Invalid end label");
addLabelAddress(D, dwarf::DW_AT_low_pc, Begin);
if (DD->getDwarfVersion() < 4)
addLabelAddress(D, dwarf::DW_AT_high_pc, End);
else
addLabelDelta(D, dwarf::DW_AT_high_pc, End, Begin);
}
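// Editorial note: for DWARF v2/v3 both attributes are emitted as addresses,
// whereas from v4 on DW_AT_high_pc is a data form holding the End - Begin
// delta, which is what addLabelDelta produces above.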
// Find DIE for the given subprogram and attach appropriate DW_AT_low_pc
// and DW_AT_high_pc attributes. If there are global variables in this
// scope then create and insert DIEs for these variables.
DIE &DwarfCompileUnit::updateSubprogramScopeDIE(const DISubprogram *SP) {
DIE *SPDie = getOrCreateSubprogramDIE(SP, includeMinimalInlineScopes());
attachLowHighPC(*SPDie, Asm->getFunctionBegin(), Asm->getFunctionEnd());
if (!DD->getCurrentFunction()->getTarget().Options.DisableFramePointerElim(
*DD->getCurrentFunction()))
addFlag(*SPDie, dwarf::DW_AT_APPLE_omit_frame_ptr);
// Only include DW_AT_frame_base in full debug info
if (!includeMinimalInlineScopes()) {
const TargetRegisterInfo *RI = Asm->MF->getSubtarget().getRegisterInfo();
MachineLocation Location(RI->getFrameRegister(*Asm->MF));
if (RI->isPhysicalRegister(Location.getReg()))
addAddress(*SPDie, dwarf::DW_AT_frame_base, Location);
}
// Add the name to the name table; we do this here because we're guaranteed
// to have concrete versions of our DW_TAG_subprogram nodes.
DD->addSubprogramNames(SP, *SPDie);
return *SPDie;
}
// Construct a DIE for this scope.
void DwarfCompileUnit::constructScopeDIE(
LexicalScope *Scope, SmallVectorImpl<DIE *> &FinalChildren) {
if (!Scope || !Scope->getScopeNode())
return;
auto *DS = Scope->getScopeNode();
assert((Scope->getInlinedAt() || !isa<DISubprogram>(DS)) &&
"Only handle inlined subprograms here, use "
"constructSubprogramScopeDIE for non-inlined "
"subprograms");
SmallVector<DIE *, 8> Children;
// We try to create the scope DIE first, then the children DIEs. This
// avoids creating unused children and then removing them later when we
// find out the scope DIE is null.
DIE *ScopeDIE;
if (Scope->getParent() && isa<DISubprogram>(DS)) {
ScopeDIE = constructInlinedScopeDIE(Scope);
if (!ScopeDIE)
return;
// We create children when the scope DIE is not null.
createScopeChildrenDIE(Scope, Children);
} else {
// Early exit when we know the scope DIE is going to be null.
if (DD->isLexicalScopeDIENull(Scope))
return;
unsigned ChildScopeCount;
// We create children here when we know the scope DIE is not going to be
// null and the children will be added to the scope DIE.
createScopeChildrenDIE(Scope, Children, &ChildScopeCount);
// Skip imported directives in gmlt-like data.
if (!includeMinimalInlineScopes()) {
// There is no need to emit empty lexical block DIE.
for (const auto &E : DD->findImportedEntitiesForScope(DS))
Children.push_back(
constructImportedEntityDIE(cast<DIImportedEntity>(E.second)));
}
// If there are only other scopes as children, put them directly in the
// parent instead, as this scope would serve no purpose.
if (Children.size() == ChildScopeCount) {
FinalChildren.insert(FinalChildren.end(),
std::make_move_iterator(Children.begin()),
std::make_move_iterator(Children.end()));
return;
}
ScopeDIE = constructLexicalScopeDIE(Scope);
assert(ScopeDIE && "Scope DIE should not be null.");
}
// Add children
for (auto &I : Children)
ScopeDIE->addChild(std::move(I));
FinalChildren.push_back(std::move(ScopeDIE));
}
DIE::value_iterator
DwarfCompileUnit::addSectionDelta(DIE &Die, dwarf::Attribute Attribute,
const MCSymbol *Hi, const MCSymbol *Lo) {
return Die.addValue(DIEValueAllocator, Attribute,
DD->getDwarfVersion() >= 4 ? dwarf::DW_FORM_sec_offset
: dwarf::DW_FORM_data4,
new (DIEValueAllocator) DIEDelta(Hi, Lo));
}
void DwarfCompileUnit::addScopeRangeList(DIE &ScopeDIE,
SmallVector<RangeSpan, 2> Range) {
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
// Emit the offset into .debug_ranges as a relocatable label. emitDIE will
// handle emitting it appropriately.
const MCSymbol *RangeSectionSym =
TLOF.getDwarfRangesSection()->getBeginSymbol();
RangeSpanList List(Asm->createTempSymbol("debug_ranges"), std::move(Range));
// Under fission, ranges are specified by constant offsets relative to the
// CU's DW_AT_GNU_ranges_base.
if (isDwoUnit())
addSectionDelta(ScopeDIE, dwarf::DW_AT_ranges, List.getSym(),
RangeSectionSym);
else
addSectionLabel(ScopeDIE, dwarf::DW_AT_ranges, List.getSym(),
RangeSectionSym);
// Add the range list to the set of ranges to be emitted.
(Skeleton ? Skeleton : this)->CURangeLists.push_back(std::move(List));
}
void DwarfCompileUnit::attachRangesOrLowHighPC(
DIE &Die, SmallVector<RangeSpan, 2> Ranges) {
if (Ranges.size() == 1) {
const auto &single = Ranges.front();
attachLowHighPC(Die, single.getStart(), single.getEnd());
} else
addScopeRangeList(Die, std::move(Ranges));
}
void DwarfCompileUnit::attachRangesOrLowHighPC(
DIE &Die, const SmallVectorImpl<InsnRange> &Ranges) {
SmallVector<RangeSpan, 2> List;
List.reserve(Ranges.size());
for (const InsnRange &R : Ranges)
List.push_back(RangeSpan(DD->getLabelBeforeInsn(R.first),
DD->getLabelAfterInsn(R.second)));
attachRangesOrLowHighPC(Die, std::move(List));
}
// This scope represents inlined body of a function. Construct DIE to
// represent this concrete inlined copy of the function.
DIE *DwarfCompileUnit::constructInlinedScopeDIE(LexicalScope *Scope) {
assert(Scope->getScopeNode());
auto *DS = Scope->getScopeNode();
auto *InlinedSP = getDISubprogram(DS);
// Find the original (abstract) subprogram DIE, since the subprogram may
// have been inlined from another compile unit.
DIE *OriginDIE = DU->getAbstractSPDies()[InlinedSP];
assert(OriginDIE && "Unable to find original DIE for an inlined subprogram.");
auto ScopeDIE = DIE::get(DIEValueAllocator, dwarf::DW_TAG_inlined_subroutine);
addDIEEntry(*ScopeDIE, dwarf::DW_AT_abstract_origin, *OriginDIE);
attachRangesOrLowHighPC(*ScopeDIE, Scope->getRanges());
// Add the call site information to the DIE.
const DILocation *IA = Scope->getInlinedAt();
addUInt(*ScopeDIE, dwarf::DW_AT_call_file, None,
getOrCreateSourceID(IA->getFilename(), IA->getDirectory()));
addUInt(*ScopeDIE, dwarf::DW_AT_call_line, None, IA->getLine());
// Add the name to the name table; we do this here because we're guaranteed
// to have concrete versions of our DW_TAG_inlined_subroutine nodes.
DD->addSubprogramNames(InlinedSP, *ScopeDIE);
return ScopeDIE;
}
// Construct new DW_TAG_lexical_block for this scope and attach
// DW_AT_low_pc/DW_AT_high_pc labels.
DIE *DwarfCompileUnit::constructLexicalScopeDIE(LexicalScope *Scope) {
if (DD->isLexicalScopeDIENull(Scope))
return nullptr;
auto ScopeDIE = DIE::get(DIEValueAllocator, dwarf::DW_TAG_lexical_block);
if (Scope->isAbstractScope())
return ScopeDIE;
attachRangesOrLowHighPC(*ScopeDIE, Scope->getRanges());
return ScopeDIE;
}
/// constructVariableDIE - Construct a DIE for the given DbgVariable.
DIE *DwarfCompileUnit::constructVariableDIE(DbgVariable &DV, bool Abstract) {
auto D = constructVariableDIEImpl(DV, Abstract);
DV.setDIE(*D);
return D;
}
DIE *DwarfCompileUnit::constructVariableDIEImpl(const DbgVariable &DV,
bool Abstract) {
// Define variable debug information entry.
auto VariableDie = DIE::get(DIEValueAllocator, DV.getTag());
if (Abstract) {
applyVariableAttributes(DV, *VariableDie);
return VariableDie;
}
// Add variable address.
unsigned Offset = DV.getDebugLocListIndex();
if (Offset != ~0U) {
addLocationList(*VariableDie, dwarf::DW_AT_location, Offset);
return VariableDie;
}
// Check if variable is described by a DBG_VALUE instruction.
if (const MachineInstr *DVInsn = DV.getMInsn()) {
assert(DVInsn->getNumOperands() == 4);
if (DVInsn->getOperand(0).isReg()) {
const MachineOperand RegOp = DVInsn->getOperand(0);
// If the second operand is an immediate, this is an indirect value.
if (DVInsn->getOperand(1).isImm()) {
MachineLocation Location(RegOp.getReg(),
DVInsn->getOperand(1).getImm());
addVariableAddress(DV, *VariableDie, Location);
} else if (RegOp.getReg())
addVariableAddress(DV, *VariableDie, MachineLocation(RegOp.getReg()));
} else if (DVInsn->getOperand(0).isImm())
addConstantValue(*VariableDie, DVInsn->getOperand(0), DV.getType());
else if (DVInsn->getOperand(0).isFPImm())
addConstantFPValue(*VariableDie, DVInsn->getOperand(0));
else if (DVInsn->getOperand(0).isCImm())
addConstantValue(*VariableDie, DVInsn->getOperand(0).getCImm(),
DV.getType());
return VariableDie;
}
// .. else use frame index.
if (DV.getFrameIndex().empty())
return VariableDie;
auto Expr = DV.getExpression().begin();
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
for (auto FI : DV.getFrameIndex()) {
unsigned FrameReg = 0;
const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering();
int Offset = TFI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
assert(Expr != DV.getExpression().end() &&
"Wrong number of expressions");
DwarfExpr.AddMachineRegIndirect(FrameReg, Offset);
DwarfExpr.AddExpression((*Expr)->expr_op_begin(), (*Expr)->expr_op_end());
++Expr;
}
addBlock(*VariableDie, dwarf::DW_AT_location, Loc);
return VariableDie;
}
DIE *DwarfCompileUnit::constructVariableDIE(DbgVariable &DV,
const LexicalScope &Scope,
DIE *&ObjectPointer) {
auto Var = constructVariableDIE(DV, Scope.isAbstractScope());
if (DV.isObjectPointer())
ObjectPointer = Var;
return Var;
}
DIE *DwarfCompileUnit::createScopeChildrenDIE(LexicalScope *Scope,
SmallVectorImpl<DIE *> &Children,
unsigned *ChildScopeCount) {
DIE *ObjectPointer = nullptr;
for (DbgVariable *DV : DU->getScopeVariables().lookup(Scope))
Children.push_back(constructVariableDIE(*DV, *Scope, ObjectPointer));
unsigned ChildCountWithoutScopes = Children.size();
for (LexicalScope *LS : Scope->getChildren())
constructScopeDIE(LS, Children);
if (ChildScopeCount)
*ChildScopeCount = Children.size() - ChildCountWithoutScopes;
return ObjectPointer;
}
void DwarfCompileUnit::constructSubprogramScopeDIE(LexicalScope *Scope) {
assert(Scope && Scope->getScopeNode());
assert(!Scope->getInlinedAt());
assert(!Scope->isAbstractScope());
auto *Sub = cast<DISubprogram>(Scope->getScopeNode());
DD->getProcessedSPNodes().insert(Sub);
DIE &ScopeDIE = updateSubprogramScopeDIE(Sub);
// If this is a variadic function, add an unspecified parameter.
DITypeRefArray FnArgs = Sub->getType()->getTypeArray();
// Collect lexical scope children first.
// ObjectPointer might be a local (non-argument) variable if it's a
// block's synthetic this pointer.
if (DIE *ObjectPointer = createAndAddScopeChildren(Scope, ScopeDIE))
addDIEEntry(ScopeDIE, dwarf::DW_AT_object_pointer, *ObjectPointer);
// If we have a single null element, it is a function that returns void.
// If we have more than one element and the last one is null, it is a
// variadic function.
if (FnArgs.size() > 1 && !FnArgs[FnArgs.size() - 1] &&
!includeMinimalInlineScopes())
ScopeDIE.addChild(
DIE::get(DIEValueAllocator, dwarf::DW_TAG_unspecified_parameters));
}
DIE *DwarfCompileUnit::createAndAddScopeChildren(LexicalScope *Scope,
DIE &ScopeDIE) {
// We create children when the scope DIE is not null.
SmallVector<DIE *, 8> Children;
DIE *ObjectPointer = createScopeChildrenDIE(Scope, Children);
// Add children
for (auto &I : Children)
ScopeDIE.addChild(std::move(I));
return ObjectPointer;
}
void
DwarfCompileUnit::constructAbstractSubprogramScopeDIE(LexicalScope *Scope) {
DIE *&AbsDef = DU->getAbstractSPDies()[Scope->getScopeNode()];
if (AbsDef)
return;
auto *SP = cast<DISubprogram>(Scope->getScopeNode());
DIE *ContextDIE;
if (includeMinimalInlineScopes())
ContextDIE = &getUnitDie();
// Some of this is duplicated from DwarfUnit::getOrCreateSubprogramDIE, with
// the important distinction that the debug node is not associated with the
// DIE (since the debug node will be associated with the concrete DIE, if
// any). It could be refactored to some common utility function.
else if (auto *SPDecl = SP->getDeclaration()) {
ContextDIE = &getUnitDie();
getOrCreateSubprogramDIE(SPDecl);
} else
ContextDIE = getOrCreateContextDIE(resolve(SP->getScope()));
// Passing null as the associated node because the abstract definition
// shouldn't be found by lookup.
AbsDef = &createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr);
applySubprogramAttributesToDefinition(SP, *AbsDef);
if (!includeMinimalInlineScopes())
addUInt(*AbsDef, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
if (DIE *ObjectPointer = createAndAddScopeChildren(Scope, *AbsDef))
addDIEEntry(*AbsDef, dwarf::DW_AT_object_pointer, *ObjectPointer);
}
DIE *DwarfCompileUnit::constructImportedEntityDIE(
const DIImportedEntity *Module) {
DIE *IMDie = DIE::get(DIEValueAllocator, (dwarf::Tag)Module->getTag());
insertDIE(Module, IMDie);
DIE *EntityDie;
auto *Entity = resolve(Module->getEntity());
if (auto *NS = dyn_cast<DINamespace>(Entity))
EntityDie = getOrCreateNameSpace(NS);
else if (auto *M = dyn_cast<DIModule>(Entity))
EntityDie = getOrCreateModule(M);
else if (auto *SP = dyn_cast<DISubprogram>(Entity))
EntityDie = getOrCreateSubprogramDIE(SP);
else if (auto *T = dyn_cast<DIType>(Entity))
EntityDie = getOrCreateTypeDIE(T);
else if (auto *GV = dyn_cast<DIGlobalVariable>(Entity))
EntityDie = getOrCreateGlobalVariableDIE(GV);
else
EntityDie = getDIE(Entity);
assert(EntityDie);
addSourceLine(*IMDie, Module->getLine(), Module->getScope()->getFilename(),
Module->getScope()->getDirectory());
addDIEEntry(*IMDie, dwarf::DW_AT_import, *EntityDie);
StringRef Name = Module->getName();
if (!Name.empty())
addString(*IMDie, dwarf::DW_AT_name, Name);
return IMDie;
}
void DwarfCompileUnit::finishSubprogramDefinition(const DISubprogram *SP) {
DIE *D = getDIE(SP);
if (DIE *AbsSPDIE = DU->getAbstractSPDies().lookup(SP)) {
if (D)
// If this subprogram has an abstract definition, reference that
addDIEEntry(*D, dwarf::DW_AT_abstract_origin, *AbsSPDIE);
} else {
if (!D && !includeMinimalInlineScopes())
      // Lazily construct the subprogram if we didn't see either a concrete or
      // an inlined version during codegen (except in -gmlt, where we want to
      // omit these entirely).
D = getOrCreateSubprogramDIE(SP);
if (D)
// And attach the attributes
applySubprogramAttributesToDefinition(SP, *D);
}
}
void DwarfCompileUnit::collectDeadVariables(const DISubprogram *SP) {
assert(SP && "CU's subprogram list contains a non-subprogram");
assert(SP->isDefinition() &&
"CU's subprogram list contains a subprogram declaration");
auto Variables = SP->getVariables();
if (Variables.size() == 0)
return;
DIE *SPDIE = DU->getAbstractSPDies().lookup(SP);
if (!SPDIE)
SPDIE = getDIE(SP);
assert(SPDIE);
for (const DILocalVariable *DV : Variables) {
DbgVariable NewVar(DV, /* IA */ nullptr, DD);
auto VariableDie = constructVariableDIE(NewVar);
applyVariableAttributes(NewVar, *VariableDie);
SPDIE->addChild(std::move(VariableDie));
}
}
void DwarfCompileUnit::emitHeader(bool UseOffsets) {
// Don't bother labeling the .dwo unit, as its offset isn't used.
if (!Skeleton) {
LabelBegin = Asm->createTempSymbol("cu_begin");
Asm->OutStreamer->EmitLabel(LabelBegin);
}
DwarfUnit::emitHeader(UseOffsets);
}
/// addGlobalName - Add a new global name to the compile unit.
void DwarfCompileUnit::addGlobalName(StringRef Name, DIE &Die,
const DIScope *Context) {
if (includeMinimalInlineScopes())
return;
std::string FullName = getParentContextString(Context) + Name.str();
GlobalNames[FullName] = &Die;
}
/// Add a new global type to the unit.
void DwarfCompileUnit::addGlobalType(const DIType *Ty, const DIE &Die,
const DIScope *Context) {
if (includeMinimalInlineScopes())
return;
std::string FullName = getParentContextString(Context) + Ty->getName().str();
GlobalTypes[FullName] = &Die;
}
/// addVariableAddress - Add a DW_AT_location attribute for a DbgVariable
/// based on the provided MachineLocation.
void DwarfCompileUnit::addVariableAddress(const DbgVariable &DV, DIE &Die,
MachineLocation Location) {
if (DV.hasComplexAddress())
addComplexAddress(DV, Die, dwarf::DW_AT_location, Location);
else if (DV.isBlockByrefVariable())
addBlockByrefAddress(DV, Die, dwarf::DW_AT_location, Location);
else
addAddress(Die, dwarf::DW_AT_location, Location);
}
/// Add an address attribute to a die based on the location provided.
void DwarfCompileUnit::addAddress(DIE &Die, dwarf::Attribute Attribute,
const MachineLocation &Location) {
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
bool validReg;
if (Location.isReg())
validReg = addRegisterOpPiece(*Loc, Location.getReg());
else
validReg = addRegisterOffset(*Loc, Location.getReg(), Location.getOffset());
if (!validReg)
return;
// Now attach the location information to the DIE.
addBlock(Die, Attribute, Loc);
}
/// Start with the address based on the location provided, and generate the
/// DWARF information necessary to find the actual variable given the extra
/// address information encoded in the DbgVariable, starting from the starting
/// location. Add the DWARF information to the die.
void DwarfCompileUnit::addComplexAddress(const DbgVariable &DV, DIE &Die,
dwarf::Attribute Attribute,
const MachineLocation &Location) {
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
assert(DV.getExpression().size() == 1);
const DIExpression *Expr = DV.getExpression().back();
bool ValidReg;
if (Location.getOffset()) {
ValidReg = DwarfExpr.AddMachineRegIndirect(Location.getReg(),
Location.getOffset());
if (ValidReg)
DwarfExpr.AddExpression(Expr->expr_op_begin(), Expr->expr_op_end());
} else
ValidReg = DwarfExpr.AddMachineRegExpression(Expr, Location.getReg());
// Now attach the location information to the DIE.
if (ValidReg)
addBlock(Die, Attribute, Loc);
}
/// Add a Dwarf loclistptr attribute data and value.
void DwarfCompileUnit::addLocationList(DIE &Die, dwarf::Attribute Attribute,
unsigned Index) {
dwarf::Form Form = DD->getDwarfVersion() >= 4 ? dwarf::DW_FORM_sec_offset
: dwarf::DW_FORM_data4;
Die.addValue(DIEValueAllocator, Attribute, Form, DIELocList(Index));
}
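// Illustrative note (not part of the original file): only the abbreviation's
// form code depends on the version check above; in the 32-bit DWARF format
// both forms occupy four bytes. A hypothetical stand-alone sketch of the same
// selection:
//
//   static dwarf::Form pickLocListForm(unsigned DwarfVersion) {
//     return DwarfVersion >= 4 ? dwarf::DW_FORM_sec_offset // DWARF v4+
//                              : dwarf::DW_FORM_data4;     // DWARF v2/v3
//   }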
void DwarfCompileUnit::applyVariableAttributes(const DbgVariable &Var,
DIE &VariableDie) {
StringRef Name = Var.getName();
if (!Name.empty())
addString(VariableDie, dwarf::DW_AT_name, Name);
addSourceLine(VariableDie, Var.getVariable());
addType(VariableDie, Var.getType());
if (Var.isArtificial())
addFlag(VariableDie, dwarf::DW_AT_artificial);
}
/// Add a Dwarf expression attribute data and value.
void DwarfCompileUnit::addExpr(DIELoc &Die, dwarf::Form Form,
const MCExpr *Expr) {
Die.addValue(DIEValueAllocator, (dwarf::Attribute)0, Form, DIEExpr(Expr));
}
void DwarfCompileUnit::applySubprogramAttributesToDefinition(
const DISubprogram *SP, DIE &SPDie) {
auto *SPDecl = SP->getDeclaration();
auto *Context = resolve(SPDecl ? SPDecl->getScope() : SP->getScope());
applySubprogramAttributes(SP, SPDie, includeMinimalInlineScopes());
addGlobalName(SP->getName(), SPDie, Context);
}
bool DwarfCompileUnit::isDwoUnit() const {
return DD->useSplitDwarf() && Skeleton;
}
bool DwarfCompileUnit::includeMinimalInlineScopes() const {
return getCUNode()->getEmissionKind() == DIBuilder::LineTablesOnly ||
(DD->useSplitDwarf() && !Skeleton);
}
} // end llvm namespace
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp | //=-- llvm/CodeGen/DwarfAccelTable.cpp - Dwarf Accelerator Tables -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf accelerator tables.
//
//===----------------------------------------------------------------------===//
#include "DwarfAccelTable.h"
#include "DwarfCompileUnit.h"
#include "DwarfDebug.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/DIE.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
// The length of the header data is always going to be 4 + 4 + 4*NumAtoms.
DwarfAccelTable::DwarfAccelTable(ArrayRef<DwarfAccelTable::Atom> atomList)
: Header(8 + (atomList.size() * 4)), HeaderData(atomList),
Entries(Allocator) {}
void DwarfAccelTable::AddName(DwarfStringPoolEntryRef Name, const DIE *die,
char Flags) {
assert(Data.empty() && "Already finalized!");
  // If the string is already in the list then add this DIE to the list;
  // otherwise add a new one.
DataArray &DIEs = Entries[Name.getString()];
assert(!DIEs.Name || DIEs.Name == Name);
DIEs.Name = Name;
DIEs.Values.push_back(new (Allocator) HashDataContents(die, Flags));
}
void DwarfAccelTable::ComputeBucketCount(void) {
// First get the number of unique hashes.
std::vector<uint32_t> uniques(Data.size());
for (size_t i = 0, e = Data.size(); i < e; ++i)
uniques[i] = Data[i]->HashValue;
array_pod_sort(uniques.begin(), uniques.end());
std::vector<uint32_t>::iterator p =
std::unique(uniques.begin(), uniques.end());
uint32_t num = std::distance(uniques.begin(), p);
  // Then compute the bucket count, with a minimum of 1 bucket.
if (num > 1024)
Header.bucket_count = num / 4;
else if (num > 16)
Header.bucket_count = num / 2;
else
Header.bucket_count = num > 0 ? num : 1;
Header.hashes_count = num;
}
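// Illustrative note (not part of the original file): the heuristic above
// trades hash-table density against table size. A stand-alone sketch with
// worked values, assuming the same thresholds (hypothetical helper):
//
//   #include <cstdint>
//   static uint32_t pickBucketCount(uint32_t NumUnique) {
//     if (NumUnique > 1024) return NumUnique / 4; // e.g. 2000 -> 500
//     if (NumUnique > 16)   return NumUnique / 2; // e.g.  100 -> 50
//     return NumUnique > 0 ? NumUnique : 1;       // e.g. 10 -> 10, 0 -> 1
//   }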
// compareDIEs - comparison predicate that sorts DIEs by their offset.
static bool compareDIEs(const DwarfAccelTable::HashDataContents *A,
const DwarfAccelTable::HashDataContents *B) {
return A->Die->getOffset() < B->Die->getOffset();
}
void DwarfAccelTable::FinalizeTable(AsmPrinter *Asm, StringRef Prefix) {
// Create the individual hash data outputs.
Data.reserve(Entries.size());
for (StringMap<DataArray>::iterator EI = Entries.begin(), EE = Entries.end();
EI != EE; ++EI) {
// Unique the entries.
    std::stable_sort(EI->second.Values.begin(), EI->second.Values.end(),
                     compareDIEs);
EI->second.Values.erase(
std::unique(EI->second.Values.begin(), EI->second.Values.end()),
EI->second.Values.end());
HashData *Entry = new (Allocator) HashData(EI->getKey(), EI->second);
Data.push_back(Entry);
}
// Figure out how many buckets we need, then compute the bucket
// contents and the final ordering. We'll emit the hashes and offsets
  // by doing a walk during the emission phase. We add temporary symbols to
  // the data so that we can reference them when emitting the offsets; the
  // symbols themselves are emitted when we emit the data.
ComputeBucketCount();
// Compute bucket contents and final ordering.
Buckets.resize(Header.bucket_count);
for (size_t i = 0, e = Data.size(); i < e; ++i) {
uint32_t bucket = Data[i]->HashValue % Header.bucket_count;
Buckets[bucket].push_back(Data[i]);
Data[i]->Sym = Asm->createTempSymbol(Prefix);
}
// Sort the contents of the buckets by hash value so that hash
// collisions end up together. Stable sort makes testing easier and
// doesn't cost much more.
for (size_t i = 0; i < Buckets.size(); ++i)
std::stable_sort(Buckets[i].begin(), Buckets[i].end(),
[] (HashData *LHS, HashData *RHS) {
return LHS->HashValue < RHS->HashValue;
});
}
// Emits the header for the table via the AsmPrinter.
void DwarfAccelTable::EmitHeader(AsmPrinter *Asm) {
Asm->OutStreamer->AddComment("Header Magic");
Asm->EmitInt32(Header.magic);
Asm->OutStreamer->AddComment("Header Version");
Asm->EmitInt16(Header.version);
Asm->OutStreamer->AddComment("Header Hash Function");
Asm->EmitInt16(Header.hash_function);
Asm->OutStreamer->AddComment("Header Bucket Count");
Asm->EmitInt32(Header.bucket_count);
Asm->OutStreamer->AddComment("Header Hash Count");
Asm->EmitInt32(Header.hashes_count);
Asm->OutStreamer->AddComment("Header Data Length");
Asm->EmitInt32(Header.header_data_len);
Asm->OutStreamer->AddComment("HeaderData Die Offset Base");
Asm->EmitInt32(HeaderData.die_offset_base);
Asm->OutStreamer->AddComment("HeaderData Atom Count");
Asm->EmitInt32(HeaderData.Atoms.size());
for (size_t i = 0; i < HeaderData.Atoms.size(); i++) {
Atom A = HeaderData.Atoms[i];
Asm->OutStreamer->AddComment(dwarf::AtomTypeString(A.type));
Asm->EmitInt16(A.type);
Asm->OutStreamer->AddComment(dwarf::FormEncodingString(A.form));
Asm->EmitInt16(A.form);
}
}
// Walk through and emit the buckets for the table. Each index is
// an offset into the list of hashes.
void DwarfAccelTable::EmitBuckets(AsmPrinter *Asm) {
unsigned index = 0;
for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
Asm->OutStreamer->AddComment("Bucket " + Twine(i));
if (Buckets[i].size() != 0)
Asm->EmitInt32(index);
else
Asm->EmitInt32(UINT32_MAX);
    // Buckets point into the list of hashes, not into the data, so do not
    // increment the index more than once per hash value when collisions occur.
uint64_t PrevHash = UINT64_MAX;
for (auto *HD : Buckets[i]) {
uint32_t HashValue = HD->HashValue;
if (PrevHash != HashValue)
++index;
PrevHash = HashValue;
}
}
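// Illustrative note (not part of the original file): because colliding
// entries share a slot, the index above advances only once per distinct hash
// value. For example, a bucket holding hashes [5, 5, 9] contributes two
// index positions, not three, so the following bucket starts at its
// predecessor's start index plus 2.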
}
// Walk through the buckets and emit the individual hashes for each
// bucket.
void DwarfAccelTable::EmitHashes(AsmPrinter *Asm) {
uint64_t PrevHash = UINT64_MAX;
for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
for (HashList::const_iterator HI = Buckets[i].begin(),
HE = Buckets[i].end();
HI != HE; ++HI) {
uint32_t HashValue = (*HI)->HashValue;
if (PrevHash == HashValue)
continue;
Asm->OutStreamer->AddComment("Hash in Bucket " + Twine(i));
Asm->EmitInt32(HashValue);
PrevHash = HashValue;
}
}
}
// Walk through the buckets and emit the individual offsets for each
// element in each bucket. This is done via a symbol subtraction from the
// beginning of the section. The non-section symbol will be output later
// when we emit the actual data.
void DwarfAccelTable::emitOffsets(AsmPrinter *Asm, const MCSymbol *SecBegin) {
uint64_t PrevHash = UINT64_MAX;
for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
for (HashList::const_iterator HI = Buckets[i].begin(),
HE = Buckets[i].end();
HI != HE; ++HI) {
uint32_t HashValue = (*HI)->HashValue;
if (PrevHash == HashValue)
continue;
PrevHash = HashValue;
Asm->OutStreamer->AddComment("Offset in Bucket " + Twine(i));
MCContext &Context = Asm->OutStreamer->getContext();
const MCExpr *Sub = MCBinaryExpr::createSub(
MCSymbolRefExpr::create((*HI)->Sym, Context),
MCSymbolRefExpr::create(SecBegin, Context), Context);
Asm->OutStreamer->EmitValue(Sub, sizeof(uint32_t));
}
}
}
// Walk through the buckets and emit the full data for each element in
// the bucket. For the string case emit the dies and the various offsets.
// Terminate each HashData bucket with 0.
void DwarfAccelTable::EmitData(AsmPrinter *Asm, DwarfDebug *D) {
for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
uint64_t PrevHash = UINT64_MAX;
for (HashList::const_iterator HI = Buckets[i].begin(),
HE = Buckets[i].end();
HI != HE; ++HI) {
// Terminate the previous entry if there is no hash collision
// with the current one.
if (PrevHash != UINT64_MAX && PrevHash != (*HI)->HashValue)
Asm->EmitInt32(0);
// Remember to emit the label for our offset.
Asm->OutStreamer->EmitLabel((*HI)->Sym);
Asm->OutStreamer->AddComment((*HI)->Str);
Asm->emitDwarfStringOffset((*HI)->Data.Name);
Asm->OutStreamer->AddComment("Num DIEs");
Asm->EmitInt32((*HI)->Data.Values.size());
for (HashDataContents *HD : (*HI)->Data.Values) {
// Emit the DIE offset
DwarfCompileUnit *CU = D->lookupUnit(HD->Die->getUnit());
assert(CU && "Accelerated DIE should belong to a CU.");
Asm->EmitInt32(HD->Die->getOffset() + CU->getDebugInfoOffset());
// If we have multiple Atoms emit that info too.
// FIXME: A bit of a hack, we either emit only one atom or all info.
if (HeaderData.Atoms.size() > 1) {
Asm->EmitInt16(HD->Die->getTag());
Asm->EmitInt8(HD->Flags);
}
}
PrevHash = (*HI)->HashValue;
}
// Emit the final end marker for the bucket.
if (!Buckets[i].empty())
Asm->EmitInt32(0);
}
}
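// Illustrative note (not part of the original file): each hash-data record
// emitted above therefore has roughly this shape in the output assembly:
//
//   .long str_offset   ; offset of the name in the string table
//   .long num_dies     ; number of DIEs sharing this name
//   .long die_offset   ; repeated num_dies times (plus tag/flags if >1 atom)
//   .long 0            ; terminator, unless the next entry is a collision
//
// with a final zero word closing each non-empty bucket.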
// Emit the entire data structure to the output file.
void DwarfAccelTable::emit(AsmPrinter *Asm, const MCSymbol *SecBegin,
DwarfDebug *D) {
// Emit the header.
EmitHeader(Asm);
// Emit the buckets.
EmitBuckets(Asm);
// Emit the hashes.
EmitHashes(Asm);
// Emit the offsets.
emitOffsets(Asm, SecBegin);
// Emit the hash data.
EmitData(Asm, D);
}
#ifndef NDEBUG
void DwarfAccelTable::print(raw_ostream &O) {
Header.print(O);
HeaderData.print(O);
O << "Entries: \n";
for (StringMap<DataArray>::const_iterator EI = Entries.begin(),
EE = Entries.end();
EI != EE; ++EI) {
O << "Name: " << EI->getKeyData() << "\n";
for (HashDataContents *HD : EI->second.Values)
HD->print(O);
}
O << "Buckets and Hashes: \n";
for (size_t i = 0, e = Buckets.size(); i < e; ++i)
for (HashList::const_iterator HI = Buckets[i].begin(),
HE = Buckets[i].end();
HI != HE; ++HI)
(*HI)->print(O);
O << "Data: \n";
for (std::vector<HashData *>::const_iterator DI = Data.begin(),
DE = Data.end();
DI != DE; ++DI)
(*DI)->print(O);
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DebugLocStream.h | //===--- lib/CodeGen/DebugLocStream.h - DWARF debug_loc stream --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DEBUGLOCSTREAM_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_DEBUGLOCSTREAM_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "ByteStreamer.h"
namespace llvm {
class AsmPrinter;
class DbgVariable;
class DwarfCompileUnit;
class MachineInstr;
class MCSymbol;
/// \brief Byte stream of .debug_loc entries.
///
/// Stores a unified stream of .debug_loc entries. There's \a List for each
/// variable/inlined-at pair, and an \a Entry for each \a DebugLocEntry.
///
/// FIXME: Do we need all these temp symbols?
/// FIXME: Why not output directly to the output stream?
class DebugLocStream {
public:
struct List {
DwarfCompileUnit *CU;
MCSymbol *Label = nullptr;
size_t EntryOffset;
List(DwarfCompileUnit *CU, size_t EntryOffset)
: CU(CU), EntryOffset(EntryOffset) {}
};
struct Entry {
const MCSymbol *BeginSym;
const MCSymbol *EndSym;
size_t ByteOffset;
size_t CommentOffset;
Entry(const MCSymbol *BeginSym, const MCSymbol *EndSym, size_t ByteOffset,
size_t CommentOffset)
: BeginSym(BeginSym), EndSym(EndSym), ByteOffset(ByteOffset),
CommentOffset(CommentOffset) {}
};
private:
SmallVector<List, 4> Lists;
SmallVector<Entry, 32> Entries;
SmallString<256> DWARFBytes;
SmallVector<std::string, 32> Comments;
/// \brief Only verbose textual output needs comments. This will be set to
/// true for that case, and false otherwise.
bool GenerateComments;
public:
DebugLocStream(bool GenerateComments) : GenerateComments(GenerateComments) { }
size_t getNumLists() const { return Lists.size(); }
const List &getList(size_t LI) const { return Lists[LI]; }
ArrayRef<List> getLists() const { return Lists; }
class ListBuilder;
class EntryBuilder;
private:
/// \brief Start a new .debug_loc entry list.
///
/// Start a new .debug_loc entry list. Return the new list's index so it can
/// be retrieved later via \a getList().
///
/// Until the next call, \a startEntry() will add entries to this list.
size_t startList(DwarfCompileUnit *CU) {
size_t LI = Lists.size();
Lists.emplace_back(CU, Entries.size());
return LI;
}
/// Finalize a .debug_loc entry list.
///
/// If there are no entries in this list, delete it outright. Otherwise,
/// create a label with \a Asm.
///
/// \return false iff the list is deleted.
bool finalizeList(AsmPrinter &Asm);
/// \brief Start a new .debug_loc entry.
///
/// Until the next call, bytes added to the stream will be added to this
/// entry.
void startEntry(const MCSymbol *BeginSym, const MCSymbol *EndSym) {
Entries.emplace_back(BeginSym, EndSym, DWARFBytes.size(), Comments.size());
}
/// Finalize a .debug_loc entry, deleting if it's empty.
void finalizeEntry();
public:
BufferByteStreamer getStreamer() {
return BufferByteStreamer(DWARFBytes, Comments, GenerateComments);
}
ArrayRef<Entry> getEntries(const List &L) const {
size_t LI = getIndex(L);
return makeArrayRef(Entries)
.slice(Lists[LI].EntryOffset, getNumEntries(LI));
}
ArrayRef<char> getBytes(const Entry &E) const {
size_t EI = getIndex(E);
return makeArrayRef(DWARFBytes.begin(), DWARFBytes.end())
.slice(Entries[EI].ByteOffset, getNumBytes(EI));
}
ArrayRef<std::string> getComments(const Entry &E) const {
size_t EI = getIndex(E);
return makeArrayRef(Comments)
.slice(Entries[EI].CommentOffset, getNumComments(EI));
}
private:
size_t getIndex(const List &L) const {
assert(&Lists.front() <= &L && &L <= &Lists.back() &&
"Expected valid list");
return &L - &Lists.front();
}
size_t getIndex(const Entry &E) const {
assert(&Entries.front() <= &E && &E <= &Entries.back() &&
"Expected valid entry");
return &E - &Entries.front();
}
size_t getNumEntries(size_t LI) const {
if (LI + 1 == Lists.size())
return Entries.size() - Lists[LI].EntryOffset;
return Lists[LI + 1].EntryOffset - Lists[LI].EntryOffset;
}
size_t getNumBytes(size_t EI) const {
if (EI + 1 == Entries.size())
return DWARFBytes.size() - Entries[EI].ByteOffset;
return Entries[EI + 1].ByteOffset - Entries[EI].ByteOffset;
}
size_t getNumComments(size_t EI) const {
if (EI + 1 == Entries.size())
return Comments.size() - Entries[EI].CommentOffset;
return Entries[EI + 1].CommentOffset - Entries[EI].CommentOffset;
}
};
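// Illustrative note (not part of the original file): the getNum* helpers
// above use the usual flat-stream idiom, where each record stores only its
// start offset and its length is recovered from the next record's offset (or
// from the container size for the last record). A generic stand-alone sketch
// (hypothetical helper):
//
//   #include <cstddef>
//   #include <vector>
//   static std::size_t sliceLength(const std::vector<std::size_t> &Starts,
//                                  std::size_t I, std::size_t TotalSize) {
//     return (I + 1 == Starts.size() ? TotalSize : Starts[I + 1]) - Starts[I];
//   }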
/// Builder for DebugLocStream lists.
class DebugLocStream::ListBuilder {
DebugLocStream &Locs;
AsmPrinter &Asm;
DbgVariable &V;
const MachineInstr &MI;
size_t ListIndex;
public:
ListBuilder(DebugLocStream &Locs, DwarfCompileUnit &CU, AsmPrinter &Asm,
DbgVariable &V, const MachineInstr &MI)
: Locs(Locs), Asm(Asm), V(V), MI(MI), ListIndex(Locs.startList(&CU)) {}
/// Finalize the list.
///
/// If the list is empty, delete it. Otherwise, finalize it by creating a
/// temp symbol in \a Asm and setting up the \a DbgVariable.
~ListBuilder();
DebugLocStream &getLocs() { return Locs; }
};
/// Builder for DebugLocStream entries.
class DebugLocStream::EntryBuilder {
DebugLocStream &Locs;
public:
EntryBuilder(ListBuilder &List, const MCSymbol *Begin, const MCSymbol *End)
: Locs(List.getLocs()) {
Locs.startEntry(Begin, End);
}
/// Finalize the entry, deleting it if it's empty.
~EntryBuilder() { Locs.finalizeEntry(); }
BufferByteStreamer getStreamer() { return Locs.getStreamer(); }
};
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfUnit.h | //===-- llvm/CodeGen/DwarfUnit.h - Dwarf Compile Unit ---*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf compile unit.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFUNIT_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFUNIT_H
#include "DwarfDebug.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/DIE.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSection.h"
namespace llvm {
class MachineLocation;
class MachineOperand;
class ConstantInt;
class ConstantFP;
class DbgVariable;
class DwarfCompileUnit;
// Data structure to hold a range for range lists.
class RangeSpan {
public:
RangeSpan(MCSymbol *S, MCSymbol *E) : Start(S), End(E) {}
const MCSymbol *getStart() const { return Start; }
const MCSymbol *getEnd() const { return End; }
void setEnd(const MCSymbol *E) { End = E; }
private:
const MCSymbol *Start, *End;
};
class RangeSpanList {
private:
// Index for locating within the debug_range section this particular span.
MCSymbol *RangeSym;
// List of ranges.
SmallVector<RangeSpan, 2> Ranges;
public:
RangeSpanList(MCSymbol *Sym, SmallVector<RangeSpan, 2> Ranges)
: RangeSym(Sym), Ranges(std::move(Ranges)) {}
MCSymbol *getSym() const { return RangeSym; }
const SmallVectorImpl<RangeSpan> &getRanges() const { return Ranges; }
void addRange(RangeSpan Range) { Ranges.push_back(Range); }
};
//===----------------------------------------------------------------------===//
/// This dwarf writer support class manages information associated with a
/// source file.
class DwarfUnit {
protected:
/// A numeric ID unique among all CUs in the module
unsigned UniqueID;
/// MDNode for the compile unit.
const DICompileUnit *CUNode;
// All DIEValues are allocated through this allocator.
BumpPtrAllocator DIEValueAllocator;
/// Unit debug information entry.
DIE &UnitDie;
/// Offset of the UnitDie from beginning of debug info section.
unsigned DebugInfoOffset;
/// Target of Dwarf emission.
AsmPrinter *Asm;
// Holders for some common dwarf information.
DwarfDebug *DD;
DwarfFile *DU;
/// An anonymous type for index type. Owned by UnitDie.
DIE *IndexTyDie;
/// Tracks the mapping of unit level debug information variables to debug
/// information entries.
DenseMap<const MDNode *, DIE *> MDNodeToDieMap;
/// A list of all the DIEBlocks in use.
std::vector<DIEBlock *> DIEBlocks;
/// A list of all the DIELocs in use.
std::vector<DIELoc *> DIELocs;
/// This map is used to keep track of subprogram DIEs that need
/// DW_AT_containing_type attribute. This attribute points to a DIE that
/// corresponds to the MDNode mapped with the subprogram DIE.
DenseMap<DIE *, const DINode *> ContainingTypeMap;
/// The section this unit will be emitted in.
MCSection *Section;
DwarfUnit(unsigned UID, dwarf::Tag, const DICompileUnit *CU, AsmPrinter *A,
DwarfDebug *DW, DwarfFile *DWU);
/// Add a string attribute data and value.
///
/// This is guaranteed to be in the local string pool instead of indirected.
void addLocalString(DIE &Die, dwarf::Attribute Attribute, StringRef Str);
void addIndexedString(DIE &Die, dwarf::Attribute Attribute, StringRef Str);
bool applySubprogramDefinitionAttributes(const DISubprogram *SP, DIE &SPDie);
public:
virtual ~DwarfUnit();
void initSection(MCSection *Section);
MCSection *getSection() const {
assert(Section);
return Section;
}
// Accessors.
AsmPrinter* getAsmPrinter() const { return Asm; }
unsigned getUniqueID() const { return UniqueID; }
uint16_t getLanguage() const { return CUNode->getSourceLanguage(); }
const DICompileUnit *getCUNode() const { return CUNode; }
DIE &getUnitDie() { return UnitDie; }
unsigned getDebugInfoOffset() const { return DebugInfoOffset; }
void setDebugInfoOffset(unsigned DbgInfoOff) { DebugInfoOffset = DbgInfoOff; }
/// Return true if this compile unit has something to write out.
bool hasContent() const { return UnitDie.hasChildren(); }
/// Get string containing language specific context for a global name.
///
/// Walks the metadata parent chain in a language specific manner (using the
/// compile unit language) and returns it as a string. This is done at the
/// metadata level because DIEs may not currently have been added to the
/// parent context and walking the DIEs looking for names is more expensive
/// than walking the metadata.
std::string getParentContextString(const DIScope *Context) const;
/// Add a new global name to the compile unit.
virtual void addGlobalName(StringRef Name, DIE &Die, const DIScope *Context) {
}
/// Add a new global type to the compile unit.
virtual void addGlobalType(const DIType *Ty, const DIE &Die,
const DIScope *Context) {}
/// Add a new name to the namespace accelerator table.
void addAccelNamespace(StringRef Name, const DIE &Die);
/// Returns the DIE map slot for the specified debug variable.
///
/// We delegate the request to DwarfDebug when the MDNode can be part of the
/// type system, since DIEs for the type system can be shared across CUs and
/// the mappings are kept in DwarfDebug.
DIE *getDIE(const DINode *D) const;
/// Returns a fresh newly allocated DIELoc.
DIELoc *getDIELoc() { return new (DIEValueAllocator) DIELoc; }
/// Insert DIE into the map.
///
/// We delegate the request to DwarfDebug when the MDNode can be part of the
/// type system, since DIEs for the type system can be shared across CUs and
/// the mappings are kept in DwarfDebug.
void insertDIE(const DINode *Desc, DIE *D);
/// Add a flag that is true to the DIE.
void addFlag(DIE &Die, dwarf::Attribute Attribute);
/// Add an unsigned integer attribute data and value.
void addUInt(DIE &Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form,
uint64_t Integer);
void addUInt(DIE &Block, dwarf::Form Form, uint64_t Integer);
  /// Add a signed integer attribute data and value.
void addSInt(DIE &Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form,
int64_t Integer);
void addSInt(DIELoc &Die, Optional<dwarf::Form> Form, int64_t Integer);
/// Add a string attribute data and value.
///
/// We always emit a reference to the string pool instead of immediate
/// strings so that DIEs have more predictable sizes. In the case of split
/// dwarf we emit an index into another table which gets us the static offset
/// into the string table.
void addString(DIE &Die, dwarf::Attribute Attribute, StringRef Str);
/// Add a Dwarf label attribute data and value.
DIE::value_iterator addLabel(DIE &Die, dwarf::Attribute Attribute,
dwarf::Form Form, const MCSymbol *Label);
void addLabel(DIELoc &Die, dwarf::Form Form, const MCSymbol *Label);
/// Add an offset into a section attribute data and value.
void addSectionOffset(DIE &Die, dwarf::Attribute Attribute, uint64_t Integer);
/// Add a dwarf op address data and value using the form given and an
/// op of either DW_FORM_addr or DW_FORM_GNU_addr_index.
void addOpAddress(DIELoc &Die, const MCSymbol *Label);
/// Add a label delta attribute data and value.
void addLabelDelta(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Hi,
const MCSymbol *Lo);
/// Add a DIE attribute data and value.
void addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIE &Entry);
/// Add a DIE attribute data and value.
void addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIEEntry Entry);
void addDIETypeSignature(DIE &Die, const DwarfTypeUnit &Type);
/// Add block data.
void addBlock(DIE &Die, dwarf::Attribute Attribute, DIELoc *Block);
/// Add block data.
void addBlock(DIE &Die, dwarf::Attribute Attribute, DIEBlock *Block);
/// Add location information to specified debug information entry.
void addSourceLine(DIE &Die, unsigned Line, StringRef File,
StringRef Directory);
void addSourceLine(DIE &Die, const DILocalVariable *V);
void addSourceLine(DIE &Die, const DIGlobalVariable *G);
void addSourceLine(DIE &Die, const DISubprogram *SP);
void addSourceLine(DIE &Die, const DIType *Ty);
void addSourceLine(DIE &Die, const DINamespace *NS);
void addSourceLine(DIE &Die, const DIObjCProperty *Ty);
/// Add constant value entry in variable DIE.
void addConstantValue(DIE &Die, const MachineOperand &MO, const DIType *Ty);
void addConstantValue(DIE &Die, const ConstantInt *CI, const DIType *Ty);
void addConstantValue(DIE &Die, const APInt &Val, const DIType *Ty);
void addConstantValue(DIE &Die, const APInt &Val, bool Unsigned);
void addConstantValue(DIE &Die, bool Unsigned, uint64_t Val);
/// Add constant value entry in variable DIE.
void addConstantFPValue(DIE &Die, const MachineOperand &MO);
void addConstantFPValue(DIE &Die, const ConstantFP *CFP);
/// Add a linkage name, if it isn't empty.
void addLinkageName(DIE &Die, StringRef LinkageName);
/// Add template parameters in buffer.
void addTemplateParams(DIE &Buffer, DINodeArray TParams);
/// Add register operand.
/// \returns false if the register does not exist, e.g., because it was never
/// materialized.
bool addRegisterOpPiece(DIELoc &TheDie, unsigned Reg,
unsigned SizeInBits = 0, unsigned OffsetInBits = 0);
/// Add register offset.
/// \returns false if the register does not exist, e.g., because it was never
/// materialized.
bool addRegisterOffset(DIELoc &TheDie, unsigned Reg, int64_t Offset);
// FIXME: Should be reformulated in terms of addComplexAddress.
/// Start with the address based on the location provided, and generate the
/// DWARF information necessary to find the actual Block variable (navigating
/// the Block struct) based on the starting location. Add the DWARF
/// information to the die. Obsolete, please use addComplexAddress instead.
void addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
dwarf::Attribute Attribute,
const MachineLocation &Location);
/// Add a new type attribute to the specified entity.
///
  /// This takes an attribute parameter because DW_AT_friend attributes are
/// also type references.
void addType(DIE &Entity, const DIType *Ty,
dwarf::Attribute Attribute = dwarf::DW_AT_type);
DIE *getOrCreateNameSpace(const DINamespace *NS);
DIE *getOrCreateModule(const DIModule *M);
DIE *getOrCreateSubprogramDIE(const DISubprogram *SP, bool Minimal = false);
void applySubprogramAttributes(const DISubprogram *SP, DIE &SPDie,
bool Minimal = false);
  /// Find an existing DIE or create a new DIE for the given type.
  DIE *getOrCreateTypeDIE(const MDNode *N);
  /// Create a new DIE for the given composite type.
DIE *createTypeDIE(const DICompositeType *Ty);
/// Get context owner's DIE.
DIE *getOrCreateContextDIE(const DIScope *Context);
/// Construct DIEs for types that contain vtables.
void constructContainingTypeDIEs();
/// Construct function argument DIEs.
void constructSubprogramArguments(DIE &Buffer, DITypeRefArray Args);
/// Create a DIE with the given Tag, add the DIE to its parent, and
  /// call insertDIE if N is not null.
DIE &createAndAddDIE(unsigned Tag, DIE &Parent, const DINode *N = nullptr);
/// Compute the size of a header for this unit, not including the initial
/// length field.
virtual unsigned getHeaderSize() const {
return sizeof(int16_t) + // DWARF version number
sizeof(int32_t) + // Offset Into Abbrev. Section
sizeof(int8_t); // Pointer Size (in bytes)
}
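  // Illustrative note (not part of the original file): for a 32-bit DWARF
  // compile unit this is 2 + 4 + 1 = 7 bytes, so the full header occupies
  // 4 (initial length) + 7 = 11 bytes before the first DIE.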
/// Emit the header for this unit, not including the initial length field.
virtual void emitHeader(bool UseOffsets);
virtual DwarfCompileUnit &getCU() = 0;
void constructTypeDIE(DIE &Buffer, const DICompositeType *CTy);
protected:
/// Create new static data member DIE.
DIE *getOrCreateStaticMemberDIE(const DIDerivedType *DT);
/// Look up the source ID with the given directory and source file names. If
/// none currently exists, create a new ID and insert it in the line table.
virtual unsigned getOrCreateSourceID(StringRef File, StringRef Directory) = 0;
/// Look in the DwarfDebug map for the MDNode that corresponds to the
/// reference.
template <typename T> T *resolve(TypedDINodeRef<T> Ref) const {
return DD->resolve(Ref);
}
private:
void constructTypeDIE(DIE &Buffer, const DIBasicType *BTy);
void constructTypeDIE(DIE &Buffer, const DIDerivedType *DTy);
void constructTypeDIE(DIE &Buffer, const DISubroutineType *DTy);
void constructSubrangeDIE(DIE &Buffer, const DISubrange *SR, DIE *IndexTy);
void constructArrayTypeDIE(DIE &Buffer, const DICompositeType *CTy);
void constructEnumTypeDIE(DIE &Buffer, const DICompositeType *CTy);
void constructMemberDIE(DIE &Buffer, const DIDerivedType *DT);
void constructTemplateTypeParameterDIE(DIE &Buffer,
const DITemplateTypeParameter *TP);
void constructTemplateValueParameterDIE(DIE &Buffer,
const DITemplateValueParameter *TVP);
/// Return the default lower bound for an array.
///
/// If the DWARF version doesn't handle the language, return -1.
int64_t getDefaultLowerBound() const;
/// Get an anonymous type for index type.
DIE *getIndexTyDie();
/// Set D as anonymous type for index which can be reused later.
void setIndexTyDie(DIE *D) { IndexTyDie = D; }
/// If this is a named finished type then include it in the list of types for
/// the accelerator tables.
void updateAcceleratorTables(const DIScope *Context, const DIType *Ty,
const DIE &TyDIE);
virtual bool isDwoUnit() const = 0;
};
class DwarfTypeUnit : public DwarfUnit {
uint64_t TypeSignature;
const DIE *Ty;
DwarfCompileUnit &CU;
MCDwarfDwoLineTable *SplitLineTable;
unsigned getOrCreateSourceID(StringRef File, StringRef Directory) override;
bool isDwoUnit() const override;
public:
DwarfTypeUnit(unsigned UID, DwarfCompileUnit &CU, AsmPrinter *A,
DwarfDebug *DW, DwarfFile *DWU,
MCDwarfDwoLineTable *SplitLineTable = nullptr);
void setTypeSignature(uint64_t Signature) { TypeSignature = Signature; }
uint64_t getTypeSignature() const { return TypeSignature; }
void setType(const DIE *Ty) { this->Ty = Ty; }
/// Emit the header for this unit, not including the initial length field.
void emitHeader(bool UseOffsets) override;
unsigned getHeaderSize() const override {
return DwarfUnit::getHeaderSize() + sizeof(uint64_t) + // Type Signature
sizeof(uint32_t); // Type DIE Offset
}
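  // Illustrative note (not part of the original file): with the two extra
  // fields this is 7 + 8 + 4 = 19 bytes, i.e. a 32-bit DWARF type-unit
  // header spans 4 (initial length) + 19 = 23 bytes in total.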
DwarfCompileUnit &getCU() override { return CU; }
};
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfFile.h | //===-- llvm/CodeGen/DwarfFile.h - Dwarf Debug Framework -------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFFILE_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFFILE_H
#include "AddressPool.h"
#include "DwarfStringPool.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class AsmPrinter;
class DbgVariable;
class DwarfUnit;
class DIEAbbrev;
class MCSymbol;
class DIE;
class LexicalScope;
class StringRef;
class DwarfDebug;
class MCSection;
class MDNode;
class DwarfFile {
// Target of Dwarf emission, used for sizing of abbreviations.
AsmPrinter *Asm;
BumpPtrAllocator AbbrevAllocator;
// Used to uniquely define abbreviations.
FoldingSet<DIEAbbrev> AbbreviationsSet;
// A list of all the unique abbreviations in use.
std::vector<DIEAbbrev *> Abbreviations;
  // Owning list of all the units in the section.
SmallVector<std::unique_ptr<DwarfUnit>, 1> CUs;
DwarfStringPool StrPool;
// Collection of dbg variables of a scope.
DenseMap<LexicalScope *, SmallVector<DbgVariable *, 8>> ScopeVariables;
// Collection of abstract subprogram DIEs.
DenseMap<const MDNode *, DIE *> AbstractSPDies;
  /// Maps type-system MDNodes to the corresponding DIEs. These DIEs can
  /// be shared across CUs, which is why we keep the map here instead
  /// of in DwarfCompileUnit.
DenseMap<const MDNode *, DIE *> DITypeNodeToDieMap;
public:
DwarfFile(AsmPrinter *AP, StringRef Pref, BumpPtrAllocator &DA);
~DwarfFile();
const SmallVectorImpl<std::unique_ptr<DwarfUnit>> &getUnits() { return CUs; }
/// \brief Compute the size and offset of a DIE given an incoming Offset.
unsigned computeSizeAndOffset(DIE &Die, unsigned Offset);
/// \brief Compute the size and offset of all the DIEs.
void computeSizeAndOffsets();
/// Define a unique number for the abbreviation.
///
/// Compute the abbreviation for \c Die, look up its unique number, and
/// return a reference to it in the uniquing table.
DIEAbbrev &assignAbbrevNumber(DIE &Die);
/// \brief Add a unit to the list of CUs.
void addUnit(std::unique_ptr<DwarfUnit> U);
/// \brief Emit all of the units to the section listed with the given
/// abbreviation section.
void emitUnits(bool UseOffsets);
/// \brief Emit a set of abbreviations to the specific section.
void emitAbbrevs(MCSection *);
/// \brief Emit all of the strings to the section given.
void emitStrings(MCSection *StrSection, MCSection *OffsetSection = nullptr);
/// \brief Returns the string pool.
DwarfStringPool &getStringPool() { return StrPool; }
/// \returns false if the variable was merged with a previous one.
bool addScopeVariable(LexicalScope *LS, DbgVariable *Var);
DenseMap<LexicalScope *, SmallVector<DbgVariable *, 8>> &getScopeVariables() {
return ScopeVariables;
}
DenseMap<const MDNode *, DIE *> &getAbstractSPDies() {
return AbstractSPDies;
}
void insertDIE(const MDNode *TypeMD, DIE *Die) {
DITypeNodeToDieMap.insert(std::make_pair(TypeMD, Die));
}
DIE *getDIE(const MDNode *TypeMD) {
return DITypeNodeToDieMap.lookup(TypeMD);
}
};
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfStringPool.cpp | //===-- llvm/CodeGen/DwarfStringPool.cpp - Dwarf Debug Framework ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "DwarfStringPool.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
using namespace llvm;
DwarfStringPool::DwarfStringPool(BumpPtrAllocator &A, AsmPrinter &Asm,
StringRef Prefix)
: Pool(A), Prefix(Prefix),
ShouldCreateSymbols(Asm.MAI->doesDwarfUseRelocationsAcrossSections()) {}
DwarfStringPool::EntryRef DwarfStringPool::getEntry(AsmPrinter &Asm,
StringRef Str) {
auto I = Pool.insert(std::make_pair(Str, EntryTy()));
if (I.second) {
auto &Entry = I.first->second;
Entry.Index = Pool.size() - 1;
Entry.Offset = NumBytes;
Entry.Symbol = ShouldCreateSymbols ? Asm.createTempSymbol(Prefix) : nullptr;
NumBytes += Str.size() + 1;
assert(NumBytes > Entry.Offset && "Unexpected overflow");
}
return EntryRef(*I.first);
}
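// Illustrative note (not part of the original file): offsets advance by the
// string length plus its NUL terminator. Inserting "foo" and then "bar" into
// an empty pool yields Offset 0 and Offset 4, leaving NumBytes at 8;
// re-inserting "foo" returns the existing entry unchanged.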
void DwarfStringPool::emit(AsmPrinter &Asm, MCSection *StrSection,
MCSection *OffsetSection) {
if (Pool.empty())
return;
// Start the dwarf str section.
Asm.OutStreamer->SwitchSection(StrSection);
  // Get all of the string pool entries and put them in an array indexed by
  // their ID so that we can emit them in order.
SmallVector<const StringMapEntry<EntryTy> *, 64> Entries(Pool.size());
for (const auto &E : Pool)
Entries[E.getValue().Index] = &E;
for (const auto &Entry : Entries) {
assert(ShouldCreateSymbols == static_cast<bool>(Entry->getValue().Symbol) &&
"Mismatch between setting and entry");
// Emit a label for reference from debug information entries.
if (ShouldCreateSymbols)
Asm.OutStreamer->EmitLabel(Entry->getValue().Symbol);
// Emit the string itself with a terminating null byte.
Asm.OutStreamer->AddComment("string offset=" +
Twine(Entry->getValue().Offset));
Asm.OutStreamer->EmitBytes(
StringRef(Entry->getKeyData(), Entry->getKeyLength() + 1));
}
// If we've got an offset section go ahead and emit that now as well.
if (OffsetSection) {
Asm.OutStreamer->SwitchSection(OffsetSection);
unsigned size = 4; // FIXME: DWARF64 is 8.
for (const auto &Entry : Entries)
Asm.OutStreamer->EmitIntValue(Entry->getValue().Offset, size);
}
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DIE.cpp | //===--- lib/CodeGen/DIE.cpp - DWARF Info Entries -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Data structures for DWARF info entries.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/DIE.h"
#include "DwarfCompileUnit.h"
#include "DwarfDebug.h"
#include "DwarfUnit.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
// DIEAbbrevData Implementation
//===----------------------------------------------------------------------===//
/// Profile - Used to gather unique data for the abbreviation folding set.
///
void DIEAbbrevData::Profile(FoldingSetNodeID &ID) const {
// Explicitly cast to an integer type for which FoldingSetNodeID has
// overloads. Otherwise MSVC 2010 thinks this call is ambiguous.
ID.AddInteger(unsigned(Attribute));
ID.AddInteger(unsigned(Form));
}
//===----------------------------------------------------------------------===//
// DIEAbbrev Implementation
//===----------------------------------------------------------------------===//
/// Profile - Used to gather unique data for the abbreviation folding set.
///
void DIEAbbrev::Profile(FoldingSetNodeID &ID) const {
ID.AddInteger(unsigned(Tag));
ID.AddInteger(unsigned(Children));
// For each attribute description.
for (unsigned i = 0, N = Data.size(); i < N; ++i)
Data[i].Profile(ID);
}
/// Emit - Print the abbreviation using the specified asm printer.
///
void DIEAbbrev::Emit(const AsmPrinter *AP) const {
// Emit its Dwarf tag type.
AP->EmitULEB128(Tag, dwarf::TagString(Tag));
// Emit whether it has children DIEs.
AP->EmitULEB128((unsigned)Children, dwarf::ChildrenString(Children));
// For each attribute description.
for (unsigned i = 0, N = Data.size(); i < N; ++i) {
const DIEAbbrevData &AttrData = Data[i];
// Emit attribute type.
AP->EmitULEB128(AttrData.getAttribute(),
dwarf::AttributeString(AttrData.getAttribute()));
// Emit form type.
AP->EmitULEB128(AttrData.getForm(),
dwarf::FormEncodingString(AttrData.getForm()));
}
// Mark end of abbreviation.
AP->EmitULEB128(0, "EOM(1)");
AP->EmitULEB128(0, "EOM(2)");
}
#ifndef NDEBUG
void DIEAbbrev::print(raw_ostream &O) {
O << "Abbreviation @"
<< format("0x%lx", (long)(intptr_t)this)
<< " "
<< dwarf::TagString(Tag)
<< " "
<< dwarf::ChildrenString(Children)
<< '\n';
for (unsigned i = 0, N = Data.size(); i < N; ++i) {
O << " "
<< dwarf::AttributeString(Data[i].getAttribute())
<< " "
<< dwarf::FormEncodingString(Data[i].getForm())
<< '\n';
}
}
void DIEAbbrev::dump() { print(dbgs()); }
#endif
DIEAbbrev DIE::generateAbbrev() const {
DIEAbbrev Abbrev(Tag, hasChildren());
for (const DIEValue &V : Values)
Abbrev.AddAttribute(V.getAttribute(), V.getForm());
return Abbrev;
}
/// Climb up the parent chain to get the unit DIE to which this DIE
/// belongs.
const DIE *DIE::getUnit() const {
const DIE *Cu = getUnitOrNull();
assert(Cu && "We should not have orphaned DIEs.");
return Cu;
}
/// Climb up the parent chain to get the unit DIE this DIE belongs
/// to. Return NULL if DIE is not added to an owner yet.
const DIE *DIE::getUnitOrNull() const {
const DIE *p = this;
while (p) {
if (p->getTag() == dwarf::DW_TAG_compile_unit ||
p->getTag() == dwarf::DW_TAG_type_unit)
return p;
p = p->getParent();
}
return nullptr;
}
DIEValue DIE::findAttribute(dwarf::Attribute Attribute) const {
// Iterate through all the attributes until we find the one we're
  // looking for; if we can't find it, return an empty DIEValue.
for (const auto &V : values())
if (V.getAttribute() == Attribute)
return V;
return DIEValue();
}
#ifndef NDEBUG
void DIE::print(raw_ostream &O, unsigned IndentCount) const {
const std::string Indent(IndentCount, ' ');
bool isBlock = getTag() == 0;
if (!isBlock) {
O << Indent
<< "Die: "
<< format("0x%lx", (long)(intptr_t)this)
<< ", Offset: " << Offset
<< ", Size: " << Size << "\n";
O << Indent
<< dwarf::TagString(getTag())
<< " "
<< dwarf::ChildrenString(hasChildren()) << "\n";
} else {
O << "Size: " << Size << "\n";
}
IndentCount += 2;
unsigned I = 0;
for (const auto &V : Values) {
O << Indent;
if (!isBlock)
O << dwarf::AttributeString(V.getAttribute());
else
O << "Blk[" << I++ << "]";
O << " " << dwarf::FormEncodingString(V.getForm()) << " ";
V.print(O);
O << "\n";
}
IndentCount -= 2;
for (const auto &Child : children())
Child.print(O, IndentCount + 4);
if (!isBlock) O << "\n";
}
void DIE::dump() {
print(dbgs());
}
#endif
void DIEValue::EmitValue(const AsmPrinter *AP) const {
switch (Ty) {
case isNone:
llvm_unreachable("Expected valid DIEValue");
#define HANDLE_DIEVALUE(T) \
case is##T: \
getDIE##T().EmitValue(AP, Form); \
break;
#include "llvm/CodeGen/DIEValue.def"
}
}
unsigned DIEValue::SizeOf(const AsmPrinter *AP) const {
switch (Ty) {
case isNone:
llvm_unreachable("Expected valid DIEValue");
#define HANDLE_DIEVALUE(T) \
case is##T: \
return getDIE##T().SizeOf(AP, Form);
#include "llvm/CodeGen/DIEValue.def"
}
llvm_unreachable("Unknown DIE kind");
}
#ifndef NDEBUG
void DIEValue::print(raw_ostream &O) const {
switch (Ty) {
case isNone:
llvm_unreachable("Expected valid DIEValue");
#define HANDLE_DIEVALUE(T) \
case is##T: \
getDIE##T().print(O); \
break;
#include "llvm/CodeGen/DIEValue.def"
}
}
void DIEValue::dump() const {
print(dbgs());
}
#endif
//===----------------------------------------------------------------------===//
// DIEInteger Implementation
//===----------------------------------------------------------------------===//
/// EmitValue - Emit integer of appropriate size.
///
void DIEInteger::EmitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
unsigned Size = ~0U;
switch (Form) {
case dwarf::DW_FORM_flag_present:
// Emit something to keep the lines and comments in sync.
// FIXME: Is there a better way to do this?
Asm->OutStreamer->AddBlankLine();
return;
case dwarf::DW_FORM_flag: // Fall thru
case dwarf::DW_FORM_ref1: // Fall thru
case dwarf::DW_FORM_data1: Size = 1; break;
case dwarf::DW_FORM_ref2: // Fall thru
case dwarf::DW_FORM_data2: Size = 2; break;
case dwarf::DW_FORM_sec_offset: // Fall thru
case dwarf::DW_FORM_strp: // Fall thru
case dwarf::DW_FORM_ref4: // Fall thru
case dwarf::DW_FORM_data4: Size = 4; break;
case dwarf::DW_FORM_ref8: // Fall thru
case dwarf::DW_FORM_ref_sig8: // Fall thru
case dwarf::DW_FORM_data8: Size = 8; break;
case dwarf::DW_FORM_GNU_str_index: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_GNU_addr_index: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
case dwarf::DW_FORM_addr:
Size = Asm->getDataLayout().getPointerSize(); break;
case dwarf::DW_FORM_ref_addr:
Size = SizeOf(Asm, dwarf::DW_FORM_ref_addr);
break;
default: llvm_unreachable("DIE Value form not supported yet");
}
Asm->OutStreamer->EmitIntValue(Integer, Size);
}
/// SizeOf - Determine size of integer value in bytes.
///
unsigned DIEInteger::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_flag_present: return 0;
case dwarf::DW_FORM_flag: // Fall thru
case dwarf::DW_FORM_ref1: // Fall thru
case dwarf::DW_FORM_data1: return sizeof(int8_t);
case dwarf::DW_FORM_ref2: // Fall thru
case dwarf::DW_FORM_data2: return sizeof(int16_t);
case dwarf::DW_FORM_sec_offset: // Fall thru
case dwarf::DW_FORM_strp: // Fall thru
case dwarf::DW_FORM_ref4: // Fall thru
case dwarf::DW_FORM_data4: return sizeof(int32_t);
case dwarf::DW_FORM_ref8: // Fall thru
case dwarf::DW_FORM_ref_sig8: // Fall thru
case dwarf::DW_FORM_data8: return sizeof(int64_t);
case dwarf::DW_FORM_GNU_str_index: return getULEB128Size(Integer);
case dwarf::DW_FORM_GNU_addr_index: return getULEB128Size(Integer);
case dwarf::DW_FORM_udata: return getULEB128Size(Integer);
case dwarf::DW_FORM_sdata: return getSLEB128Size(Integer);
case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize();
case dwarf::DW_FORM_ref_addr:
if (AP->OutStreamer->getContext().getDwarfVersion() == 2)
return AP->getDataLayout().getPointerSize();
return sizeof(int32_t);
default: llvm_unreachable("DIE Value form not supported yet");
}
}
#ifndef NDEBUG
void DIEInteger::print(raw_ostream &O) const {
O << "Int: " << (int64_t)Integer << " 0x";
O.write_hex(Integer);
}
#endif
//===----------------------------------------------------------------------===//
// DIEExpr Implementation
//===----------------------------------------------------------------------===//
/// EmitValue - Emit expression value.
///
void DIEExpr::EmitValue(const AsmPrinter *AP, dwarf::Form Form) const {
AP->OutStreamer->EmitValue(Expr, SizeOf(AP, Form));
}
/// SizeOf - Determine size of expression value in bytes.
///
unsigned DIEExpr::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_sec_offset) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
return AP->getDataLayout().getPointerSize();
}
#ifndef NDEBUG
void DIEExpr::print(raw_ostream &O) const { O << "Expr: " << *Expr; }
#endif
//===----------------------------------------------------------------------===//
// DIELabel Implementation
//===----------------------------------------------------------------------===//
/// EmitValue - Emit label value.
///
void DIELabel::EmitValue(const AsmPrinter *AP, dwarf::Form Form) const {
AP->EmitLabelReference(Label, SizeOf(AP, Form),
Form == dwarf::DW_FORM_strp ||
Form == dwarf::DW_FORM_sec_offset ||
Form == dwarf::DW_FORM_ref_addr);
}
/// SizeOf - Determine size of label value in bytes.
///
unsigned DIELabel::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_sec_offset) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
return AP->getDataLayout().getPointerSize();
}
#ifndef NDEBUG
void DIELabel::print(raw_ostream &O) const { O << "Lbl: " << Label->getName(); }
#endif
//===----------------------------------------------------------------------===//
// DIEDelta Implementation
//===----------------------------------------------------------------------===//
/// EmitValue - Emit delta value.
///
void DIEDelta::EmitValue(const AsmPrinter *AP, dwarf::Form Form) const {
AP->EmitLabelDifference(LabelHi, LabelLo, SizeOf(AP, Form));
}
/// SizeOf - Determine size of delta value in bytes.
///
unsigned DIEDelta::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_sec_offset) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
return AP->getDataLayout().getPointerSize();
}
#ifndef NDEBUG
void DIEDelta::print(raw_ostream &O) const {
O << "Del: " << LabelHi->getName() << "-" << LabelLo->getName();
}
#endif
//===----------------------------------------------------------------------===//
// DIEString Implementation
//===----------------------------------------------------------------------===//
/// EmitValue - Emit string value.
///
void DIEString::EmitValue(const AsmPrinter *AP, dwarf::Form Form) const {
assert(
(Form == dwarf::DW_FORM_strp || Form == dwarf::DW_FORM_GNU_str_index) &&
"Expected valid string form");
  // Index of the string in the string pool.
if (Form == dwarf::DW_FORM_GNU_str_index) {
DIEInteger(S.getIndex()).EmitValue(AP, Form);
return;
}
// Relocatable symbol.
assert(Form == dwarf::DW_FORM_strp);
if (AP->MAI->doesDwarfUseRelocationsAcrossSections()) {
DIELabel(S.getSymbol()).EmitValue(AP, Form);
return;
}
  // Offset into the string table.
DIEInteger(S.getOffset()).EmitValue(AP, Form);
}
/// SizeOf - Determine size of string value in bytes.
///
unsigned DIEString::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
assert(
(Form == dwarf::DW_FORM_strp || Form == dwarf::DW_FORM_GNU_str_index) &&
"Expected valid string form");
  // Index of the string in the string pool.
if (Form == dwarf::DW_FORM_GNU_str_index)
return DIEInteger(S.getIndex()).SizeOf(AP, Form);
// Relocatable symbol.
if (AP->MAI->doesDwarfUseRelocationsAcrossSections())
return DIELabel(S.getSymbol()).SizeOf(AP, Form);
  // Offset into the string table.
return DIEInteger(S.getOffset()).SizeOf(AP, Form);
}
#ifndef NDEBUG
void DIEString::print(raw_ostream &O) const {
O << "String: " << S.getString();
}
#endif
//===----------------------------------------------------------------------===//
// DIEEntry Implementation
//===----------------------------------------------------------------------===//
/// EmitValue - Emit debug information entry offset.
///
void DIEEntry::EmitValue(const AsmPrinter *AP, dwarf::Form Form) const {
if (Form == dwarf::DW_FORM_ref_addr) {
const DwarfDebug *DD = AP->getDwarfDebug();
unsigned Addr = Entry->getOffset();
assert(!DD->useSplitDwarf() && "TODO: dwo files can't have relocations.");
// For DW_FORM_ref_addr, output the offset from beginning of debug info
// section. Entry->getOffset() returns the offset from start of the
// compile unit.
DwarfCompileUnit *CU = DD->lookupUnit(Entry->getUnit());
assert(CU && "CUDie should belong to a CU.");
Addr += CU->getDebugInfoOffset();
if (AP->MAI->doesDwarfUseRelocationsAcrossSections())
AP->EmitLabelPlusOffset(CU->getSectionSym(), Addr,
DIEEntry::getRefAddrSize(AP));
else
AP->OutStreamer->EmitIntValue(Addr, DIEEntry::getRefAddrSize(AP));
} else
AP->EmitInt32(Entry->getOffset());
}
unsigned DIEEntry::getRefAddrSize(const AsmPrinter *AP) {
// DWARF4: References that use the attribute form DW_FORM_ref_addr are
// specified to be four bytes in the DWARF 32-bit format and eight bytes
// in the DWARF 64-bit format, while DWARF Version 2 specifies that such
// references have the same size as an address on the target system.
const DwarfDebug *DD = AP->getDwarfDebug();
assert(DD && "Expected Dwarf Debug info to be available");
if (DD->getDwarfVersion() == 2)
return AP->getDataLayout().getPointerSize();
return sizeof(int32_t);
}
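// Illustrative note (not part of the original file): as implemented here, a
// DW_FORM_ref_addr reference costs 8 bytes under DWARF v2 on a target with
// 8-byte pointers, and 4 bytes for v3 and later (32-bit DWARF format),
// independent of the target pointer size.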
#ifndef NDEBUG
void DIEEntry::print(raw_ostream &O) const {
O << format("Die: 0x%lx", (long)(intptr_t)&Entry);
}
#endif
//===----------------------------------------------------------------------===//
// DIETypeSignature Implementation
//===----------------------------------------------------------------------===//
void DIETypeSignature::EmitValue(const AsmPrinter *Asm,
dwarf::Form Form) const {
assert(Form == dwarf::DW_FORM_ref_sig8);
Asm->OutStreamer->EmitIntValue(Unit->getTypeSignature(), 8);
}
#ifndef NDEBUG
void DIETypeSignature::print(raw_ostream &O) const {
O << format("Type Unit: 0x%lx", Unit->getTypeSignature());
}
#endif
//===----------------------------------------------------------------------===//
// DIELoc Implementation
//===----------------------------------------------------------------------===//
/// ComputeSize - calculate the size of the location expression.
///
unsigned DIELoc::ComputeSize(const AsmPrinter *AP) const {
if (!Size) {
for (const auto &V : Values)
Size += V.SizeOf(AP);
}
return Size;
}
/// EmitValue - Emit location data.
///
void DIELoc::EmitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
switch (Form) {
default: llvm_unreachable("Improper form for block");
case dwarf::DW_FORM_block1: Asm->EmitInt8(Size); break;
case dwarf::DW_FORM_block2: Asm->EmitInt16(Size); break;
case dwarf::DW_FORM_block4: Asm->EmitInt32(Size); break;
case dwarf::DW_FORM_block:
case dwarf::DW_FORM_exprloc:
Asm->EmitULEB128(Size); break;
}
for (const auto &V : Values)
V.EmitValue(Asm);
}
/// SizeOf - Determine size of location data in bytes.
///
unsigned DIELoc::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_block1: return Size + sizeof(int8_t);
case dwarf::DW_FORM_block2: return Size + sizeof(int16_t);
case dwarf::DW_FORM_block4: return Size + sizeof(int32_t);
case dwarf::DW_FORM_block:
case dwarf::DW_FORM_exprloc:
return Size + getULEB128Size(Size);
default: llvm_unreachable("Improper form for block");
}
}
#ifndef NDEBUG
void DIELoc::print(raw_ostream &O) const {
O << "ExprLoc: ";
DIE::print(O, 5);
}
#endif
//===----------------------------------------------------------------------===//
// DIEBlock Implementation
//===----------------------------------------------------------------------===//
/// ComputeSize - calculate the size of the block.
///
unsigned DIEBlock::ComputeSize(const AsmPrinter *AP) const {
if (!Size) {
for (const auto &V : Values)
Size += V.SizeOf(AP);
}
return Size;
}
/// EmitValue - Emit block data.
///
void DIEBlock::EmitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
switch (Form) {
default: llvm_unreachable("Improper form for block");
case dwarf::DW_FORM_block1: Asm->EmitInt8(Size); break;
case dwarf::DW_FORM_block2: Asm->EmitInt16(Size); break;
case dwarf::DW_FORM_block4: Asm->EmitInt32(Size); break;
case dwarf::DW_FORM_block: Asm->EmitULEB128(Size); break;
}
for (const auto &V : Values)
V.EmitValue(Asm);
}
/// SizeOf - Determine size of block data in bytes.
///
unsigned DIEBlock::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_block1: return Size + sizeof(int8_t);
case dwarf::DW_FORM_block2: return Size + sizeof(int16_t);
case dwarf::DW_FORM_block4: return Size + sizeof(int32_t);
case dwarf::DW_FORM_block: return Size + getULEB128Size(Size);
default: llvm_unreachable("Improper form for block");
}
}
#ifndef NDEBUG
void DIEBlock::print(raw_ostream &O) const {
O << "Blk: ";
DIE::print(O, 5);
}
#endif
//===----------------------------------------------------------------------===//
// DIELocList Implementation
//===----------------------------------------------------------------------===//
unsigned DIELocList::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
if (Form == dwarf::DW_FORM_data4)
return 4;
if (Form == dwarf::DW_FORM_sec_offset)
return 4;
return AP->getDataLayout().getPointerSize();
}
/// EmitValue - Emit location list value.
///
void DIELocList::EmitValue(const AsmPrinter *AP, dwarf::Form Form) const {
DwarfDebug *DD = AP->getDwarfDebug();
MCSymbol *Label = DD->getDebugLocs().getList(Index).Label;
AP->emitDwarfSymbolReference(Label, /*ForceOffset*/ DD->useSplitDwarf());
}
#ifndef NDEBUG
void DIELocList::print(raw_ostream &O) const { O << "LocList: " << Index; }
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp | //===-- llvm/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "DbgValueHistoryCalculator.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <map>
using namespace llvm;
#define DEBUG_TYPE "dwarfdebug"
// \brief If @MI is a DBG_VALUE whose debug value is described by a
// defined register, returns the number of this register.
// Otherwise, returns 0.
static unsigned isDescribedByReg(const MachineInstr &MI) {
assert(MI.isDebugValue());
assert(MI.getNumOperands() == 4);
// If the location of a variable is described using a register (directly or
// indirectly), this register is always the first operand.
return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
}
void DbgValueHistoryMap::startInstrRange(InlinedVariable Var,
const MachineInstr &MI) {
// Instruction range should start with a DBG_VALUE instruction for the
// variable.
assert(MI.isDebugValue() && "not a DBG_VALUE");
auto &Ranges = VarInstrRanges[Var];
if (!Ranges.empty() && Ranges.back().second == nullptr &&
Ranges.back().first->isIdenticalTo(&MI)) {
DEBUG(dbgs() << "Coalescing identical DBG_VALUE entries:\n"
<< "\t" << Ranges.back().first << "\t" << MI << "\n");
return;
}
Ranges.push_back(std::make_pair(&MI, nullptr));
}
void DbgValueHistoryMap::endInstrRange(InlinedVariable Var,
const MachineInstr &MI) {
auto &Ranges = VarInstrRanges[Var];
// Verify that the current instruction range is not yet closed.
assert(!Ranges.empty() && Ranges.back().second == nullptr);
// For now, instruction ranges are not allowed to cross basic block
// boundaries.
assert(Ranges.back().first->getParent() == MI.getParent());
Ranges.back().second = &MI;
}
unsigned DbgValueHistoryMap::getRegisterForVar(InlinedVariable Var) const {
const auto &I = VarInstrRanges.find(Var);
if (I == VarInstrRanges.end())
return 0;
const auto &Ranges = I->second;
if (Ranges.empty() || Ranges.back().second != nullptr)
return 0;
return isDescribedByReg(*Ranges.back().first);
}
namespace {
// Maps physreg numbers to the variables they describe.
typedef DbgValueHistoryMap::InlinedVariable InlinedVariable;
typedef std::map<unsigned, SmallVector<InlinedVariable, 1>> RegDescribedVarsMap;
}
// \brief Claim that @Var is not described by @RegNo anymore.
static void dropRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo,
InlinedVariable Var) {
const auto &I = RegVars.find(RegNo);
assert(RegNo != 0U && I != RegVars.end());
auto &VarSet = I->second;
const auto &VarPos = std::find(VarSet.begin(), VarSet.end(), Var);
assert(VarPos != VarSet.end());
VarSet.erase(VarPos);
// Don't keep empty sets in a map to keep it as small as possible.
if (VarSet.empty())
RegVars.erase(I);
}
// \brief Claim that @Var is now described by @RegNo.
static void addRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo,
InlinedVariable Var) {
assert(RegNo != 0U);
auto &VarSet = RegVars[RegNo];
assert(std::find(VarSet.begin(), VarSet.end(), Var) == VarSet.end());
VarSet.push_back(Var);
}
// \brief Terminate the location range for variables described by the register
// at @I by inserting @ClobberingInstr into their history.
static void clobberRegisterUses(RegDescribedVarsMap &RegVars,
RegDescribedVarsMap::iterator I,
DbgValueHistoryMap &HistMap,
const MachineInstr &ClobberingInstr) {
// Iterate over all variables described by this register and add this
// instruction to their history, clobbering it.
for (const auto &Var : I->second)
HistMap.endInstrRange(Var, ClobberingInstr);
RegVars.erase(I);
}
// \brief Terminate the location range for variables described by register
// @RegNo by inserting @ClobberingInstr into their history.
static void clobberRegisterUses(RegDescribedVarsMap &RegVars, unsigned RegNo,
DbgValueHistoryMap &HistMap,
const MachineInstr &ClobberingInstr) {
const auto &I = RegVars.find(RegNo);
if (I == RegVars.end())
return;
clobberRegisterUses(RegVars, I, HistMap, ClobberingInstr);
}
// \brief Collect all registers clobbered by @MI and apply the functor
// @Func to their RegNo.
// @Func should be a functor with a void(unsigned) signature. We're
// not using std::function here for performance reasons. It has a
// small but measurable impact. By using a functor instead of a
// std::set& here, we can avoid the overhead of constructing
// temporaries in calculateDbgValueHistory, which has a significant
// performance impact.
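// For example (illustrative), a caller can accumulate the clobbered registers
// into a BitVector without constructing any temporary containers:
//   applyToClobberedRegisters(MI, TRI, [&](unsigned R) { Regs.set(R); });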
template<typename Callable>
static void applyToClobberedRegisters(const MachineInstr &MI,
const TargetRegisterInfo *TRI,
Callable Func) {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || !MO.isDef() || !MO.getReg())
continue;
for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
Func(*AI);
}
}
// \brief Returns the first instruction in @MBB which corresponds to
// the function epilogue, or nullptr if @MBB doesn't contain an epilogue.
static const MachineInstr *getFirstEpilogueInst(const MachineBasicBlock &MBB) {
auto LastMI = MBB.getLastNonDebugInstr();
if (LastMI == MBB.end() || !LastMI->isReturn())
return nullptr;
// Assume that the epilogue starts with the instruction that has the same debug
// location as the return instruction.
DebugLoc LastLoc = LastMI->getDebugLoc();
auto Res = LastMI;
for (MachineBasicBlock::const_reverse_iterator I(std::next(LastMI)),
E = MBB.rend();
I != E; ++I) {
if (I->getDebugLoc() != LastLoc)
return Res;
Res = &*I;
}
// If all instructions have the same debug location, assume the whole MBB is
// an epilogue.
return MBB.begin();
}
// \brief Collect registers that are modified in the function body (their
// contents are changed outside of the prologue and epilogue).
static void collectChangingRegs(const MachineFunction *MF,
const TargetRegisterInfo *TRI,
BitVector &Regs) {
for (const auto &MBB : *MF) {
auto FirstEpilogueInst = getFirstEpilogueInst(MBB);
for (const auto &MI : MBB) {
if (&MI == FirstEpilogueInst)
break;
if (!MI.getFlag(MachineInstr::FrameSetup))
applyToClobberedRegisters(MI, TRI, [&](unsigned r) { Regs.set(r); });
}
}
}
void llvm::calculateDbgValueHistory(const MachineFunction *MF,
const TargetRegisterInfo *TRI,
DbgValueHistoryMap &Result) {
BitVector ChangingRegs(TRI->getNumRegs());
collectChangingRegs(MF, TRI, ChangingRegs);
RegDescribedVarsMap RegVars;
for (const auto &MBB : *MF) {
for (const auto &MI : MBB) {
if (!MI.isDebugValue()) {
// Not a DBG_VALUE instruction. It may clobber registers which describe
// some variables.
applyToClobberedRegisters(MI, TRI, [&](unsigned RegNo) {
if (ChangingRegs.test(RegNo))
clobberRegisterUses(RegVars, RegNo, Result, MI);
});
continue;
}
assert(MI.getNumOperands() > 1 && "Invalid DBG_VALUE instruction!");
// Use the base variable (without any DW_OP_piece expressions)
// as the index into History. The full variables, including the
// piece expressions, are attached to the MI.
const DILocalVariable *RawVar = MI.getDebugVariable();
assert(RawVar->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
"Expected inlined-at fields to agree");
InlinedVariable Var(RawVar, MI.getDebugLoc()->getInlinedAt());
if (unsigned PrevReg = Result.getRegisterForVar(Var))
dropRegDescribedVar(RegVars, PrevReg, Var);
Result.startInstrRange(Var, MI);
if (unsigned NewReg = isDescribedByReg(MI))
addRegDescribedVar(RegVars, NewReg, Var);
}
// Make sure locations for register-described variables are valid only
// until the end of the basic block (unless it's the last basic block, in
// which case let their liveness run off to the end of the function).
if (!MBB.empty() && &MBB != &MF->back()) {
for (auto I = RegVars.begin(), E = RegVars.end(); I != E;) {
auto CurElem = I++; // CurElem can be erased below.
if (ChangingRegs.test(CurElem->first))
clobberRegisterUses(RegVars, CurElem, Result, MBB.back());
}
}
}
}
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DebugLocEntry.h | //===-- llvm/CodeGen/DebugLocEntry.h - Entry in debug_loc list -*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DEBUGLOCENTRY_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_DEBUGLOCENTRY_H
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
namespace llvm {
class AsmPrinter;
class DebugLocStream;
/// \brief This struct describes location entries emitted in the .debug_loc
/// section.
class DebugLocEntry {
/// Begin and end symbols for the address range that this location is valid.
const MCSymbol *Begin;
const MCSymbol *End;
public:
/// \brief A single location or constant.
struct Value {
Value(const DIExpression *Expr, int64_t i)
: Expression(Expr), EntryKind(E_Integer) {
Constant.Int = i;
}
Value(const DIExpression *Expr, const ConstantFP *CFP)
: Expression(Expr), EntryKind(E_ConstantFP) {
Constant.CFP = CFP;
}
Value(const DIExpression *Expr, const ConstantInt *CIP)
: Expression(Expr), EntryKind(E_ConstantInt) {
Constant.CIP = CIP;
}
Value(const DIExpression *Expr, MachineLocation Loc)
: Expression(Expr), EntryKind(E_Location), Loc(Loc) {
assert(cast<DIExpression>(Expr)->isValid());
}
/// Any complex address location expression for this Value.
const DIExpression *Expression;
/// Type of entry that this represents.
enum EntryType { E_Location, E_Integer, E_ConstantFP, E_ConstantInt };
enum EntryType EntryKind;
/// Either a constant,
union {
int64_t Int;
const ConstantFP *CFP;
const ConstantInt *CIP;
} Constant;
// Or a location in the machine frame.
MachineLocation Loc;
bool isLocation() const { return EntryKind == E_Location; }
bool isInt() const { return EntryKind == E_Integer; }
bool isConstantFP() const { return EntryKind == E_ConstantFP; }
bool isConstantInt() const { return EntryKind == E_ConstantInt; }
int64_t getInt() const { return Constant.Int; }
const ConstantFP *getConstantFP() const { return Constant.CFP; }
const ConstantInt *getConstantInt() const { return Constant.CIP; }
MachineLocation getLoc() const { return Loc; }
bool isBitPiece() const { return getExpression()->isBitPiece(); }
const DIExpression *getExpression() const { return Expression; }
friend bool operator==(const Value &, const Value &);
friend bool operator<(const Value &, const Value &);
};
private:
/// A nonempty list of locations/constants belonging to this entry,
/// sorted by offset.
SmallVector<Value, 1> Values;
public:
DebugLocEntry(const MCSymbol *B, const MCSymbol *E, Value Val)
: Begin(B), End(E) {
Values.push_back(std::move(Val));
}
/// \brief If this and Next are describing different pieces of the same
/// variable, merge them by appending Next's values to the current
/// list of values.
/// Return true if the merge was successful.
bool MergeValues(const DebugLocEntry &Next) {
if (Begin == Next.Begin) {
auto *Expr = cast_or_null<DIExpression>(Values[0].Expression);
auto *NextExpr = cast_or_null<DIExpression>(Next.Values[0].Expression);
if (Expr->isBitPiece() && NextExpr->isBitPiece()) {
addValues(Next.Values);
End = Next.End;
return true;
}
}
return false;
}
/// \brief Attempt to merge this DebugLocEntry with Next and return
/// true if the merge was successful. Entries can be merged if they
/// share the same Loc/Constant and if Next immediately follows this
/// Entry.
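/// For example (illustrative): an entry covering [A, B) and an entry covering
/// [B, C) with identical values merge into a single entry covering [A, C).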
bool MergeRanges(const DebugLocEntry &Next) {
// If this and Next are describing the same variable, merge them.
if ((End == Next.Begin && Values == Next.Values)) {
End = Next.End;
return true;
}
return false;
}
const MCSymbol *getBeginSym() const { return Begin; }
const MCSymbol *getEndSym() const { return End; }
ArrayRef<Value> getValues() const { return Values; }
void addValues(ArrayRef<DebugLocEntry::Value> Vals) {
Values.append(Vals.begin(), Vals.end());
sortUniqueValues();
assert(std::all_of(Values.begin(), Values.end(), [](DebugLocEntry::Value V){
return V.isBitPiece();
}) && "value must be a piece");
}
// \brief Sort the pieces by offset.
// Remove any duplicate entries by dropping all but the first.
void sortUniqueValues() {
std::sort(Values.begin(), Values.end());
Values.erase(
std::unique(
Values.begin(), Values.end(), [](const Value &A, const Value &B) {
return A.getExpression() == B.getExpression();
}),
Values.end());
}
/// \brief Lower this entry into a DWARF expression.
void finalize(const AsmPrinter &AP, DebugLocStream::ListBuilder &List,
const DIBasicType *BT);
};
/// \brief Compare two Values for equality.
inline bool operator==(const DebugLocEntry::Value &A,
const DebugLocEntry::Value &B) {
if (A.EntryKind != B.EntryKind)
return false;
if (A.Expression != B.Expression)
return false;
switch (A.EntryKind) {
case DebugLocEntry::Value::E_Location:
return A.Loc == B.Loc;
case DebugLocEntry::Value::E_Integer:
return A.Constant.Int == B.Constant.Int;
case DebugLocEntry::Value::E_ConstantFP:
return A.Constant.CFP == B.Constant.CFP;
case DebugLocEntry::Value::E_ConstantInt:
return A.Constant.CIP == B.Constant.CIP;
}
llvm_unreachable("unhandled EntryKind");
}
/// \brief Compare two pieces based on their offset.
inline bool operator<(const DebugLocEntry::Value &A,
const DebugLocEntry::Value &B) {
return A.getExpression()->getBitPieceOffset() <
B.getExpression()->getBitPieceOffset();
}
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/CodeGen | repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/EHStreamer.cpp | //===-- CodeGen/AsmPrinter/EHStreamer.cpp - Exception Directive Streamer --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing exception info into assembly files.
//
//===----------------------------------------------------------------------===//
#include "EHStreamer.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
using namespace llvm;
EHStreamer::EHStreamer(AsmPrinter *A) : Asm(A), MMI(Asm->MMI) {}
EHStreamer::~EHStreamer() {}
/// How many leading type ids two landing pads have in common.
unsigned EHStreamer::sharedTypeIDs(const LandingPadInfo *L,
const LandingPadInfo *R) {
const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
unsigned LSize = LIds.size(), RSize = RIds.size();
unsigned MinSize = LSize < RSize ? LSize : RSize;
unsigned Count = 0;
for (; Count != MinSize; ++Count)
if (LIds[Count] != RIds[Count])
return Count;
return Count;
}
/// Compute the actions table and gather the first action index for each landing
/// pad site.
unsigned EHStreamer::
computeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
SmallVectorImpl<ActionEntry> &Actions,
SmallVectorImpl<unsigned> &FirstActions) {
// The action table follows the call-site table in the LSDA. The individual
// records are of two types:
//
// * Catch clause
// * Exception specification
//
// The two record kinds have the same format, with only small differences.
// They are distinguished by the "switch value" field: Catch clauses
// (TypeInfos) have strictly positive switch values, and exception
// specifications (FilterIds) have strictly negative switch values. Value 0
// indicates a catch-all clause.
//
// Negative type IDs index into FilterIds. Positive type IDs index into
// TypeInfos. The value written for a positive type ID is just the type ID
// itself. For a negative type ID, however, the value written is the
// (negative) byte offset of the corresponding FilterIds entry. The byte
// offset is usually equal to the type ID (because the FilterIds entries are
// written using a variable width encoding, which outputs one byte per entry
// as long as the value written is not too large) but can differ. This kind
// of complication does not occur for positive type IDs because type infos are
// output using a fixed width encoding. FilterOffsets[i] holds the byte
// offset corresponding to FilterIds[i].
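// For example (illustrative): with FilterIds = {1, 300} the loop below
// produces FilterOffsets = {-1, -2}; a third entry would get offset -4,
// since the value 300 requires two ULEB128 bytes.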
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
SmallVector<int, 16> FilterOffsets;
FilterOffsets.reserve(FilterIds.size());
int Offset = -1;
for (std::vector<unsigned>::const_iterator
I = FilterIds.begin(), E = FilterIds.end(); I != E; ++I) {
FilterOffsets.push_back(Offset);
Offset -= getULEB128Size(*I);
}
FirstActions.reserve(LandingPads.size());
int FirstAction = 0;
unsigned SizeActions = 0;
const LandingPadInfo *PrevLPI = nullptr;
for (SmallVectorImpl<const LandingPadInfo *>::const_iterator
I = LandingPads.begin(), E = LandingPads.end(); I != E; ++I) {
const LandingPadInfo *LPI = *I;
const std::vector<int> &TypeIds = LPI->TypeIds;
unsigned NumShared = PrevLPI ? sharedTypeIDs(LPI, PrevLPI) : 0;
unsigned SizeSiteActions = 0;
if (NumShared < TypeIds.size()) {
unsigned SizeAction = 0;
unsigned PrevAction = (unsigned)-1;
if (NumShared) {
unsigned SizePrevIds = PrevLPI->TypeIds.size();
assert(Actions.size());
PrevAction = Actions.size() - 1;
SizeAction = getSLEB128Size(Actions[PrevAction].NextAction) +
getSLEB128Size(Actions[PrevAction].ValueForTypeID);
for (unsigned j = NumShared; j != SizePrevIds; ++j) {
assert(PrevAction != (unsigned)-1 && "PrevAction is invalid!");
SizeAction -= getSLEB128Size(Actions[PrevAction].ValueForTypeID);
SizeAction += -Actions[PrevAction].NextAction;
PrevAction = Actions[PrevAction].Previous;
}
}
// Compute the actions.
for (unsigned J = NumShared, M = TypeIds.size(); J != M; ++J) {
int TypeID = TypeIds[J];
assert(-1 - TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
int ValueForTypeID =
isFilterEHSelector(TypeID) ? FilterOffsets[-1 - TypeID] : TypeID;
unsigned SizeTypeID = getSLEB128Size(ValueForTypeID);
int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
SizeAction = SizeTypeID + getSLEB128Size(NextAction);
SizeSiteActions += SizeAction;
ActionEntry Action = { ValueForTypeID, NextAction, PrevAction };
Actions.push_back(Action);
PrevAction = Actions.size() - 1;
}
// Record the first action of the landing pad site.
FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
} // else identical - re-use previous FirstAction
// Information used when creating the call-site table. The action record
// field of the call site record is the offset of the first associated
// action record, relative to the start of the actions table. This value is
// biased by 1 (1 indicating the start of the actions table), and 0
// indicates that there are no actions.
FirstActions.push_back(FirstAction);
// Compute this site's contribution to size.
SizeActions += SizeSiteActions;
PrevLPI = LPI;
}
return SizeActions;
}
/// Return `true' if this is a call to a function marked `nounwind'. Return
/// `false' otherwise.
bool EHStreamer::callToNoUnwindFunction(const MachineInstr *MI) {
assert(MI->isCall() && "This should be a call instruction!");
bool MarkedNoUnwind = false;
bool SawFunc = false;
for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI->getOperand(I);
if (!MO.isGlobal()) continue;
const Function *F = dyn_cast<Function>(MO.getGlobal());
if (!F) continue;
if (SawFunc) {
// Be conservative. If we have more than one function operand for this
// call, then we can't make the assumption that it's the callee and
// not a parameter to the call.
//
// FIXME: Determine if there's a way to say that `F' is the callee or
// parameter.
MarkedNoUnwind = false;
break;
}
MarkedNoUnwind = F->doesNotThrow();
SawFunc = true;
}
return MarkedNoUnwind;
}
void EHStreamer::computePadMap(
const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
RangeMapType &PadMap) {
// Invokes and nounwind calls have entries in PadMap (due to being bracketed
// by try-range labels when lowered). Ordinary calls do not, so appropriate
// try-ranges for them need to be deduced so we can put them in the LSDA.
for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
const LandingPadInfo *LandingPad = LandingPads[i];
for (unsigned j = 0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
MCSymbol *BeginLabel = LandingPad->BeginLabels[j];
assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
PadRange P = { i, j };
PadMap[BeginLabel] = P;
}
}
}
/// Compute the call-site table. The entry for an invoke has a try-range
/// containing the call, a non-zero landing pad, and an appropriate action. The
/// entry for an ordinary call has a try-range containing the call and zero for
/// the landing pad and the action. Calls marked 'nounwind' have no entry and
/// must not be contained in the try-range of any entry - they form gaps in the
/// table. Entries must be ordered by try-range address.
void EHStreamer::
computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
const SmallVectorImpl<unsigned> &FirstActions) {
RangeMapType PadMap;
computePadMap(LandingPads, PadMap);
// The end label of the previous invoke or nounwind try-range.
MCSymbol *LastLabel = nullptr;
// Whether there is a potentially throwing instruction (currently this means
// an ordinary call) between the end of the previous try-range and now.
bool SawPotentiallyThrowing = false;
// Whether the last CallSite entry was for an invoke.
bool PreviousIsInvoke = false;
bool IsSJLJ = Asm->MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
// Visit all instructions in order of address.
for (const auto &MBB : *Asm->MF) {
for (const auto &MI : MBB) {
if (!MI.isEHLabel()) {
if (MI.isCall())
SawPotentiallyThrowing |= !callToNoUnwindFunction(&MI);
continue;
}
// End of the previous try-range?
MCSymbol *BeginLabel = MI.getOperand(0).getMCSymbol();
if (BeginLabel == LastLabel)
SawPotentiallyThrowing = false;
// Beginning of a new try-range?
RangeMapType::const_iterator L = PadMap.find(BeginLabel);
if (L == PadMap.end())
// Nope, it was just some random label.
continue;
const PadRange &P = L->second;
const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
"Inconsistent landing pad map!");
// For Dwarf exception handling (SjLj handling doesn't use this): if some
// instruction between the previous try-range and this one may throw,
// create a call-site entry with no landing pad for the region between the
// try-ranges.
if (SawPotentiallyThrowing && Asm->MAI->usesCFIForEH()) {
CallSiteEntry Site = { LastLabel, BeginLabel, nullptr, 0 };
CallSites.push_back(Site);
PreviousIsInvoke = false;
}
LastLabel = LandingPad->EndLabels[P.RangeIndex];
assert(BeginLabel && LastLabel && "Invalid landing pad!");
if (!LandingPad->LandingPadLabel) {
// Create a gap.
PreviousIsInvoke = false;
} else {
// This try-range is for an invoke.
CallSiteEntry Site = {
BeginLabel,
LastLabel,
LandingPad,
FirstActions[P.PadIndex]
};
// Try to merge with the previous call-site. SJLJ doesn't do this.
if (PreviousIsInvoke && !IsSJLJ) {
CallSiteEntry &Prev = CallSites.back();
if (Site.LPad == Prev.LPad && Site.Action == Prev.Action) {
// Extend the range of the previous entry.
Prev.EndLabel = Site.EndLabel;
continue;
}
}
// Otherwise, create a new call-site.
if (!IsSJLJ)
CallSites.push_back(Site);
else {
// SjLj EH must maintain the call sites in the order assigned
// to them by the SjLjPrepare pass.
unsigned SiteNo = MMI->getCallSiteBeginLabel(BeginLabel);
if (CallSites.size() < SiteNo)
CallSites.resize(SiteNo);
CallSites[SiteNo - 1] = Site;
}
PreviousIsInvoke = true;
}
}
}
// If some instruction between the previous try-range and the end of the
// function may throw, create a call-site entry with no landing pad for the
// region following the try-range.
if (SawPotentiallyThrowing && !IsSJLJ && LastLabel != nullptr) {
CallSiteEntry Site = { LastLabel, nullptr, nullptr, 0 };
CallSites.push_back(Site);
}
}
/// Emit landing pads and actions.
///
/// The general organization of the table is complex, but the basic concepts are
/// easy. First there is a header which describes the location and organization
/// of the three components that follow.
///
/// 1. The landing pad site information describes the range of code covered by
/// the try. In our case it's an accumulation of the ranges covered by the
/// invokes in the try. There is also a reference to the landing pad that
/// handles the exception once processed. Finally, there is an index into the
/// actions table.
/// 2. The action table, in our case, is composed of pairs of type IDs and next
/// action offset. Starting with the action index from the landing pad
/// site, each type ID is checked for a match to the current exception. If
/// it matches then the exception and type id are passed on to the landing
/// pad. Otherwise the next action is looked up. This chain is terminated
/// with a next action of zero. If no type id is found then the frame is
/// unwound and handling continues.
/// 3. The type ID table contains references to all the C++ typeinfo for all
/// catches in the function. This table is reverse indexed, base 1.
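///
/// A rough sketch of the resulting LSDA layout (illustrative; actual field
/// sizes depend on the encodings chosen below):
///
///   LSDA header       (@LPStart/@TType encodings, TType base offset)
///   call-site table   (one record per try-range)
///   action table      ((type filter, next action) SLEB128 pairs)
///   type info table   (typeinfo references, reverse indexed)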
void EHStreamer::emitExceptionTable() {
const std::vector<const GlobalValue *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
// Sort the landing pads in order of their type ids. This is used to fold
// duplicate actions.
SmallVector<const LandingPadInfo *, 64> LandingPads;
LandingPads.reserve(PadInfos.size());
for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
LandingPads.push_back(&PadInfos[i]);
// Order landing pads lexicographically by type id.
std::sort(LandingPads.begin(), LandingPads.end(),
[](const LandingPadInfo *L,
const LandingPadInfo *R) { return L->TypeIds < R->TypeIds; });
// Compute the actions table and gather the first action index for each
// landing pad site.
SmallVector<ActionEntry, 32> Actions;
SmallVector<unsigned, 64> FirstActions;
unsigned SizeActions =
computeActionsTable(LandingPads, Actions, FirstActions);
// Compute the call-site table.
SmallVector<CallSiteEntry, 64> CallSites;
computeCallSiteTable(CallSites, LandingPads, FirstActions);
// Final tallies.
// Call sites.
bool IsSJLJ = Asm->MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
bool HaveTTData = IsSJLJ ? (!TypeInfos.empty() || !FilterIds.empty()) : true;
unsigned CallSiteTableLength;
if (IsSJLJ)
CallSiteTableLength = 0;
else {
unsigned SiteStartSize = 4; // dwarf::DW_EH_PE_udata4
unsigned SiteLengthSize = 4; // dwarf::DW_EH_PE_udata4
unsigned LandingPadSize = 4; // dwarf::DW_EH_PE_udata4
CallSiteTableLength =
CallSites.size() * (SiteStartSize + SiteLengthSize + LandingPadSize);
}
for (unsigned i = 0, e = CallSites.size(); i < e; ++i) {
CallSiteTableLength += getULEB128Size(CallSites[i].Action);
if (IsSJLJ)
CallSiteTableLength += getULEB128Size(i);
}
// Type infos.
MCSection *LSDASection = Asm->getObjFileLowering().getLSDASection();
unsigned TTypeEncoding;
unsigned TypeFormatSize;
if (!HaveTTData) {
// For SjLj exceptions, if there is no TypeInfo, then we just explicitly say
// that we're omitting that bit.
TTypeEncoding = dwarf::DW_EH_PE_omit;
// dwarf::DW_EH_PE_absptr
TypeFormatSize = Asm->getDataLayout().getPointerSize();
} else {
// Okay, we have actual filters or typeinfos to emit. As such, we need to
// pick a type encoding for them. We're about to emit a list of pointers to
// typeinfo objects at the end of the LSDA. However, unless we're in static
// mode, this reference will require a relocation by the dynamic linker.
//
// Because of this, we have a couple of options:
//
// 1) If we are in -static mode, we can always use an absolute reference
// from the LSDA, because the static linker will resolve it.
//
// 2) Otherwise, if the LSDA section is writable, we can output the direct
// reference to the typeinfo and allow the dynamic linker to relocate
// it. Since it is in a writable section, the dynamic linker won't
// have a problem.
//
// 3) Finally, if we're in PIC mode and the LSDA section isn't writable,
// we need to use some form of indirection. For example, on Darwin,
// we can output a statically-relocatable reference to a dyld stub. The
// offset to the stub is constant, but the contents are in a section
// that is updated by the dynamic linker. This is easy enough, but we
// need to tell the personality function of the unwinder to indirect
// through the dyld stub.
//
// FIXME: When (3) is actually implemented, we'll have to emit the stubs
// somewhere. This predicate should be moved to a shared location that is
// in target-independent code.
//
TTypeEncoding = Asm->getObjFileLowering().getTTypeEncoding();
TypeFormatSize = Asm->GetSizeOfEncodedValue(TTypeEncoding);
}
// Begin the exception table.
// Sometimes we do not want to emit the data into a separate section (e.g. ARM
// EHABI). In this case LSDASection will be NULL.
if (LSDASection)
Asm->OutStreamer->SwitchSection(LSDASection);
Asm->EmitAlignment(2);
// Emit the LSDA.
MCSymbol *GCCETSym =
Asm->OutContext.getOrCreateSymbol(Twine("GCC_except_table")+
Twine(Asm->getFunctionNumber()));
Asm->OutStreamer->EmitLabel(GCCETSym);
Asm->OutStreamer->EmitLabel(Asm->getCurExceptionSym());
// Emit the LSDA header.
Asm->EmitEncodingByte(dwarf::DW_EH_PE_omit, "@LPStart");
Asm->EmitEncodingByte(TTypeEncoding, "@TType");
// The type infos need to be aligned. GCC does this by inserting padding just
// before the type infos. However, this changes the size of the exception
// table, so you need to take this into account when you output the exception
// table size. However, the size is output using a variable length encoding.
// So by increasing the size by inserting padding, you may increase the number
// of bytes used for writing the size. If it increases, say by one byte, then
// you now need to output one less byte of padding to get the type infos
// aligned. However this decreases the size of the exception table. This
// changes the value you have to output for the exception table size. Due to
// the variable length encoding, the number of bytes used for writing the
// length may decrease. If so, you then have to increase the amount of
// padding. And so on. If you look carefully at the GCC code you will see that
// it indeed does this in a loop, going on and on until the values stabilize.
// We chose another solution: don't output padding inside the table like GCC
// does, instead output it before the table.
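// For example (illustrative): a 127-byte table needing one byte of padding
// would grow to 128 bytes, pushing its ULEB128-encoded length from one byte
// to two; the extra length byte changes the alignment so the padding is no
// longer needed, shrinking the table again -- hence GCC's fixed-point loop.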
unsigned SizeTypes = TypeInfos.size() * TypeFormatSize;
unsigned CallSiteTableLengthSize = getULEB128Size(CallSiteTableLength);
unsigned TTypeBaseOffset =
sizeof(int8_t) + // Call site format
CallSiteTableLengthSize + // Call site table length size
CallSiteTableLength + // Call site table length
SizeActions + // Actions size
SizeTypes;
unsigned TTypeBaseOffsetSize = getULEB128Size(TTypeBaseOffset);
unsigned TotalSize =
sizeof(int8_t) + // LPStart format
sizeof(int8_t) + // TType format
(HaveTTData ? TTypeBaseOffsetSize : 0) + // TType base offset size
TTypeBaseOffset; // TType base offset
unsigned SizeAlign = (4 - TotalSize) & 3;
if (HaveTTData) {
// Account for any extra padding that will be added to the call site table
// length.
Asm->EmitULEB128(TTypeBaseOffset, "@TType base offset", SizeAlign);
SizeAlign = 0;
}
bool VerboseAsm = Asm->OutStreamer->isVerboseAsm();
// SjLj Exception handling
if (IsSJLJ) {
Asm->EmitEncodingByte(dwarf::DW_EH_PE_udata4, "Call site");
// Add extra padding if it wasn't added to the TType base offset.
Asm->EmitULEB128(CallSiteTableLength, "Call site table length", SizeAlign);
// Emit the landing pad site information.
unsigned idx = 0;
for (SmallVectorImpl<CallSiteEntry>::const_iterator
I = CallSites.begin(), E = CallSites.end(); I != E; ++I, ++idx) {
const CallSiteEntry &S = *I;
// Index of the call site entry; SjLj call-site tables are indexed rather
// than keyed by landing pad offsets.
if (VerboseAsm) {
Asm->OutStreamer->AddComment(">> Call Site " + Twine(idx) + " <<");
Asm->OutStreamer->AddComment(" On exception at call site "+Twine(idx));
}
Asm->EmitULEB128(idx);
// Offset of the first associated action record, relative to the start of
// the action table. This value is biased by 1 (1 indicates the start of
// the action table), and 0 indicates that there are no actions.
if (VerboseAsm) {
if (S.Action == 0)
Asm->OutStreamer->AddComment(" Action: cleanup");
else
Asm->OutStreamer->AddComment(" Action: " +
Twine((S.Action - 1) / 2 + 1));
}
Asm->EmitULEB128(S.Action);
}
} else {
// Itanium LSDA exception handling
// The call-site table is a list of all call sites that may throw an
// exception (including C++ 'throw' statements) in the procedure
// fragment. It immediately follows the LSDA header. Each entry indicates,
// for a given call, the first corresponding action record and corresponding
// landing pad.
//
// The table begins with the number of bytes, stored as an LEB128
// compressed, unsigned integer. The records immediately follow the record
// count. They are sorted in increasing call-site address. Each record
// indicates:
//
// * The position of the call-site.
// * The position of the landing pad.
// * The first action record for that call site.
//
// A missing entry in the call-site table indicates that a call is not
// supposed to throw.
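// For example (illustrative): a call at offset 0x10 of length 0x8 whose
// landing pad is at offset 0x40 with first action 1 is emitted as three
// udata4 label differences (0x10, 0x8, 0x40) followed by the ULEB128
// action value 0x01.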
// Emit the landing pad call site table.
Asm->EmitEncodingByte(dwarf::DW_EH_PE_udata4, "Call site");
// Add extra padding if it wasn't added to the TType base offset.
Asm->EmitULEB128(CallSiteTableLength, "Call site table length", SizeAlign);
unsigned Entry = 0;
for (SmallVectorImpl<CallSiteEntry>::const_iterator
I = CallSites.begin(), E = CallSites.end(); I != E; ++I) {
const CallSiteEntry &S = *I;
MCSymbol *EHFuncBeginSym = Asm->getFunctionBegin();
MCSymbol *BeginLabel = S.BeginLabel;
if (!BeginLabel)
BeginLabel = EHFuncBeginSym;
MCSymbol *EndLabel = S.EndLabel;
if (!EndLabel)
EndLabel = Asm->getFunctionEnd();
// Offset of the call site relative to the previous call site, counted in
// number of 16-byte bundles. The first call site is counted relative to
// the start of the procedure fragment.
if (VerboseAsm)
Asm->OutStreamer->AddComment(">> Call Site " + Twine(++Entry) + " <<");
Asm->EmitLabelDifference(BeginLabel, EHFuncBeginSym, 4);
if (VerboseAsm)
Asm->OutStreamer->AddComment(Twine(" Call between ") +
BeginLabel->getName() + " and " +
EndLabel->getName());
Asm->EmitLabelDifference(EndLabel, BeginLabel, 4);
// Offset of the landing pad, counted in 16-byte bundles relative to the
// @LPStart address.
if (!S.LPad) {
if (VerboseAsm)
Asm->OutStreamer->AddComment(" has no landing pad");
Asm->OutStreamer->EmitIntValue(0, 4/*size*/);
} else {
if (VerboseAsm)
Asm->OutStreamer->AddComment(Twine(" jumps to ") +
S.LPad->LandingPadLabel->getName());
Asm->EmitLabelDifference(S.LPad->LandingPadLabel, EHFuncBeginSym, 4);
}
// Offset of the first associated action record, relative to the start of
// the action table. This value is biased by 1 (1 indicates the start of
// the action table), and 0 indicates that there are no actions.
if (VerboseAsm) {
if (S.Action == 0)
Asm->OutStreamer->AddComment(" On action: cleanup");
else
Asm->OutStreamer->AddComment(" On action: " +
Twine((S.Action - 1) / 2 + 1));
}
Asm->EmitULEB128(S.Action);
}
}
// Emit the Action Table.
int Entry = 0;
for (SmallVectorImpl<ActionEntry>::const_iterator
I = Actions.begin(), E = Actions.end(); I != E; ++I) {
const ActionEntry &Action = *I;
if (VerboseAsm) {
// Emit comments that decode the action table.
Asm->OutStreamer->AddComment(">> Action Record " + Twine(++Entry) + " <<");
}
// Type Filter
//
// Used by the runtime to match the type of the thrown exception to the
// type of the catch clauses or the types in the exception specification.
if (VerboseAsm) {
if (Action.ValueForTypeID > 0)
Asm->OutStreamer->AddComment(" Catch TypeInfo " +
Twine(Action.ValueForTypeID));
else if (Action.ValueForTypeID < 0)
Asm->OutStreamer->AddComment(" Filter TypeInfo " +
Twine(Action.ValueForTypeID));
else
Asm->OutStreamer->AddComment(" Cleanup");
}
Asm->EmitSLEB128(Action.ValueForTypeID);
// Action Record
//
// Self-relative signed displacement in bytes of the next action record,
// or 0 if there is no next action record.
if (VerboseAsm) {
if (Action.NextAction == 0) {
Asm->OutStreamer->AddComment(" No further actions");
} else {
unsigned NextAction = Entry + (Action.NextAction + 1) / 2;
Asm->OutStreamer->AddComment(" Continue to action "+Twine(NextAction));
}
}
Asm->EmitSLEB128(Action.NextAction);
}
emitTypeInfos(TTypeEncoding);
Asm->EmitAlignment(2);
}
void EHStreamer::emitTypeInfos(unsigned TTypeEncoding) {
const std::vector<const GlobalValue *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
bool VerboseAsm = Asm->OutStreamer->isVerboseAsm();
int Entry = 0;
// Emit the Catch TypeInfos.
if (VerboseAsm && !TypeInfos.empty()) {
Asm->OutStreamer->AddComment(">> Catch TypeInfos <<");
Asm->OutStreamer->AddBlankLine();
Entry = TypeInfos.size();
}
for (std::vector<const GlobalValue *>::const_reverse_iterator
I = TypeInfos.rbegin(), E = TypeInfos.rend(); I != E; ++I) {
const GlobalValue *GV = *I;
if (VerboseAsm)
Asm->OutStreamer->AddComment("TypeInfo " + Twine(Entry--));
Asm->EmitTTypeReference(GV, TTypeEncoding);
}
// Emit the Exception Specifications.
if (VerboseAsm && !FilterIds.empty()) {
Asm->OutStreamer->AddComment(">> Filter TypeInfos <<");
Asm->OutStreamer->AddBlankLine();
Entry = 0;
}
for (std::vector<unsigned>::const_iterator
I = FilterIds.begin(), E = FilterIds.end(); I < E; ++I) {
unsigned TypeID = *I;
if (VerboseAsm) {
--Entry;
if (isFilterEHSelector(TypeID))
Asm->OutStreamer->AddComment("FilterInfo " + Twine(Entry));
}
Asm->EmitULEB128(TypeID);
}
}
|