diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 69c82d4f0fbae2c1124f1ada2c52f410dba0fe83..f20d07a4c7d3b4a823352632b51c4c275c73ab5b 100644 (file)
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "isel"
#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "isel"
+
/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;
llvm_unreachable("Unknown mismatch!");
}
+static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
+ const Twine &ErrMsg) {
+ const Instruction *I = dyn_cast_or_null<Instruction>(V);
+ if (!I)
+ return Ctx.emitError(ErrMsg);
+
+ const char *AsmError = ", possible invalid constraint for vector type";
+ if (const CallInst *CI = dyn_cast<CallInst>(I))
+ if (isa<InlineAsm>(CI->getCalledValue()))
+ return Ctx.emitError(I, ErrMsg + AsmError);
+
+ return Ctx.emitError(I, ErrMsg);
+}
+
/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
// Handle cases such as i8 -> <1 x i1>
if (ValueVT.getVectorNumElements() != 1) {
- LLVMContext &Ctx = *DAG.getContext();
- Twine ErrMsg("non-trivial scalar-to-vector conversion");
- if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
- if (const CallInst *CI = dyn_cast<CallInst>(I))
- if (isa<InlineAsm>(CI->getCalledValue()))
- ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
- Ctx.emitError(I, ErrMsg);
- } else {
- Ctx.emitError(ErrMsg);
- }
+ diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
+ "non-trivial scalar-to-vector conversion");
return DAG.getUNDEF(ValueVT);
}
"Failed to tile the value with PartVT!");
if (NumParts == 1) {
- if (PartEVT != ValueVT) {
- LLVMContext &Ctx = *DAG.getContext();
- Twine ErrMsg("scalar-to-vector conversion failed");
- if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
- if (const CallInst *CI = dyn_cast<CallInst>(I))
- if (isa<InlineAsm>(CI->getCalledValue()))
- ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
- Ctx.emitError(I, ErrMsg);
- } else {
- Ctx.emitError(ErrMsg);
- }
- }
+ if (PartEVT != ValueVT)
+ diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
+ "scalar-to-vector conversion failed");
Parts[0] = Val;
return;
}
}
- /// areValueTypesLegal - Return true if types of all the values are legal.
- bool areValueTypesLegal(const TargetLowering &TLI) {
- for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
- MVT RegisterVT = RegVTs[Value];
- if (!TLI.isTypeLegal(RegisterVT))
- return false;
- }
- return true;
- }
-
/// append - Add the specified values to this one.
void append(const RegsForValue &RHS) {
ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
SDLoc dl,
SDValue &Chain, SDValue *Flag,
- const Value *V = 0) const;
+ const Value *V = nullptr) const;
/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object. This uses
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue P;
- if (Flag == 0) {
+ if (!Flag) {
P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
} else {
P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
}
return DAG.getNode(ISD::MERGE_VALUES, dl,
- DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
+ DAG.getVTList(ValueVTs),
&Values[0], ValueVTs.size());
}
SmallVector<SDValue, 8> Chains(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue Part;
- if (Flag == 0) {
+ if (!Flag) {
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
} else {
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
Ops.push_back(Res);
+ unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
MVT RegisterVT = RegVTs[Value];
for (unsigned i = 0; i != NumRegs; ++i) {
assert(Reg < Regs.size() && "Mismatch in # registers expected");
- Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
+ unsigned TheReg = Regs[Reg++];
+ Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
+
+ if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
+ // If we clobbered the stack pointer, MFI should know about it.
+ assert(DAG.getMachineFunction().getFrameInfo()->
+ hasInlineAsmWithSPAdjust());
+ }
}
}
}
AA = &aa;
GFI = gfi;
LibInfo = li;
- TD = DAG.getTarget().getDataLayout();
+ DL = DAG.getTarget().getDataLayout();
Context = DAG.getContext();
LPadToCallSiteMap.clear();
}
UnusedArgNodeMap.clear();
PendingLoads.clear();
PendingExports.clear();
- CurInst = NULL;
+ CurInst = nullptr;
HasTailCall = false;
+ SDNodeOrder = LowestSDNodeOrder;
}
/// clearDanglingDebugInfo - Clear the dangling debug information
if (!isa<TerminatorInst>(&I) && !HasTailCall)
CopyToExportRegsIfNeeded(&I);
- CurInst = NULL;
+ CurInst = nullptr;
}
void SelectionDAGBuilder::visitPHI(const PHINode &) {
RegsForValue RFV(*DAG.getContext(), *TM.getTargetLowering(),
InReg, V->getType());
SDValue Chain = DAG.getEntryNode();
- N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V);
+ N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
resolveDanglingDebugInfo(V, N);
return N;
}
if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
- if (isa<ConstantPointerNull>(C))
- return DAG.getConstant(0, TLI->getPointerTy());
+ if (isa<ConstantPointerNull>(C)) {
+ unsigned AS = V->getType()->getPointerAddressSpace();
+ return DAG.getConstant(0, TLI->getPointerTy(AS));
+ }
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
return DAG.getConstantFP(*CFP, VT);
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
RegsForValue RFV(*DAG.getContext(), *TLI, InReg, Inst->getType());
SDValue Chain = DAG.getEntryNode();
- return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V);
+ return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
}
llvm_unreachable("Can't get register for value!");
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
- MachineBasicBlock *SwitchBB) {
+ MachineBasicBlock *SwitchBB,
+ uint32_t TWeight,
+ uint32_t FWeight) {
const BasicBlock *BB = CurBB->getBasicBlock();
// If the leaf of the tree is a comparison, merge the condition into
llvm_unreachable("Unknown compare instruction");
}
- CaseBlock CB(Condition, BOp->getOperand(0),
- BOp->getOperand(1), NULL, TBB, FBB, CurBB);
+ CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
+ TBB, FBB, CurBB, TWeight, FWeight);
SwitchCases.push_back(CB);
return;
}
// Create a CaseBlock record representing this branch.
CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
- NULL, TBB, FBB, CurBB);
+ nullptr, TBB, FBB, CurBB, TWeight, FWeight);
SwitchCases.push_back(CB);
}
+/// Scale down both weights to fit into uint32_t.
+static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
+ uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
+ uint32_t Scale = (NewMax / UINT32_MAX) + 1;
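+ // Example (illustrative): NewMax == 3*UINT32_MAX gives Scale == 4, so both
+ // weights are divided by 4; the larger weight then fits in uint32_t and the
+ // true/false ratio is approximately preserved.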
+ NewTrue = NewTrue / Scale;
+ NewFalse = NewFalse / Scale;
+}
+
/// FindMergedConditions - If Cond is an expression like
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB,
- unsigned Opc) {
+ unsigned Opc, uint32_t TWeight,
+ uint32_t FWeight) {
// If this node is not part of the or/and tree, emit it as a branch.
const Instruction *BOp = dyn_cast<Instruction>(Cond);
if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
BOp->getParent() != CurBB->getBasicBlock() ||
!InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
!InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
- EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB);
+ EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
+ TWeight, FWeight);
return;
}
if (Opc == Instruction::Or) {
// Codegen X | Y as:
+ // BB1:
// jmp_if_X TBB
// jmp TmpBB
// TmpBB:
// jmp FBB
//
+ // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
+ // The requirement is that
+ // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
+ // = TrueProb for original BB.
+ // Assuming the original weights are A and B, one choice is to set BB1's
+ // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
+ // assumes that
+ // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
+ // Another choice is to assume TrueProb for BB1 equals TrueProb for
+ // TmpBB, but the math is more complicated.
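+ // Worked example (illustrative): with original weights A=1 and B=3, BB1
+ // gets weights (1, 7) and TmpBB gets (1, 6), so the overall TrueProb is
+ // 1/8 + (7/8)*(1/7) = 1/4 = A/(A+B), matching the original branch.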
+
+ uint64_t NewTrueWeight = TWeight;
+ uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight;
+ ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the LHS condition.
- FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc);
+ FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
+ NewTrueWeight, NewFalseWeight);
+ NewTrueWeight = TWeight;
+ NewFalseWeight = 2 * (uint64_t)FWeight;
+ ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the RHS condition into TmpBB.
- FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
+ FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
+ NewTrueWeight, NewFalseWeight);
} else {
assert(Opc == Instruction::And && "Unknown merge op!");
// Codegen X & Y as:
+ // BB1:
// jmp_if_X TmpBB
// jmp FBB
// TmpBB:
//
// This requires creation of TmpBB after CurBB.
+ // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
+ // The requirement is that
+ // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
+ // = FalseProb for original BB.
+ // Assuming the original weights are A and B, one choice is to set BB1's
+ // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
+ // assumes that
+ // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
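+ // Worked example (illustrative): with original weights A=1 and B=3, BB1
+ // gets weights (5, 3) and TmpBB gets (2, 3), so the overall FalseProb is
+ // 3/8 + (5/8)*(3/5) = 3/4 = B/(A+B), matching the original branch.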
+
+ uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight;
+ uint64_t NewFalseWeight = FWeight;
+ ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the LHS condition.
- FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc);
+ FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
+ NewTrueWeight, NewFalseWeight);
+ NewTrueWeight = 2 * (uint64_t)TWeight;
+ NewFalseWeight = FWeight;
+ ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the RHS condition into TmpBB.
- FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
+ FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
+ NewTrueWeight, NewFalseWeight);
}
}
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
// Figure out which block is immediately after the current one.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = BrMBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
// Update machine-CFG edges.
BrMBB->addSuccessor(Succ0MBB);
- // If this is not a fall-through branch, emit the branch.
- if (Succ0MBB != NextBlock)
+ // If this is not a fall-through branch or optimizations are switched off,
+ // emit the branch.
+ if (Succ0MBB != NextBlock || TM.getOptLevel() == CodeGenOpt::None)
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Succ0MBB)));
(BOp->getOpcode() == Instruction::And ||
BOp->getOpcode() == Instruction::Or)) {
FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
- BOp->getOpcode());
+ BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB),
+ getEdgeWeight(BrMBB, Succ1MBB));
// If the compares in later blocks need to use values not currently
// exported from this block, export them now. This block should always
// be the first entry.
// Create a CaseBlock record representing this branch.
CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
- NULL, Succ0MBB, Succ1MBB, BrMBB);
+ nullptr, Succ0MBB, Succ1MBB, BrMBB);
// Use visitSwitchCase to actually insert the fast branch sequence for this
// cond branch.
SDLoc dl = getCurSDLoc();
// Build the setcc now.
- if (CB.CmpMHS == NULL) {
+ if (!CB.CmpMHS) {
// Fold "(X == true)" to X and "(X == false)" to !X to
// handle common cases produced by branch lowering.
if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
const TargetLowering *TLI = TM.getTargetLowering();
SDValue Chain = TLI->makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
- MVT::isVoid, 0, 0, false, getCurSDLoc(),
- false, false).second;
+ MVT::isVoid, nullptr, 0, false,
+ getCurSDLoc(), false, false).second;
DAG.setRoot(Chain);
}
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
// Merge into one.
SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
+ DAG.getVTList(ValueVTs),
&Ops[0], 2);
setValue(&LP, Res);
}
MachineFunction *CurMF = FuncInfo.MF;
// Figure out which block is immediately after the current one.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = CR.CaseBB;
if (++BBI != FuncInfo.MF->end())
if (I->High == I->Low) {
// This is just a small case range :) containing exactly 1 case
CC = ISD::SETEQ;
- LHS = SV; RHS = I->High; MHS = NULL;
+ LHS = SV; RHS = I->High; MHS = nullptr;
} else {
CC = ISD::SETLE;
LHS = I->Low; MHS = SV; RHS = I->High;
volatile double RDensity =
(double)RSize.roundToDouble() /
(Last - RBegin + 1ULL).roundToDouble();
- double Metric = Range.logBase2()*(LDensity+RDensity);
+ volatile double Metric = Range.logBase2()*(LDensity+RDensity);
// Should always split in some non-trivial place
DEBUG(dbgs() <<"=>Step\n"
<< "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
CaseRange LHSR(CR.Range.first, Pivot);
CaseRange RHSR(Pivot, CR.Range.second);
const Constant *C = Pivot->Low;
- MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
+ MachineBasicBlock *FalseBB = nullptr, *TrueBB = nullptr;
// We know that we branch to the LHS if the Value being switched on is
// less than the Pivot value, C. We use this to optimize our binary
// Create a CaseBlock record representing a conditional branch to
// the LHS node if the value being switched on SV is less than C.
// Otherwise, branch to LHS.
- CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
+ CaseBlock CB(ISD::SETLT, SV, C, nullptr, TrueBB, FalseBB, CR.CaseBB);
if (CR.CaseBB == SwitchBB)
visitSwitchCase(CB, SwitchBB);
if (Cases.size() >= 2)
// Must recompute end() each iteration because it may be
// invalidated by erase if we hold on to it
- for (CaseItr I = Cases.begin(), J = llvm::next(Cases.begin());
+ for (CaseItr I = Cases.begin(), J = std::next(Cases.begin());
J != Cases.end(); ) {
const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
// Figure out which block is immediately after the current one.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
// If there is only the default destination, branch to it if it is not the
// Push the initial CaseRec onto the worklist
CaseRecVector WorkList;
- WorkList.push_back(CaseRec(SwitchMBB,0,0,
+ WorkList.push_back(CaseRec(SwitchMBB,nullptr,nullptr,
CaseRange(Cases.begin(),Cases.end())));
while (!WorkList.empty()) {
getValue(I.getAddress())));
}
+void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
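+ // With the TrapUnreachable option set, lower unreachable to ISD::TRAP so
+ // control cannot fall off the end; otherwise emit nothing for it.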
+ if (DAG.getTarget().Options.TrapUnreachable)
+ DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
+}
+
void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
Type *Ty = I.getType();
FalseVal.getResNo() + i));
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&ValueVTs[0], NumValues),
+ DAG.getVTList(ValueVTs),
&Values[0], NumValues));
}
if (DestVT != N.getValueType())
setValue(&I, DAG.getNode(ISD::BITCAST, getCurSDLoc(),
DestVT, N)); // convert types.
+ // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
+ // might fold any kind of constant expression to an integer constant and that
+ // is not what we are looking for. Only recognize a bitcast of a genuine
+ // constant integer as an opaque constant.
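+ // (Assumption about the flag's intent: an opaque constant is one the DAG
+ // combiner will not fold back into its users, so the materialization
+ // point created here survives later combines.)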
+ else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
+ setValue(&I, DAG.getConstant(C->getValue(), DestVT, /*isTarget=*/false,
+ /*isOpaque=*/true));
else
setValue(&I, N); // noop cast.
}
+void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ const Value *SV = I.getOperand(0);
+ SDValue N = getValue(SV);
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+
+ unsigned SrcAS = SV->getType()->getPointerAddressSpace();
+ unsigned DestAS = I.getType()->getPointerAddressSpace();
+
+ if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
+ N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
+
+ setValue(&I, N);
+}
+
void SelectionDAGBuilder::visitInsertElement(const User &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue InVec = getValue(I.getOperand(0));
SDValue(Agg.getNode(), Agg.getResNo() + i);
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&AggValueVTs[0], NumAggValues),
+ DAG.getVTList(AggValueVTs),
&Values[0], NumAggValues));
}
SDValue(Agg.getNode(), Agg.getResNo() + i);
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&ValValueVTs[0], NumValValues),
+ DAG.getVTList(ValValueVTs),
&Values[0], NumValValues));
}
unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
if (Field) {
// N = N + Offset
- uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
+ uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
DAG.getConstant(Offset, N.getValueType()));
}
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue;
uint64_t Offs =
- TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+ DL->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
EVT PTy = TLI->getPointerTy(AS);
unsigned PtrBits = PTy.getSizeInBits();
// N = N + Idx * ElementSize;
APInt ElementSize = APInt(TLI->getPointerSizeInBits(AS),
- TD->getTypeAllocSize(Ty));
+ DL->getTypeAllocSize(Ty));
SDValue IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
setValue(&I, DSA);
DAG.setRoot(DSA.getValue(1));
- // Inform the Frame Information that we have just allocated a variable-sized
- // object.
- FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1);
+ assert(FuncInfo.MF->getFrameInfo()->hasVarSizedObjects());
}
void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
Type *Ty = I.getType();
bool isVolatile = I.isVolatile();
- bool isNonTemporal = I.getMetadata("nontemporal") != 0;
- bool isInvariant = I.getMetadata("invariant.load") != 0;
+ bool isNonTemporal = I.getMetadata("nontemporal") != nullptr;
+ bool isInvariant = I.getMetadata("invariant.load") != nullptr;
unsigned Alignment = I.getAlignment();
const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
SDValue Root;
bool ConstantMemory = false;
- if (I.isVolatile() || NumValues > MaxParallelChains)
+ if (isVolatile || NumValues > MaxParallelChains)
// Serialize volatile loads with other side effects.
Root = getRoot();
else if (AA->pointsToConstantMemory(
Root = DAG.getRoot();
}
+ const TargetLowering *TLI = TM.getTargetLowering();
+ if (isVolatile)
+ Root = TLI->prepareVolatileOrAtomicLoad(Root, getCurSDLoc(), DAG);
+
SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
NumValues));
}
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&ValueVTs[0], NumValues),
+ DAG.getVTList(ValueVTs),
&Values[0], NumValues));
}
NumValues));
EVT PtrVT = Ptr.getValueType();
bool isVolatile = I.isVolatile();
- bool isNonTemporal = I.getMetadata("nontemporal") != 0;
+ bool isNonTemporal = I.getMetadata("nontemporal") != nullptr;
unsigned Alignment = I.getAlignment();
const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
SDLoc dl = getCurSDLoc();
- AtomicOrdering Order = I.getOrdering();
+ AtomicOrdering SuccessOrder = I.getSuccessOrdering();
+ AtomicOrdering FailureOrder = I.getFailureOrdering();
SynchronizationScope Scope = I.getSynchScope();
SDValue InChain = getRoot();
const TargetLowering *TLI = TM.getTargetLowering();
if (TLI->getInsertFencesForAtomic())
- InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
+ InChain = InsertFenceForAtomic(InChain, SuccessOrder, Scope, true, dl,
DAG, *TLI);
SDValue L =
getValue(I.getCompareOperand()),
getValue(I.getNewValOperand()),
MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
- TLI->getInsertFencesForAtomic() ? Monotonic : Order,
+ TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder,
+ TLI->getInsertFencesForAtomic() ? Monotonic : FailureOrder,
Scope);
SDValue OutChain = L.getValue(1);
if (TLI->getInsertFencesForAtomic())
- OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
+ OutChain = InsertFenceForAtomic(OutChain, SuccessOrder, Scope, false, dl,
DAG, *TLI);
setValue(&I, L);
if (I.getAlignment() < VT.getSizeInBits() / 8)
report_fatal_error("Cannot generate unaligned atomic load");
+ MachineMemOperand *MMO =
+ DAG.getMachineFunction().
+ getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
+ MachineMemOperand::MOVolatile |
+ MachineMemOperand::MOLoad,
+ VT.getStoreSize(),
+ I.getAlignment() ? I.getAlignment() :
+ DAG.getEVTAlignment(VT));
+
+ InChain = TLI->prepareVolatileOrAtomicLoad(InChain, dl, DAG);
SDValue L =
- DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
- getValue(I.getPointerOperand()),
- I.getPointerOperand(), I.getAlignment(),
- TLI->getInsertFencesForAtomic() ? Monotonic : Order,
- Scope);
+ DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
+ getValue(I.getPointerOperand()), MMO,
+ TLI->getInsertFencesForAtomic() ? Monotonic : Order,
+ Scope);
SDValue OutChain = L.getValue(1);
if (HasChain)
ValueVTs.push_back(MVT::Other);
- SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
+ SDVTList VTs = DAG.getVTList(ValueVTs);
// Create the node.
SDValue Result;
static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS,
SelectionDAG &DAG, const TargetLowering &TLI) {
bool IsExp10 = false;
- if (LHS.getValueType() == MVT::f32 && LHS.getValueType() == MVT::f32 &&
+ if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
APFloat Ten(10.0f);
@@ -4541,18 +4633,18 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
default:
// By default, turn this into a target intrinsic node.
visitTargetIntrinsic(I, Intrinsic);
- return 0;
- case Intrinsic::vastart: visitVAStart(I); return 0;
- case Intrinsic::vaend: visitVAEnd(I); return 0;
- case Intrinsic::vacopy: visitVACopy(I); return 0;
+ return nullptr;
+ case Intrinsic::vastart: visitVAStart(I); return nullptr;
+ case Intrinsic::vaend: visitVAEnd(I); return nullptr;
+ case Intrinsic::vacopy: visitVACopy(I); return nullptr;
case Intrinsic::returnaddress:
setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
case Intrinsic::frameaddress:
setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
case Intrinsic::setjmp:
return &"_setjmp"[!TLI->usesUnderscoreSetJmp()];
case Intrinsic::longjmp:
@@ -4575,7 +4667,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.setRoot(DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false,
MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1))));
- return 0;
+ return nullptr;
}
case Intrinsic::memset: {
// Assert for address < 256 since we support only user defined address
@@ -4592,7 +4684,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
DAG.setRoot(DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
MachinePointerInfo(I.getArgOperand(0))));
- return 0;
+ return nullptr;
}
case Intrinsic::memmove: {
// Assert for address < 256 since we support only user defined address
@@ -4612,7 +4704,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.setRoot(DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1))));
- return 0;
+ return nullptr;
}
case Intrinsic::dbg_declare: {
const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
@@ -4623,14 +4715,14 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
"Variable in DbgDeclareInst should be either null or a DIVariable.");
if (!Address || !DIVar) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
- return 0;
+ return nullptr;
}
// Check if address has undef value.
if (isa<UndefValue>(Address) ||
(Address->use_empty() && !isa<Argument>(Address))) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
- return 0;
+ return nullptr;
}
SDValue &N = NodeMap[Address];
@@ -4658,7 +4750,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Address is an argument, so try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
EmitFuncArgumentDbgValue(Address, Variable, 0, N);
- return 0;
+ return nullptr;
}
} else if (AI)
SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
@@ -4668,7 +4760,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
DEBUG(Address->dump());
- return 0;
+ return nullptr;
}
DAG.AddDbgValue(SDV, N.getNode(), isParameter);
} else {
@@ -4684,15 +4776,15 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
if (SI != FuncInfo.StaticAllocaMap.end()) {
SDV = DAG.getDbgValue(Variable, SI->second,
0, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, 0, false);
- return 0;
+ DAG.AddDbgValue(SDV, nullptr, false);
+ return nullptr;
}
}
}
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
- return 0;
+ return nullptr;
}
case Intrinsic::dbg_value: {
const DbgValueInst &DI = cast<DbgValueInst>(I);
@@ -4700,18 +4792,18 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
assert((!DIVar || DIVar.isVariable()) &&
"Variable in DbgValueInst should be either null or a DIVariable.");
if (!DIVar)
- return 0;
+ return nullptr;
MDNode *Variable = DI.getVariable();
uint64_t Offset = DI.getOffset();
const Value *V = DI.getValue();
if (!V)
- return 0;
+ return nullptr;
SDDbgValue *SDV;
if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
SDV = DAG.getDbgValue(Variable, V, Offset, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, 0, false);
+ DAG.AddDbgValue(SDV, nullptr, false);
} else {
// Do not use getValue() in here; we don't want to generate code at
// this point if it hasn't been done yet.
@@ -4745,18 +4837,18 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
if (!AI) {
DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
- return 0;
+ return nullptr;
}
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI == FuncInfo.StaticAllocaMap.end())
- return 0; // VLAs.
+ return nullptr; // VLAs.
int FI = SI->second;
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
if (!DI.getDebugLoc().isUnknown() && MMI.hasDebugInfo())
MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc());
- return 0;
+ return nullptr;
}
case Intrinsic::eh_typeid_for: {
@@ -4765,7 +4857,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
Res = DAG.getConstant(TypeID, MVT::i32);
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::eh_return_i32:
@@ -4776,10 +4868,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getControlRoot(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
- return 0;
+ return nullptr;
case Intrinsic::eh_unwind_init:
DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
- return 0;
+ return nullptr;
case Intrinsic::eh_dwarf_cfa: {
SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
TLI->getPointerTy());
@@ -4793,7 +4885,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.getConstant(0, TLI->getPointerTy()));
setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(),
FA, Offset));
- return 0;
+ return nullptr;
}
case Intrinsic::eh_sjlj_callsite: {
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
@@ -4802,7 +4894,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
MMI.setCurrentCallSite(CI->getZExtValue());
- return 0;
+ return nullptr;
}
case Intrinsic::eh_sjlj_functioncontext: {
// Get and store the index of the function context.
@@ -4811,7 +4903,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
int FI = FuncInfo.StaticAllocaMap[FnCtx];
MFI->setFunctionContextIndex(FI);
- return 0;
+ return nullptr;
}
case Intrinsic::eh_sjlj_setjmp: {
SDValue Ops[2];
@@ -4822,12 +4914,12 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Ops, 2);
setValue(&I, Op.getValue(0));
DAG.setRoot(Op.getValue(1));
- return 0;
+ return nullptr;
}
case Intrinsic::eh_sjlj_longjmp: {
DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
getRoot(), getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
}
case Intrinsic::x86_mmx_pslli_w:
@@ -4841,7 +4933,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue ShAmt = getValue(I.getArgOperand(1));
if (isa<ConstantSDNode>(ShAmt)) {
visitTargetIntrinsic(I, Intrinsic);
- return 0;
+ return nullptr;
}
unsigned NewIntrinsic = 0;
EVT ShAmtVT = MVT::v2i32;
@@ -4887,7 +4979,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.getConstant(NewIntrinsic, MVT::i32),
getValue(I.getArgOperand(0)), ShAmt);
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::x86_avx_vinsertf128_pd_256:
case Intrinsic::x86_avx_vinsertf128_ps_256:
@@ -4902,7 +4994,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(1)),
DAG.getConstant(Idx, TLI->getVectorIdxTy()));
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::x86_avx_vextractf128_pd_256:
case Intrinsic::x86_avx_vextractf128_ps_256:
@@ -4915,7 +5007,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(0)),
DAG.getConstant(Idx, TLI->getVectorIdxTy()));
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::convertff:
case Intrinsic::convertfsi:
@@ -4948,31 +5040,31 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(2)),
Code);
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::powi:
setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)), DAG));
- return 0;
+ return nullptr;
case Intrinsic::log:
setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::log2:
setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::log10:
setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::exp:
setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::exp2:
setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::pow:
setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::sqrt:
case Intrinsic::fabs:
case Intrinsic::sin:
@@ -5001,21 +5093,21 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
setValue(&I, DAG.getNode(Opcode, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
}
case Intrinsic::copysign:
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
- return 0;
+ return nullptr;
case Intrinsic::fma:
setValue(&I, DAG.getNode(ISD::FMA, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2))));
- return 0;
+ return nullptr;
case Intrinsic::fmuladd: {
EVT VT = TLI->getValueType(I.getType());
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
@@ -5036,20 +5128,20 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(2)));
setValue(&I, Add);
}
- return 0;
+ return nullptr;
}
case Intrinsic::convert_to_fp16:
setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, sdl,
MVT::i16, getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
case Intrinsic::convert_from_fp16:
setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, sdl,
MVT::f32, getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
case Intrinsic::pcmarker: {
SDValue Tmp = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
- return 0;
+ return nullptr;
}
case Intrinsic::readcyclecounter: {
SDValue Op = getRoot();
@@ -5058,20 +5150,20 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
&Op, 1);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
- return 0;
+ return nullptr;
}
case Intrinsic::bswap:
setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
case Intrinsic::cttz: {
SDValue Arg = getValue(I.getArgOperand(0));
ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
sdl, Ty, Arg));
- return 0;
+ return nullptr;
}
case Intrinsic::ctlz: {
SDValue Arg = getValue(I.getArgOperand(0));
@@ -5079,13 +5171,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
sdl, Ty, Arg));
- return 0;
+ return nullptr;
}
case Intrinsic::ctpop: {
SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
- return 0;
+ return nullptr;
}
case Intrinsic::stacksave: {
SDValue Op = getRoot();
@@ -5093,12 +5185,12 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.getVTList(TLI->getPointerTy(), MVT::Other), &Op, 1);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
- return 0;
+ return nullptr;
}
case Intrinsic::stackrestore: {
Res = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
- return 0;
+ return nullptr;
}
case Intrinsic::stackprotector: {
// Emit code into the DAG to store the stack guard onto the stack.
@@ -5120,7 +5212,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
true, false, 0);
setValue(&I, Res);
DAG.setRoot(Res);
- return 0;
+ return nullptr;
}
case Intrinsic::objectsize: {
// If we don't know by now, we're never going to know.
@@ -5137,16 +5229,16 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Res = DAG.getConstant(0, Ty);
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::annotation:
case Intrinsic::ptr_annotation:
// Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
- return 0;
+ return nullptr;
case Intrinsic::var_annotation:
// Discard annotate attributes
- return 0;
+ return nullptr;
case Intrinsic::init_trampoline: {
const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
@@ -5162,13 +5254,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops, 6);
DAG.setRoot(Res);
- return 0;
+ return nullptr;
}
case Intrinsic::adjust_trampoline: {
setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
}
case Intrinsic::gcroot:
if (GFI) {
@@ -5178,18 +5270,18 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
GFI->addStackRoot(FI->getIndex(), TypeMap);
}
- return 0;
+ return nullptr;
case Intrinsic::gcread:
case Intrinsic::gcwrite:
llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
case Intrinsic::flt_rounds:
setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
- return 0;
+ return nullptr;
case Intrinsic::expect: {
// Just replace __builtin_expect(exp, c) with EXP.
setValue(&I, getValue(I.getArgOperand(0)));
- return 0;
+ return nullptr;
}
case Intrinsic::debugtrap:
@@ -5199,7 +5291,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
ISD::TRAP : ISD::DEBUGTRAP;
DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot()));
- return 0;
+ return nullptr;
}
TargetLowering::ArgListTy Args;
TargetLowering::
@@ -5212,7 +5304,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Args, DAG, sdl);
std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI);
DAG.setRoot(Result.second);
- return 0;
+ return nullptr;
}
case Intrinsic::uadd_with_overflow:
@@ -5236,7 +5328,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
- return 0;
+ return nullptr;
}
case Intrinsic::prefetch: {
SDValue Ops[5];
@@ -5255,17 +5347,17 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
false, /* volatile */
rw==0, /* read */
rw==1)); /* write */
- return 0;
+ return nullptr;
}
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
// Stack coloring is not enabled in O0, discard region information.
if (TM.getOptLevel() == CodeGenOpt::None)
- return 0;
+ return nullptr;
SmallVector<Value *, 4> Allocas;
- GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD);
+ GetUnderlyingObjects(I.getArgOperand(1), Allocas, DL);
for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {
@@ -5285,15 +5377,15 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops, 2);
DAG.setRoot(Res);
}
- return 0;
+ return nullptr;
}
case Intrinsic::invariant_start:
// Discard region information.
setValue(&I, DAG.getUNDEF(TLI->getPointerTy()));
- return 0;
+ return nullptr;
case Intrinsic::invariant_end:
// Discard region information.
- return 0;
+ return nullptr;
case Intrinsic::stackprotectorcheck: {
// Do not actually emit anything for this basic block. Instead we initialize
// the stack protector descriptor and export the guard variable so we can
@@ -5304,19 +5396,21 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Flush our exports since we are going to process a terminator.
(void)getControlRoot();
- return 0;
+ return nullptr;
}
+ case Intrinsic::clear_cache:
+ return TLI->getClearCacheBuiltinName();
case Intrinsic::donothing:
// ignore
- return 0;
+ return nullptr;
case Intrinsic::experimental_stackmap: {
visitStackmap(I);
- return 0;
+ return nullptr;
}
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64: {
visitPatchpoint(I);
- return 0;
+ return nullptr;
}
}
}
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
Type *RetTy = FTy->getReturnType();
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
- MCSymbol *BeginLabel = 0;
+ MCSymbol *BeginLabel = nullptr;
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
int DemoteStackIdx = -100;
if (!CanLowerReturn) {
+ assert(!CS.hasInAllocaArgument() &&
+ "sret demotion is incompatible with inalloca");
uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(
FTy->getReturnType());
unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment(
setValue(CS.getInstruction(),
DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&RetTys[0], RetTys.size()),
+ DAG.getVTList(RetTys),
&Values[0], Values.size()));
}
/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
/// value is equal or not-equal to zero.
static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- if (const ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ for (const User *U : V->users()) {
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
if (IC->isEquality())
if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
if (C->isNullValue())
if (const Constant *LoadCst =
ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
- Builder.TD))
+ Builder.DL))
return Builder.getValue(LoadCst);
}
switch (CSize->getZExtValue()) {
default:
LoadVT = MVT::Other;
- LoadTy = 0;
+ LoadTy = nullptr;
ActuallyDoIt = false;
break;
case 2:
// bloat the code.
const TargetLowering *TLI = TM.getTargetLowering();
if (ActuallyDoIt && CSize->getZExtValue() > 4) {
+ unsigned DstAS = LHS->getType()->getPointerAddressSpace();
+ unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
// TODO: Handle 5 byte compare as 4-byte + 1 byte.
// TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
- if (!TLI->isTypeLegal(LoadVT) ||!TLI->allowsUnalignedMemoryAccesses(LoadVT))
+ if (!TLI->isTypeLegal(LoadVT) ||
+ !TLI->allowsUnalignedMemoryAccesses(LoadVT, SrcAS) ||
+ !TLI->allowsUnalignedMemoryAccesses(LoadVT, DstAS))
ActuallyDoIt = false;
}
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
ComputeUsesVAFloatArgument(I, &MMI);
- const char *RenameFn = 0;
+ const char *RenameFn = nullptr;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
RegsForValue AssignedRegs;
explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
- : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
+ : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
}
/// getCallOperandValEVT - Return the EVT of the Value* that this operand
/// MVT::Other.
EVT getCallOperandValEVT(LLVMContext &Context,
const TargetLowering &TLI,
- const DataLayout *TD) const {
- if (CallOperandVal == 0) return MVT::Other;
+ const DataLayout *DL) const {
+ if (!CallOperandVal) return MVT::Other;
if (isa<BasicBlock>(CallOperandVal))
return TLI.getPointerTy();
// If OpTy is not a single value, it may be a struct/union that we
// can tile with integers.
if (!OpTy->isSingleValueType() && OpTy->isSized()) {
- unsigned BitSize = TD->getTypeSizeInBits(OpTy);
+ unsigned BitSize = DL->getTypeSizeInBits(OpTy);
switch (BitSize) {
default: break;
case 1:
// types are identical size, use a bitcast to convert (e.g. two differing
// vector types).
MVT RegVT = *PhysReg.second->vt_begin();
- if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
+ if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
- OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, TD).
+ OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, DL).
getSimpleVT();
}
}
// There is no longer a Value* corresponding to this operand.
- OpInfo.CallOperandVal = 0;
+ OpInfo.CallOperandVal = nullptr;
// It is now an indirect operand.
OpInfo.isIndirect = true;
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
const TargetLowering *TLI = TM.getTargetLowering();
- const DataLayout &TD = *TLI->getDataLayout();
+ const DataLayout &DL = *TLI->getDataLayout();
SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(),
getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)),
- TD.getABITypeAlignment(I.getType()));
+ DL.getABITypeAlignment(I.getType()));
setValue(&I, V);
DAG.setRoot(V.getValue(1));
}
/// intrinsic's operands need to participate in the calling convention.
std::pair<SDValue, SDValue>
SelectionDAGBuilder::LowerCallOperands(const CallInst &CI, unsigned ArgIdx,
- unsigned NumArgs, SDValue Callee) {
+ unsigned NumArgs, SDValue Callee,
+ bool useVoidTy) {
TargetLowering::ArgListTy Args;
Args.reserve(NumArgs);
Args.push_back(Entry);
}
- TargetLowering::CallLoweringInfo CLI(getRoot(), CI.getType(),
- /*retSExt*/ false, /*retZExt*/ false, /*isVarArg*/ false, /*isInReg*/ false,
- NumArgs, CI.getCallingConv(), /*isTailCall*/ false, /*doesNotReturn*/ false,
+ Type *retTy = useVoidTy ? Type::getVoidTy(*DAG.getContext()) : CI.getType();
+ TargetLowering::CallLoweringInfo CLI(getRoot(), retTy, /*retSExt*/ false,
+ /*retZExt*/ false, /*isVarArg*/ false, /*isInReg*/ false, NumArgs,
+ CI.getCallingConv(), /*isTailCall*/ false, /*doesNotReturn*/ false,
/*isReturnValueUsed*/ CI.use_empty(), Callee, Args, DAG, getCurSDLoc());
const TargetLowering *TLI = TM.getTargetLowering();
return TLI->LowerCallTo(CLI);
}
+/// \brief Add a stack map intrinsic call's live variable operands to a stackmap
+/// or patchpoint target node's operand list.
+///
+/// Constants are converted to TargetConstants purely as an optimization to
+/// avoid constant materialization and register allocation.
+///
+/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
+/// generate addess computation nodes, and so ExpandISelPseudo can convert the
+/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
+/// address materialization and register allocation, but may also be required
+/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
+/// alloca in the entry block, then the runtime may assume that the alloca's
+/// StackMap location can be read immediately after compilation and that the
+/// location is valid at any point during execution (this is similar to the
+/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
+/// only available in a register, then the runtime would need to trap when
+/// execution reaches the StackMap in order to read the alloca's location.
+static void addStackMapLiveVars(const CallInst &CI, unsigned StartIdx,
+ SmallVectorImpl<SDValue> &Ops,
+ SelectionDAGBuilder &Builder) {
+ for (unsigned i = StartIdx, e = CI.getNumArgOperands(); i != e; ++i) {
+ SDValue OpVal = Builder.getValue(CI.getArgOperand(i));
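+ // Three cases: a constant becomes a ConstantOp marker followed by its
+ // value, a frame index becomes a TargetFrameIndex (a direct memory
+ // reference in the stack map), and anything else is passed through for
+ // the register allocator to assign.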
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
+ Ops.push_back(
+ Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
+ Ops.push_back(
+ Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
+ } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
+ const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
+ Ops.push_back(
+ Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
+ } else
+ Ops.push_back(OpVal);
+ }
+}
+
/// \brief Lower llvm.experimental.stackmap directly to its target opcode.
void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
// void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
- SDValue Callee = getValue(CI.getCalledValue());
+ SDValue Chain, InFlag, Callee, NullPtr;
+ SmallVector<SDValue, 32> Ops;
- // Lower into a call sequence with no args and no return value.
- std::pair<SDValue, SDValue> Result = LowerCallOperands(CI, 0, 0, Callee);
- // Set the root to the target-lowered call chain.
- SDValue Chain = Result.second;
- DAG.setRoot(Chain);
+ SDLoc DL = getCurSDLoc();
+ Callee = getValue(CI.getCalledValue());
+ NullPtr = DAG.getIntPtrConstant(0, true);
- /// Get a call instruction from the call sequence chain.
- /// Tail calls are not allowed.
- SDNode *CallEnd = Chain.getNode();
- assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
- "Expected a callseq node.");
- SDNode *Call = CallEnd->getOperand(0).getNode();
- bool hasGlue = Call->getGluedNode();
-
- assert(Call->getNumOperands() == hasGlue ? 2 : 1 &&
- "Unexpected extra stackmap call arguments.");
+ // The stackmap intrinsic only records the live variables (the arguments
+ // passed to it) and emits NOPs (if requested). Unlike the patchpoint
+ // intrinsic, this won't be lowered to a function call. This means we don't
+ // have to worry about calling conventions and target specific lowering code.
+ // Instead we perform the call lowering right here.
+ //
+ // chain, flag = CALLSEQ_START(chain, 0)
+ // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
+ // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
+ //
+ Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
+ InFlag = Chain.getValue(1);
- // Replace the target specific call node with the stackmap intrinsic.
- SmallVector<SDValue, 8> Ops;
+ // Add the <id> and <numBytes> constants.
+ SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
+ Ops.push_back(DAG.getTargetConstant(
+ cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
+ SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
+ Ops.push_back(DAG.getTargetConstant(
+ cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
- // Add the <id> and <numShadowBytes> constants.
- for (unsigned i = 0; i < 2; ++i) {
- SDValue tmp = getValue(CI.getOperand(i));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(tmp)->getZExtValue(), MVT::i32));
- }
// Push live variables for the stack map.
- for (unsigned i = 2, e = CI.getNumArgOperands(); i != e; ++i)
- Ops.push_back(getValue(CI.getArgOperand(i)));
+ addStackMapLiveVars(CI, 2, Ops, *this);
- // Push the chain (this is originally the first operand of the call, but
- // becomes now the last or second to last operand).
- Ops.push_back(*(Call->op_begin()));
+ // We are not pushing any register mask info here on the operands list,
+ // because the stackmap doesn't clobber anything.
- // Push the glue flag (last operand).
- if (hasGlue)
- Ops.push_back(*(Call->op_end()-1));
+ // Push the chain and the glue flag.
+ Ops.push_back(Chain);
+ Ops.push_back(InFlag);
- // Replace the target specific call node with STACKMAP in-place. This way we
- // don't have to call ReplaceAllUsesWith and STACKMAP will take the call's
- // place in the chain.
+ // Create the STACKMAP node.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- DAG.SelectNodeTo(Call, TargetOpcode::STACKMAP, NodeTys, &Ops[0], Ops.size());
+ SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
+ Chain = SDValue(SM, 0);
+ InFlag = Chain.getValue(1);
+
+ Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
+
+ // Stackmaps don't generate values, so nothing goes into the NodeMap.
+
+ // Set the root to the target-lowered call chain.
+ DAG.setRoot(Chain);
+
+ // Inform the Frame Information that we have a stackmap in this function.
+ FuncInfo.MF->getFrameInfo()->setHasStackMap();
}
/// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
- // void|i64 @llvm.experimental.patchpoint.void|i64(i32 <id>,
- // i32 <numNopBytes>,
- // i8* <target>, i32 <numArgs>,
- // [Args...], [live variables...])
-
+ // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
+ // i32 <numBytes>,
+ // i8* <target>,
+ // i32 <numArgs>,
+ // [Args...],
+ // [live variables...])
+
+ CallingConv::ID CC = CI.getCallingConv();
+ bool isAnyRegCC = CC == CallingConv::AnyReg;
+ bool hasDef = !CI.getType()->isVoidTy();
SDValue Callee = getValue(CI.getOperand(2)); // <target>
// Get the real number of arguments participating in the call <numArgs>
- unsigned NumArgs =
- cast<ConstantSDNode>(getValue(CI.getArgOperand(3)))->getZExtValue();
+ SDValue NArgVal = getValue(CI.getArgOperand(PatchPointOpers::NArgPos));
+ unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
- assert(CI.getNumArgOperands() >= NumArgs + 4 &&
+ // Intrinsics include all meta-operands up to but not including CC.
+ unsigned NumMetaOpers = PatchPointOpers::CCPos;
+ assert(CI.getNumArgOperands() >= NumMetaOpers + NumArgs &&
"Not enough arguments provided to the patchpoint intrinsic");
+ // For AnyRegCC the arguments are lowered later on manually.
+ unsigned NumCallArgs = isAnyRegCC ? 0 : NumArgs;
std::pair<SDValue, SDValue> Result =
- LowerCallOperands(CI, 4, NumArgs, Callee);
+ LowerCallOperands(CI, NumMetaOpers, NumCallArgs, Callee, isAnyRegCC);
+
// Set the root to the target-lowered call chain.
SDValue Chain = Result.second;
DAG.setRoot(Chain);
SDNode *CallEnd = Chain.getNode();
- if (!CI.getType()->isVoidTy()) {
- setValue(&CI, Result.first);
- if (CallEnd->getOpcode() == ISD::CopyFromReg)
- CallEnd = CallEnd->getOperand(0).getNode();
- }
+ if (hasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
+ CallEnd = CallEnd->getOperand(0).getNode();
+
/// Get a call instruction from the call sequence chain.
/// Tail calls are not allowed.
assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
// Replace the target specific call node with the patchable intrinsic.
SmallVector<SDValue, 8> Ops;
- // Add the <id> and <numNopBytes> constants.
- for (unsigned i = 0; i < 2; ++i) {
- SDValue tmp = getValue(CI.getOperand(i));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(tmp)->getZExtValue(), MVT::i32));
- }
+ // Add the <id> and <numBytes> constants.
+ SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
+ Ops.push_back(DAG.getTargetConstant(
+ cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
+ SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
+ Ops.push_back(DAG.getTargetConstant(
+ cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
+
// Assume that the Callee is a constant address.
+ // FIXME: handle function symbols in the future.
Ops.push_back(
- DAG.getIntPtrConstant(cast<ConstantSDNode>(Callee)->getZExtValue()));
+ DAG.getIntPtrConstant(cast<ConstantSDNode>(Callee)->getZExtValue(),
+ /*isTarget=*/true));
- // Adjust <numArgs> to account for any stack arguments.
+ // Adjust <numArgs> to account for any arguments that have been passed on
+ // the stack rather than in registers.
// Call Node: Chain, Target, {Args}, RegMask, [Glue]
- unsigned NumCallArgs = Call->getNumOperands() - (hasGlue ? 4 : 3);
- Ops.push_back(DAG.getTargetConstant(NumCallArgs, MVT::i32));
+ unsigned NumCallRegArgs = Call->getNumOperands() - (hasGlue ? 4 : 3);
+ NumCallRegArgs = isAnyRegCC ? NumArgs : NumCallRegArgs;
+ Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, MVT::i32));
+
+ // Add the calling convention.
+ Ops.push_back(DAG.getTargetConstant((unsigned)CC, MVT::i32));
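+ // At this point Ops holds <id>, <numBytes>, <target>, <numArgs> and <cc>;
+ // the call arguments, live variables, and the register mask, chain and glue
+ // taken from the lowered call node are appended below.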
- // Push the arguments from the call instruction.
+ // Add the arguments we omitted previously. The register allocator is free
+ // to place them in any register.
+ if (isAnyRegCC)
+ for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
+ Ops.push_back(getValue(CI.getArgOperand(i)));
+
+ // Push the arguments from the call instruction up to the register mask.
SDNode::op_iterator e = hasGlue ? Call->op_end()-2 : Call->op_end()-1;
for (SDNode::op_iterator i = Call->op_begin()+2; i != e; ++i)
Ops.push_back(*i);
// Push live variables for the stack map.
- for (unsigned i = NumArgs + 4, e = CI.getNumArgOperands(); i != e; ++i) {
- SDValue OpVal = getValue(CI.getArgOperand(i));
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
- Ops.push_back(
- DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
- } else
- Ops.push_back(OpVal);
- }
+ addStackMapLiveVars(CI, NumMetaOpers + NumArgs, Ops, *this);
// Push the register mask info.
if (hasGlue)
  Ops.push_back(*(Call->op_end()-2));
else
  Ops.push_back(*(Call->op_end()-1));
- // Replace the target specific call node with PATCHPOINT in-place. This
- // way we don't have to call ReplaceAllUsesWith and PATCHPOINT will
- // take the call's place in the chain.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- DAG.SelectNodeTo(Call, TargetOpcode::PATCHPOINT, NodeTys, &Ops[0],
- Ops.size());
+ SDVTList NodeTys;
+ if (isAnyRegCC && hasDef) {
+ // Create the return types based on the intrinsic definition.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SmallVector<EVT, 3> ValueVTs;
+ ComputeValueVTs(TLI, CI.getType(), ValueVTs);
+ assert(ValueVTs.size() == 1 && "Expected only one return value type.");
+
+ // There is always a chain and a glue type at the end.
+ ValueVTs.push_back(MVT::Other);
+ ValueVTs.push_back(MVT::Glue);
+ NodeTys = DAG.getVTList(ValueVTs);
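+ // E.g. an anyregcc patchpoint returning i64 gets the VT list
+ // (i64, ch, glue).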
+ } else
+ NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+
+ // Replace the target specific call node with a PATCHPOINT node.
+ MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
+ getCurSDLoc(), NodeTys, Ops);
+
+ // Update the NodeMap.
+ if (hasDef) {
+ if (isAnyRegCC)
+ setValue(&CI, SDValue(MN, 0));
+ else
+ setValue(&CI, Result.first);
+ }
+
+ // Fix up the consumers of the intrinsic. The chain and glue may be used in
+ // the call sequence. Furthermore, the location of the chain and glue can
+ // change when the AnyReg calling convention is used and the intrinsic
+ // returns a value.
+ if (isAnyRegCC && hasDef) {
+ SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
+ SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
+ DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
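+ // With a def the PATCHPOINT results are (value, chain, glue), so the old
+ // chain (result 0) is now result 1 and the old glue is now result 2.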
+ } else
+ DAG.ReplaceAllUsesWith(Call, MN);
+ DAG.DeleteNode(Call);
+
+ // Inform the Frame Information that we have a patchpoint in this function.
+ FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
}
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment =
- getDataLayout()->getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = getDataLayout()->getABITypeAlignment(ArgTy);
if (Args[i].isZExt)
Flags.setZExt();
Flags.setInReg();
if (Args[i].isSRet)
Flags.setSRet();
- if (Args[i].isByVal) {
+ if (Args[i].isByVal)
+ Flags.setByVal();
+ if (Args[i].isInAlloca) {
+ Flags.setInAlloca();
+ // Set the byval flag for CCAssignFn callbacks that don't know about
+ // inalloca. This way we can know how many bytes we should've allocated
+ // and how many bytes a callee cleanup function will pop. If we port
+ // inalloca to more targets, we'll have to add custom inalloca handling
+ // in the various CC lowering callbacks.
Flags.setByVal();
+ }
+ if (Args[i].isByVal || Args[i].isInAlloca) {
PointerType *Ty = cast<PointerType>(Args[i].Ty);
Type *ElementTy = Ty->getElementType();
Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
Flags.setReturned();
}
- getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts,
- PartVT, CLI.CS ? CLI.CS->getInstruction() : 0, ExtendKind);
+ getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
+ CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
for (unsigned j = 0; j != NumParts; ++j) {
- // if it isn't first piece, alignment must be 1
+ // If it isn't the first piece, alignment must be 1.
unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
- NumRegs, RegisterVT, VT, NULL,
+ NumRegs, RegisterVT, VT, nullptr,
AssertOp));
CurReg += NumRegs;
}
return std::make_pair(SDValue(), CLI.Chain);
SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
- CLI.DAG.getVTList(&RetTys[0], RetTys.size()),
+ CLI.DAG.getVTList(RetTys),
&ReturnValues[0], ReturnValues.size());
return std::make_pair(Res, CLI.Chain);
}
@@ -7109,7 +7314,7 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
const TargetLowering *TLI = TM.getTargetLowering();
RegsForValue RFV(V->getContext(), *TLI, Reg, V->getType());
SDValue Chain = DAG.getEntryNode();
- RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, 0, V);
+ RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V);
PendingExports.push_back(Chain);
}
return A->use_empty();
const BasicBlock *Entry = A->getParent()->begin();
- for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
- UI != E; ++UI) {
- const User *U = *UI;
+ for (const User *U : A->users())
if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
return false; // Use not in entry block.
- }
+
return true;
}
SelectionDAG &DAG = SDB->DAG;
SDLoc dl = SDB->getCurSDLoc();
const TargetLowering *TLI = getTargetLowering();
- const DataLayout *TD = TLI->getDataLayout();
+ const DataLayout *DL = TLI->getDataLayout();
SmallVector<ISD::InputArg, 16> Ins;
if (!FuncInfo->CanLowerReturn) {
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment =
- TD->getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL->getABITypeAlignment(ArgTy);
if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
Flags.setZExt();
Flags.setInReg();
if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
Flags.setSRet();
- if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal)) {
+ if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
Flags.setByVal();
+ if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) {
+ Flags.setInAlloca();
+ // Set the byval flag for CCAssignFn callbacks that don't know about
+ // inalloca. This way we can know how many bytes we should've allocated
+ // and how many bytes a callee cleanup function will pop. If we port
+ // inalloca to more targets, we'll have to add custom inalloca handling
+ // in the various CC lowering callbacks.
+ Flags.setByVal();
+ }
+ if (Flags.isByVal() || Flags.isInAlloca()) {
PointerType *Ty = cast<PointerType>(I->getType());
Type *ElementTy = Ty->getElementType();
- Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
+ Flags.setByValSize(DL->getTypeAllocSize(ElementTy));
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
unsigned FrameAlign;
MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
ISD::NodeType AssertOp = ISD::DELETED_NODE;
SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
- RegVT, VT, NULL, AssertOp);
+ RegVT, VT, nullptr, AssertOp);
MachineFunction& MF = SDB->DAG.getMachineFunction();
MachineRegisterInfo& RegInfo = MF.getRegInfo();
ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
NumParts, PartVT, VT,
- NULL, AssertOp));
+ nullptr, AssertOp));
}
i += NumParts;