diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index e079eee7d2c4d2c446cfe7cc0f0797ab6e98174d..33f024b2729fb25882aba3be73de26b610acb1b3 100644
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <climits>
STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc , "Number of reassociations");
-static cl::opt<bool> UnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
- cl::init(false),
- cl::desc("Enable unsafe double to float "
- "shrinking for math lib calls"));
-
-// Initialization Routines
-void llvm::initializeInstCombine(PassRegistry &Registry) {
- initializeInstCombinerPass(Registry);
-}
-
-void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
- initializeInstCombine(*unwrap(R));
-}
-
-char InstCombiner::ID = 0;
-INITIALIZE_PASS_BEGIN(InstCombiner, "instcombine",
- "Combine redundant instructions", false, false)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
-INITIALIZE_PASS_END(InstCombiner, "instcombine",
- "Combine redundant instructions", false, false)
-
-void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequired<TargetLibraryInfo>();
-}
-
-
Value *InstCombiner::EmitGEPOffset(User *GEP) {
return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
}
Instruction::BinaryOps ROp) {
if (Instruction::isCommutative(ROp))
return LeftDistributesOverRight(ROp, LOp);
+
+ switch (LOp) {
+ default:
+ return false;
+ // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
+ // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
+ // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ switch (ROp) {
+ default:
+ return false;
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ return true;
+ }
+ }
// TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
// but this requires knowing that the addition does not overflow and other
// such subtleties.
}
/// This function factors binary ops which can be combined using distributive
-/// laws. This also factor SHL as MUL e.g. SHL(X, 2) ==> MUL(X, 4).
+/// laws. This function also rewrites 'Op' based on TopLevelOpcode to enable
+/// factorization. For example, for ADD(SHL(X, 2), MUL(X, 5)), when called with
+/// TopLevelOpcode == Instruction::Add and Op = SHL(X, 2), it treats SHL(X, 2)
+/// as MUL(X, 4), i.e. it returns Instruction::Mul with LHS set to 'X' and RHS
+/// set to 4.
static Instruction::BinaryOps
-getBinOpsForFactorization(BinaryOperator *Op, Value *&LHS, Value *&RHS) {
+getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
+ BinaryOperator *Op, Value *&LHS, Value *&RHS) {
if (!Op)
return Instruction::BinaryOpsEnd;
- if (Op->getOpcode() == Instruction::Shl) {
- if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
- // The multiplier is really 1 << CST.
- RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
- LHS = Op->getOperand(0);
- return Instruction::Mul;
+ LHS = Op->getOperand(0);
+ RHS = Op->getOperand(1);
+
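+ // Only rewrite shl as mul when the enclosing operation is an add or sub,
+ // where it enables factorization; otherwise report the operation unchanged.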
+ switch (TopLevelOpcode) {
+ default:
+ return Op->getOpcode();
+
+ case Instruction::Add:
+ case Instruction::Sub:
+ if (Op->getOpcode() == Instruction::Shl) {
+ if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
+ // The multiplier is really 1 << CST.
+ RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
+ return Instruction::Mul;
+ }
}
+ return Op->getOpcode();
}
// TODO: We can add other conversions e.g. shr => div etc.
-
- LHS = Op->getOperand(0);
- RHS = Op->getOperand(1);
- return Op->getOpcode();
}
/// This tries to simplify binary operations by factorizing out common terms
// Factorization.
Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
- Instruction::BinaryOps LHSOpcode = getBinOpsForFactorization(Op0, A, B);
- Instruction::BinaryOps RHSOpcode = getBinOpsForFactorization(Op1, C, D);
+ auto TopLevelOpcode = I.getOpcode();
+ auto LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
+ auto RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);
// The instruction has the form "(A op' B) op (C op' D)". Try to factorize
// a common term.
return V;
// Expansion.
- Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
// The instruction has the form "(A op' B) op C". See if expanding it out
// to "(A op C) op' (B op C)" results in simplifications.
// If the incoming non-constant value is in I's block, we will remove one
// instruction, but insert another equivalent one, leading to infinite
// instcombine.
- if (NonConstBB == I.getParent())
+ if (isPotentiallyReachable(I.getParent(), NonConstBB, DT, LI))
return nullptr;
}
// If there is exactly one non-constant value, we can insert a copy of the
// operation in that block. However, if this is a critical edge, we would be
- // inserting the computation one some other paths (e.g. inside a loop). Only
+ // inserting the computation on some other paths (e.g. inside a loop). Only
// do this if the pred block is unconditionally branching into the phi block.
if (NonConstBB != nullptr) {
BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
- if (Value *V = SimplifyGEPInst(Ops, DL))
+ if (Value *V = SimplifyGEPInst(Ops, DL, TLI, DT, AC))
return ReplaceInstUsesWith(GEP, V);
Value *PtrOp = GEP.getOperand(0);
GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
}
- // Canonicalize (gep i8* X, -(ptrtoint Y)) to (sub (ptrtoint X), (ptrtoint Y))
- // The GEP pattern is emitted by the SCEV expander for certain kinds of
- // pointer arithmetic.
- if (DL && GEP.getNumIndices() == 1 &&
- match(GEP.getOperand(1), m_Neg(m_PtrToInt(m_Value())))) {
+ if (DL && GEP.getNumIndices() == 1) {
unsigned AS = GEP.getPointerAddressSpace();
- if (GEP.getType() == Builder->getInt8PtrTy(AS) &&
- GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
+ if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
DL->getPointerSizeInBits(AS)) {
- Operator *Index = cast<Operator>(GEP.getOperand(1));
- Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
- Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
- return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
+ Type *PtrTy = GEP.getPointerOperandType();
+ Type *Ty = PtrTy->getPointerElementType();
+ uint64_t TyAllocSize = DL->getTypeAllocSize(Ty);
+
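+ // See whether the index can be decomposed as a raw byte offset V scaled
+ // down by the element size: either the size is 1, or the index has the
+ // form (V ashr log2(size)) or (V sdiv size).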
+ bool Matched = false;
+ uint64_t C;
+ Value *V = nullptr;
+ if (TyAllocSize == 1) {
+ V = GEP.getOperand(1);
+ Matched = true;
+ } else if (match(GEP.getOperand(1),
+ m_AShr(m_Value(V), m_ConstantInt(C)))) {
+ if (TyAllocSize == 1ULL << C)
+ Matched = true;
+ } else if (match(GEP.getOperand(1),
+ m_SDiv(m_Value(V), m_ConstantInt(C)))) {
+ if (TyAllocSize == C)
+ Matched = true;
+ }
+
+ if (Matched) {
+ // Canonicalize (gep i8* X, -(ptrtoint Y))
+ // to (inttoptr (sub (ptrtoint X), (ptrtoint Y)))
+ // The GEP pattern is emitted by the SCEV expander for certain kinds of
+ // pointer arithmetic.
+ if (match(V, m_Neg(m_PtrToInt(m_Value())))) {
+ Operator *Index = cast<Operator>(V);
+ Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
+ Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
+ return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
+ }
+ // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X))
+ // to (bitcast Y)
+ Value *Y;
+ if (match(V, m_Sub(m_PtrToInt(m_Value(Y)),
+ m_PtrToInt(m_Specific(GEP.getOperand(0)))))) {
+ return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y,
+ GEP.getType());
+ }
+ }
}
}
return nullptr;
}
+Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
+ if (RI.getNumOperands() == 0) // ret void
+ return nullptr;
+ Value *ResultOp = RI.getOperand(0);
+ Type *VTy = ResultOp->getType();
+ if (!VTy->isIntegerTy())
+ return nullptr;
+
+ // There might be assume intrinsics dominating this return that completely
+ // determine the value. If so, constant fold it.
+ unsigned BitWidth = VTy->getPrimitiveSizeInBits();
+ APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ computeKnownBits(ResultOp, KnownZero, KnownOne, 0, &RI);
+ if ((KnownZero|KnownOne).isAllOnesValue())
+ RI.setOperand(0, Constant::getIntegerValue(VTy, KnownOne));
+
+ return nullptr;
+}
Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
// Change br (not X), label True, label False to: br X, label False, True
Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
Value *Cond = SI.getCondition();
+ unsigned BitWidth = cast<IntegerType>(Cond->getType())->getBitWidth();
+ APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ computeKnownBits(Cond, KnownZero, KnownOne);
+ unsigned LeadingKnownZeros = KnownZero.countLeadingOnes();
+ unsigned LeadingKnownOnes = KnownOne.countLeadingOnes();
+
+ // Compute the number of leading bits we can ignore.
+ for (auto &C : SI.cases()) {
+ LeadingKnownZeros = std::min(
+ LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros());
+ LeadingKnownOnes = std::min(
+ LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());
+ }
+
+ unsigned NewWidth = BitWidth - std::max(LeadingKnownZeros, LeadingKnownOnes);
+
+ // Truncate the condition operand if the new type is equal to or larger than
+ // the largest legal integer type. We need to be conservative here since
+ // x86 generates redundant zero-extension instructions if the operand is
+ // truncated to i8 or i16.
+ bool TruncCond = false;
+ if (DL && BitWidth > NewWidth &&
+ NewWidth >= DL->getLargestLegalIntTypeSize()) {
+ TruncCond = true;
+ IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
+ Builder->SetInsertPoint(&SI);
+ Value *NewCond = Builder->CreateTrunc(SI.getCondition(), Ty, "trunc");
+ SI.setCondition(NewCond);
+
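+ // Truncate every existing case value to the new condition type as well.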
+ for (auto &C : SI.cases())
+ static_cast<SwitchInst::CaseIt *>(&C)->setValue(ConstantInt::get(
+ SI.getContext(), C.getCaseValue()->getValue().trunc(NewWidth)));
+ }
+
if (Instruction *I = dyn_cast<Instruction>(Cond)) {
if (I->getOpcode() == Instruction::Add)
if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
i != e; ++i) {
ConstantInt* CaseVal = i.getCaseValue();
- Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal),
- AddRHS);
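+ // If the condition was truncated above, the case values were truncated
+ // with it; extend this one back to the original condition type (zext when
+ // the dropped bits were known zero, sext otherwise) before folding away
+ // the add.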
+ Constant *LHS = CaseVal;
+ if (TruncCond)
+ LHS = LeadingKnownZeros
+ ? ConstantExpr::getZExt(CaseVal, Cond->getType())
+ : ConstantExpr::getSExt(CaseVal, Cond->getType());
+ Constant* NewCaseVal = ConstantExpr::getSub(LHS, AddRHS);
assert(isa<ConstantInt>(NewCaseVal) &&
"Result of expression should be constant");
i.setValue(cast<ConstantInt>(NewCaseVal));
return &SI;
}
}
- return nullptr;
+
+ return TruncCond ? &SI : nullptr;
}
Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// If we already saw this clause, there is no point in having a second
// copy of it.
- if (AlreadyCaught.insert(TypeInfo)) {
+ if (AlreadyCaught.insert(TypeInfo).second) {
// This catch clause was not already seen.
NewClauses.push_back(CatchClause);
} else {
continue;
// There is no point in having multiple copies of the same typeinfo in
// a filter, so only add it if we didn't already.
- if (SeenInFilter.insert(TypeInfo))
+ if (SeenInFilter.insert(TypeInfo).second)
NewFilterElts.push_back(cast<Constant>(Elt));
}
// A filter containing a catch-all cannot match anything by definition.
/// whose condition is a known constant, we only visit the reachable successors.
///
static bool AddReachableCodeToWorklist(BasicBlock *BB,
- SmallPtrSet<BasicBlock*, 64> &Visited,
+ SmallPtrSetImpl<BasicBlock*> &Visited,
InstCombiner &IC,
const DataLayout *DL,
const TargetLibraryInfo *TLI) {
BB = Worklist.pop_back_val();
// We have now visited this block! If we've already been here, ignore it.
- if (!Visited.insert(BB)) continue;
+ if (!Visited.insert(BB).second)
+ continue;
for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
Instruction *Inst = BBI++;
return MadeIRChange;
}
-namespace {
-class InstCombinerLibCallSimplifier : public LibCallSimplifier {
- InstCombiner *IC;
-public:
- InstCombinerLibCallSimplifier(const DataLayout *DL,
- const TargetLibraryInfo *TLI,
- InstCombiner *IC)
- : LibCallSimplifier(DL, TLI, UnsafeFPShrink) {
- this->IC = IC;
- }
-
- /// replaceAllUsesWith - override so that instruction replacement
- /// can be defined in terms of the instruction combiner framework.
- void replaceAllUsesWith(Instruction *I, Value *With) const override {
- IC->ReplaceInstUsesWith(*I, With);
- }
-};
-}
-
-bool InstCombiner::runOnFunction(Function &F) {
- if (skipOptnoneFunction(F))
- return false;
+// FIXME: Passing all of the analyses here in the run method is ugly. We should
+// separate out the worklist from the combiner so that we can construct
+// a combiner once per function while re-using the storage of an external
+// worklist.
+bool InstCombiner::run(Function &F, AssumptionCache *AC, const DataLayout *DL,
+ TargetLibraryInfo *TLI, DominatorTree *DT,
+ LoopInfo *LI) {
+ // Set up our analysis pointers.
+ this->AC = AC;
+ this->DL = DL;
+ this->TLI = TLI;
+ this->DT = DT;
+ this->LI = LI;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- TLI = &getAnalysis<TargetLibraryInfo>();
// Minimizing size?
MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
Attribute::MinSize);
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
- IRBuilder<true, TargetFolder, InstCombineIRInserter>
- TheBuilder(F.getContext(), TargetFolder(DL),
- InstCombineIRInserter(Worklist));
+ IRBuilder<true, TargetFolder, InstCombineIRInserter> TheBuilder(
+ F.getContext(), TargetFolder(DL), InstCombineIRInserter(Worklist, AC));
Builder = &TheBuilder;
- InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);
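+ // Route replacements made by the library call simplifier through
+ // ReplaceInstUsesWith so they are expressed in terms of the instruction
+ // combiner framework and keep the worklist up to date.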
+ auto InstCombineRAUW = [this](Instruction *From, Value *With) {
+ ReplaceInstUsesWith(*From, With);
+ };
+ LibCallSimplifier TheSimplifier(DL, TLI, InstCombineRAUW);
Simplifier = &TheSimplifier;
bool EverMadeChange = false;
return EverMadeChange;
}
+namespace {
+/// \brief The legacy pass manager's instcombine pass.
+///
+/// This is a basic whole-function wrapper around the instcombine utility. It
+/// will try to combine all instructions in the function.
+class InstructionCombiningPass : public FunctionPass {
+ InstCombiner IC;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ InstructionCombiningPass() : FunctionPass(ID) {
+ initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnFunction(Function &F) override;
+};
+}
+
+void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<AssumptionCacheTracker>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+}
+
+bool InstructionCombiningPass::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+ auto *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ auto *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
+ auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
+
+ return IC.run(F, &AC, DL, &TLI, &DT, LI);
+}
+
+char InstructionCombiningPass::ID = 0;
+INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
+ "Combine redundant instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
+ "Combine redundant instructions", false, false)
+
+// Initialization Routines
+void llvm::initializeInstCombine(PassRegistry &Registry) {
+ initializeInstructionCombiningPassPass(Registry);
+}
+
+void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
+ initializeInstructionCombiningPassPass(*unwrap(R));
+}
+
FunctionPass *llvm::createInstructionCombiningPass() {
- return new InstCombiner();
+ return new InstructionCombiningPass();
}