1 //===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run
11 // both before and after the DAG is legalized.
12 //
13 // This pass is not a substitute for the LLVM IR instcombine pass. This pass is
14 // primarily intended to handle simplification opportunities that are implicit
15 // in the LLVM IR and exposed by the various codegen lowering phases.
16 //
17 //===----------------------------------------------------------------------===//
19 #include "llvm/CodeGen/SelectionDAG.h"
20 #include "llvm/ADT/SmallBitVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineFunction.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/DerivedTypes.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/LLVMContext.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Target/TargetLowering.h"
37 #include "llvm/Target/TargetMachine.h"
38 #include "llvm/Target/TargetOptions.h"
39 #include "llvm/Target/TargetRegisterInfo.h"
40 #include "llvm/Target/TargetSubtargetInfo.h"
41 #include <algorithm>
42 using namespace llvm;
44 #define DEBUG_TYPE "dagcombine"
46 STATISTIC(NodesCombined , "Number of dag nodes combined");
47 STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
48 STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
49 STATISTIC(OpsNarrowed , "Number of load/op/store narrowed");
50 STATISTIC(LdStFP2Int , "Number of fp load/store pairs transformed to int");
51 STATISTIC(SlicedLoads, "Number of load sliced");
53 namespace {
54 static cl::opt<bool>
55 CombinerAA("combiner-alias-analysis", cl::Hidden,
56 cl::desc("Enable DAG combiner alias-analysis heuristics"));
58 static cl::opt<bool>
59 CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
60 cl::desc("Enable DAG combiner's use of IR alias analysis"));
62 static cl::opt<bool>
63 UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
64 cl::desc("Enable DAG combiner's use of TBAA"));
66 #ifndef NDEBUG
67 static cl::opt<std::string>
68 CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
69 cl::desc("Only use DAG-combiner alias analysis in this"
70 " function"));
71 #endif
73 /// Hidden option to stress test load slicing, i.e., when this option
74 /// is enabled, load slicing bypasses most of its profitability guards.
75 static cl::opt<bool>
76 StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
77 cl::desc("Bypass the profitability model of load "
78 "slicing"),
79 cl::init(false));
81 static cl::opt<bool>
82 MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
83 cl::desc("DAG combiner may split indexing from loads"));
85 //------------------------------ DAGCombiner ---------------------------------//
87 class DAGCombiner {
88 SelectionDAG &DAG;
89 const TargetLowering &TLI;
90 CombineLevel Level;
91 CodeGenOpt::Level OptLevel;
92 bool LegalOperations;
93 bool LegalTypes;
94 bool ForCodeSize;
96 /// \brief Worklist of all of the nodes that need to be simplified.
97 ///
98 /// This must behave as a stack -- new nodes to process are pushed onto the
99 /// back and when processing we pop off of the back.
100 ///
101 /// The worklist will not contain duplicates but may contain null entries
102 /// due to nodes being deleted from the underlying DAG.
103 SmallVector<SDNode *, 64> Worklist;
105 /// \brief Mapping from an SDNode to its position on the worklist.
106 ///
107 /// This is used to find and remove nodes from the worklist (by nulling
108 /// them) when they are deleted from the underlying DAG. It relies on
109 /// stable indices of nodes within the worklist.
110 DenseMap<SDNode *, unsigned> WorklistMap;
112 /// \brief Set of nodes which have been combined (at least once).
113 ///
114 /// This is used to allow us to reliably add any operands of a DAG node
115 /// which have not yet been combined to the worklist.
116 SmallPtrSet<SDNode *, 64> CombinedNodes;
118 // AA - Used for DAG load/store alias analysis.
119 AliasAnalysis &AA;
121 /// When an instruction is simplified, add all users of the instruction to
122 /// the work lists because they might get more simplified now.
123 void AddUsersToWorklist(SDNode *N) {
124 for (SDNode *Node : N->uses())
125 AddToWorklist(Node);
126 }
128 /// Call the node-specific routine that folds each particular type of node.
129 SDValue visit(SDNode *N);
131 public:
132 /// Add to the worklist making sure its instance is at the back (next to be
133 /// processed.)
134 void AddToWorklist(SDNode *N) {
135 // Skip handle nodes as they can't usefully be combined and confuse the
136 // zero-use deletion strategy.
137 if (N->getOpcode() == ISD::HANDLENODE)
138 return;
140 if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
141 Worklist.push_back(N);
142 }
144 /// Remove all instances of N from the worklist.
145 void removeFromWorklist(SDNode *N) {
146 CombinedNodes.erase(N);
148 auto It = WorklistMap.find(N);
149 if (It == WorklistMap.end())
150 return; // Not in the worklist.
152 // Null out the entry rather than erasing it to avoid a linear operation.
153 Worklist[It->second] = nullptr;
154 WorklistMap.erase(It);
155 }
157 void deleteAndRecombine(SDNode *N);
158 bool recursivelyDeleteUnusedNodes(SDNode *N);
160 SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
161 bool AddTo = true);
163 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
164 return CombineTo(N, &Res, 1, AddTo);
165 }
167 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
168 bool AddTo = true) {
169 SDValue To[] = { Res0, Res1 };
170 return CombineTo(N, To, 2, AddTo);
171 }
173 void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
175 private:
177 /// Check the specified integer node value to see if it can be simplified or
178 /// if things it uses can be simplified by bit propagation.
179 /// If so, return true.
180 bool SimplifyDemandedBits(SDValue Op) {
181 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
182 APInt Demanded = APInt::getAllOnesValue(BitWidth);
183 return SimplifyDemandedBits(Op, Demanded);
184 }
186 bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);
188 bool CombineToPreIndexedLoadStore(SDNode *N);
189 bool CombineToPostIndexedLoadStore(SDNode *N);
190 SDValue SplitIndexingFromLoad(LoadSDNode *LD);
191 bool SliceUpLoad(SDNode *N);
193 /// \brief Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
194 /// load.
195 ///
196 /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
197 /// \param InVecVT type of the input vector to EVE with bitcasts resolved.
198 /// \param EltNo index of the vector element to load.
199 /// \param OriginalLoad load that EVE came from to be replaced.
200 /// \returns EVE on success SDValue() on failure.
201 SDValue ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
202 SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad);
203 void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
204 SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
205 SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
206 SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
207 SDValue PromoteIntBinOp(SDValue Op);
208 SDValue PromoteIntShiftOp(SDValue Op);
209 SDValue PromoteExtend(SDValue Op);
210 bool PromoteLoad(SDValue Op);
212 void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
213 SDValue Trunc, SDValue ExtLoad, SDLoc DL,
214 ISD::NodeType ExtType);
216 /// Call the node-specific routine that knows how to fold each
217 /// particular type of node. If that doesn't do anything, try the
218 /// target-specific DAG combines.
219 SDValue combine(SDNode *N);
221 // Visitation implementation - Implement dag node combining for different
222 // node types. The semantics are as follows:
223 // Return Value:
224 // SDValue.getNode() == 0 - No change was made
225 // SDValue.getNode() == N - N was replaced, is dead and has been handled.
226 // otherwise - N should be replaced by the returned Operand.
227 //
228 SDValue visitTokenFactor(SDNode *N);
229 SDValue visitMERGE_VALUES(SDNode *N);
230 SDValue visitADD(SDNode *N);
231 SDValue visitSUB(SDNode *N);
232 SDValue visitADDC(SDNode *N);
233 SDValue visitSUBC(SDNode *N);
234 SDValue visitADDE(SDNode *N);
235 SDValue visitSUBE(SDNode *N);
236 SDValue visitMUL(SDNode *N);
237 SDValue visitSDIV(SDNode *N);
238 SDValue visitUDIV(SDNode *N);
239 SDValue visitSREM(SDNode *N);
240 SDValue visitUREM(SDNode *N);
241 SDValue visitMULHU(SDNode *N);
242 SDValue visitMULHS(SDNode *N);
243 SDValue visitSMUL_LOHI(SDNode *N);
244 SDValue visitUMUL_LOHI(SDNode *N);
245 SDValue visitSMULO(SDNode *N);
246 SDValue visitUMULO(SDNode *N);
247 SDValue visitSDIVREM(SDNode *N);
248 SDValue visitUDIVREM(SDNode *N);
249 SDValue visitAND(SDNode *N);
250 SDValue visitOR(SDNode *N);
251 SDValue visitXOR(SDNode *N);
252 SDValue SimplifyVBinOp(SDNode *N);
253 SDValue SimplifyVUnaryOp(SDNode *N);
254 SDValue visitSHL(SDNode *N);
255 SDValue visitSRA(SDNode *N);
256 SDValue visitSRL(SDNode *N);
257 SDValue visitRotate(SDNode *N);
258 SDValue visitCTLZ(SDNode *N);
259 SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
260 SDValue visitCTTZ(SDNode *N);
261 SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
262 SDValue visitCTPOP(SDNode *N);
263 SDValue visitSELECT(SDNode *N);
264 SDValue visitVSELECT(SDNode *N);
265 SDValue visitSELECT_CC(SDNode *N);
266 SDValue visitSETCC(SDNode *N);
267 SDValue visitSIGN_EXTEND(SDNode *N);
268 SDValue visitZERO_EXTEND(SDNode *N);
269 SDValue visitANY_EXTEND(SDNode *N);
270 SDValue visitSIGN_EXTEND_INREG(SDNode *N);
271 SDValue visitTRUNCATE(SDNode *N);
272 SDValue visitBITCAST(SDNode *N);
273 SDValue visitBUILD_PAIR(SDNode *N);
274 SDValue visitFADD(SDNode *N);
275 SDValue visitFSUB(SDNode *N);
276 SDValue visitFMUL(SDNode *N);
277 SDValue visitFMA(SDNode *N);
278 SDValue visitFDIV(SDNode *N);
279 SDValue visitFREM(SDNode *N);
280 SDValue visitFSQRT(SDNode *N);
281 SDValue visitFCOPYSIGN(SDNode *N);
282 SDValue visitSINT_TO_FP(SDNode *N);
283 SDValue visitUINT_TO_FP(SDNode *N);
284 SDValue visitFP_TO_SINT(SDNode *N);
285 SDValue visitFP_TO_UINT(SDNode *N);
286 SDValue visitFP_ROUND(SDNode *N);
287 SDValue visitFP_ROUND_INREG(SDNode *N);
288 SDValue visitFP_EXTEND(SDNode *N);
289 SDValue visitFNEG(SDNode *N);
290 SDValue visitFABS(SDNode *N);
291 SDValue visitFCEIL(SDNode *N);
292 SDValue visitFTRUNC(SDNode *N);
293 SDValue visitFFLOOR(SDNode *N);
294 SDValue visitBRCOND(SDNode *N);
295 SDValue visitBR_CC(SDNode *N);
296 SDValue visitLOAD(SDNode *N);
297 SDValue visitSTORE(SDNode *N);
298 SDValue visitINSERT_VECTOR_ELT(SDNode *N);
299 SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
300 SDValue visitBUILD_VECTOR(SDNode *N);
301 SDValue visitCONCAT_VECTORS(SDNode *N);
302 SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
303 SDValue visitVECTOR_SHUFFLE(SDNode *N);
304 SDValue visitINSERT_SUBVECTOR(SDNode *N);
306 SDValue XformToShuffleWithZero(SDNode *N);
307 SDValue ReassociateOps(unsigned Opc, SDLoc DL, SDValue LHS, SDValue RHS);
309 SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);
311 bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
312 SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
313 SDValue SimplifySelect(SDLoc DL, SDValue N0, SDValue N1, SDValue N2);
314 SDValue SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1, SDValue N2,
315 SDValue N3, ISD::CondCode CC,
316 bool NotExtCompare = false);
317 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
318 SDLoc DL, bool foldBooleans = true);
320 bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
321 SDValue &CC) const;
322 bool isOneUseSetCC(SDValue N) const;
324 SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
325 unsigned HiOp);
326 SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
327 SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
328 SDValue BuildSDIV(SDNode *N);
329 SDValue BuildSDIVPow2(SDNode *N);
330 SDValue BuildUDIV(SDNode *N);
331 SDValue BuildReciprocalEstimate(SDValue Op);
332 SDValue BuildRsqrtEstimate(SDValue Op);
333 SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
334 bool DemandHighBits = true);
335 SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
336 SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
337 SDValue InnerPos, SDValue InnerNeg,
338 unsigned PosOpcode, unsigned NegOpcode,
339 SDLoc DL);
340 SDNode *MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL);
341 SDValue ReduceLoadWidth(SDNode *N);
342 SDValue ReduceLoadOpStoreWidth(SDNode *N);
343 SDValue TransformFPLoadStorePair(SDNode *N);
344 SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
345 SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
347 SDValue GetDemandedBits(SDValue V, const APInt &Mask);
349 /// Walk up chain skipping non-aliasing memory nodes,
350 /// looking for aliasing nodes and adding them to the Aliases vector.
351 void GatherAllAliases(SDNode *N, SDValue OriginalChain,
352 SmallVectorImpl<SDValue> &Aliases);
354 /// Return true if there is any possibility that the two addresses overlap.
355 bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const;
357 /// Walk up chain skipping non-aliasing memory nodes, looking for a better
358 /// chain (aliasing node.)
359 SDValue FindBetterChain(SDNode *N, SDValue Chain);
361 /// Merge consecutive store operations into a wide store.
362 /// This optimization uses wide integers or vectors when possible.
363 /// \return True if some memory operations were changed.
364 bool MergeConsecutiveStores(StoreSDNode *N);
366 /// \brief Try to transform a truncation where C is a constant:
367 /// (trunc (and X, C)) -> (and (trunc X), (trunc C))
368 ///
369 /// \p N needs to be a truncation and its first operand an AND. Other
370 /// requirements are checked by the function (e.g. that trunc is
371 /// single-use) and if missed an empty SDValue is returned.
372 SDValue distributeTruncateThroughAnd(SDNode *N);
374 public:
375 DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
376 : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
377 OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
378 AttributeSet FnAttrs =
379 DAG.getMachineFunction().getFunction()->getAttributes();
380 ForCodeSize =
381 FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
382 Attribute::OptimizeForSize) ||
383 FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
384 }
386 /// Runs the dag combiner on all nodes in the work list
387 void Run(CombineLevel AtLevel);
389 SelectionDAG &getDAG() const { return DAG; }
391 /// Returns a type large enough to hold any valid shift amount - before type
392 /// legalization these can be huge.
393 EVT getShiftAmountTy(EVT LHSTy) {
394 assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
395 if (LHSTy.isVector())
396 return LHSTy;
397 return LegalTypes ? TLI.getScalarShiftAmountTy(LHSTy)
398 : TLI.getPointerTy();
399 }
401 /// This method returns true if we are running before type legalization or
402 /// if the specified VT is legal.
403 bool isTypeLegal(const EVT &VT) {
404 if (!LegalTypes) return true;
405 return TLI.isTypeLegal(VT);
406 }
408 /// Convenience wrapper around TargetLowering::getSetCCResultType
409 EVT getSetCCResultType(EVT VT) const {
410 return TLI.getSetCCResultType(*DAG.getContext(), VT);
411 }
412 };
413 }
416 namespace {
417 /// This class is a DAGUpdateListener that removes any deleted
418 /// nodes from the worklist.
419 class WorklistRemover : public SelectionDAG::DAGUpdateListener {
420 DAGCombiner &DC;
421 public:
422 explicit WorklistRemover(DAGCombiner &dc)
423 : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
425 void NodeDeleted(SDNode *N, SDNode *E) override {
426 DC.removeFromWorklist(N);
427 }
428 };
429 }
431 //===----------------------------------------------------------------------===//
432 // TargetLowering::DAGCombinerInfo implementation
433 //===----------------------------------------------------------------------===//
435 void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
436 ((DAGCombiner*)DC)->AddToWorklist(N);
437 }
439 void TargetLowering::DAGCombinerInfo::RemoveFromWorklist(SDNode *N) {
440 ((DAGCombiner*)DC)->removeFromWorklist(N);
441 }
443 SDValue TargetLowering::DAGCombinerInfo::
444 CombineTo(SDNode *N, const std::vector<SDValue> &To, bool AddTo) {
445 return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
446 }
448 SDValue TargetLowering::DAGCombinerInfo::
449 CombineTo(SDNode *N, SDValue Res, bool AddTo) {
450 return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
451 }
454 SDValue TargetLowering::DAGCombinerInfo::
455 CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
456 return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
457 }
459 void TargetLowering::DAGCombinerInfo::
460 CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
461 return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
462 }
464 //===----------------------------------------------------------------------===//
465 // Helper Functions
466 //===----------------------------------------------------------------------===//
468 void DAGCombiner::deleteAndRecombine(SDNode *N) {
469 removeFromWorklist(N);
471 // If the operands of this node are only used by the node, they will now be
472 // dead. Make sure to re-visit them and recursively delete dead nodes.
473 for (const SDValue &Op : N->ops())
474 // For an operand generating multiple values, one of the values may
475 // become dead allowing further simplification (e.g. split index
476 // arithmetic from an indexed load).
477 if (Op->hasOneUse() || Op->getNumValues() > 1)
478 AddToWorklist(Op.getNode());
480 DAG.DeleteNode(N);
481 }
483 /// Return 1 if we can compute the negated form of the specified expression for
484 /// the same cost as the expression itself, or 2 if we can compute the negated
485 /// form more cheaply than the expression itself.
486 static char isNegatibleForFree(SDValue Op, bool LegalOperations,
487 const TargetLowering &TLI,
488 const TargetOptions *Options,
489 unsigned Depth = 0) {
490 // fneg is removable even if it has multiple uses.
491 if (Op.getOpcode() == ISD::FNEG) return 2;
493 // Don't allow anything with multiple uses.
494 if (!Op.hasOneUse()) return 0;
496 // Don't recurse exponentially.
497 if (Depth > 6) return 0;
499 switch (Op.getOpcode()) {
500 default: return false;
501 case ISD::ConstantFP:
502 // Don't invert constant FP values after legalize. The negated constant
503 // isn't necessarily legal.
504 return LegalOperations ? 0 : 1;
505 case ISD::FADD:
506 // FIXME: determine better conditions for this xform.
507 if (!Options->UnsafeFPMath) return 0;
509 // After operation legalization, it might not be legal to create new FSUBs.
510 if (LegalOperations &&
511 !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType()))
512 return 0;
514 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
515 if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
516 Options, Depth + 1))
517 return V;
518 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
519 return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
520 Depth + 1);
521 case ISD::FSUB:
522 // We can't turn -(A-B) into B-A when we honor signed zeros.
523 if (!Options->UnsafeFPMath) return 0;
525 // fold (fneg (fsub A, B)) -> (fsub B, A)
526 return 1;
528 case ISD::FMUL:
529 case ISD::FDIV:
530 if (Options->HonorSignDependentRoundingFPMath()) return 0;
532 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
533 if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
534 Options, Depth + 1))
535 return V;
537 return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
538 Depth + 1);
540 case ISD::FP_EXTEND:
541 case ISD::FP_ROUND:
542 case ISD::FSIN:
543 return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options,
544 Depth + 1);
545 }
546 }
548 /// If isNegatibleForFree returns true, return the newly negated expression.
549 static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
550 bool LegalOperations, unsigned Depth = 0) {
551 const TargetOptions &Options = DAG.getTarget().Options;
552 // fneg is removable even if it has multiple uses.
553 if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);
555 // Don't allow anything with multiple uses.
556 assert(Op.hasOneUse() && "Unknown reuse!");
558 assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
559 switch (Op.getOpcode()) {
560 default: llvm_unreachable("Unknown code");
561 case ISD::ConstantFP: {
562 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
563 V.changeSign();
564 return DAG.getConstantFP(V, Op.getValueType());
565 }
566 case ISD::FADD:
567 // FIXME: determine better conditions for this xform.
568 assert(Options.UnsafeFPMath);
570 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
571 if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
572 DAG.getTargetLoweringInfo(), &Options, Depth+1))
573 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
574 GetNegatedExpression(Op.getOperand(0), DAG,
575 LegalOperations, Depth+1),
576 Op.getOperand(1));
577 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
578 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
579 GetNegatedExpression(Op.getOperand(1), DAG,
580 LegalOperations, Depth+1),
581 Op.getOperand(0));
582 case ISD::FSUB:
583 // We can't turn -(A-B) into B-A when we honor signed zeros.
584 assert(Options.UnsafeFPMath);
586 // fold (fneg (fsub 0, B)) -> B
587 if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
588 if (N0CFP->getValueAPF().isZero())
589 return Op.getOperand(1);
591 // fold (fneg (fsub A, B)) -> (fsub B, A)
592 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
593 Op.getOperand(1), Op.getOperand(0));
595 case ISD::FMUL:
596 case ISD::FDIV:
597 assert(!Options.HonorSignDependentRoundingFPMath());
599 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
600 if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
601 DAG.getTargetLoweringInfo(), &Options, Depth+1))
602 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
603 GetNegatedExpression(Op.getOperand(0), DAG,
604 LegalOperations, Depth+1),
605 Op.getOperand(1));
607 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
608 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
609 Op.getOperand(0),
610 GetNegatedExpression(Op.getOperand(1), DAG,
611 LegalOperations, Depth+1));
613 case ISD::FP_EXTEND:
614 case ISD::FSIN:
615 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
616 GetNegatedExpression(Op.getOperand(0), DAG,
617 LegalOperations, Depth+1));
618 case ISD::FP_ROUND:
619 return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
620 GetNegatedExpression(Op.getOperand(0), DAG,
621 LegalOperations, Depth+1),
622 Op.getOperand(1));
623 }
624 }
626 // Return true if this node is a setcc, or is a select_cc
627 // that selects between the target values used for true and false, making it
628 // equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
629 // the appropriate nodes based on the type of node we are checking. This
630 // simplifies life a bit for the callers.
631 bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
632 SDValue &CC) const {
633 if (N.getOpcode() == ISD::SETCC) {
634 LHS = N.getOperand(0);
635 RHS = N.getOperand(1);
636 CC = N.getOperand(2);
637 return true;
638 }
640 if (N.getOpcode() != ISD::SELECT_CC ||
641 !TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
642 !TLI.isConstFalseVal(N.getOperand(3).getNode()))
643 return false;
645 LHS = N.getOperand(0);
646 RHS = N.getOperand(1);
647 CC = N.getOperand(4);
648 return true;
649 }
651 /// Return true if this is a SetCC-equivalent operation with only one use.
652 /// If this is true, it allows the users to invert the operation for free when
653 /// it is profitable to do so.
654 bool DAGCombiner::isOneUseSetCC(SDValue N) const {
655 SDValue N0, N1, N2;
656 if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
657 return true;
658 return false;
659 }
661 /// Returns true if N is a BUILD_VECTOR node whose
662 /// elements are all the same constant or undefined.
663 static bool isConstantSplatVector(SDNode *N, APInt& SplatValue) {
664 BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(N);
665 if (!C)
666 return false;
668 APInt SplatUndef;
669 unsigned SplatBitSize;
670 bool HasAnyUndefs;
671 EVT EltVT = N->getValueType(0).getVectorElementType();
672 return (C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
673 HasAnyUndefs) &&
674 EltVT.getSizeInBits() >= SplatBitSize);
675 }
677 // \brief Returns the SDNode if it is a constant BuildVector or constant.
678 static SDNode *isConstantBuildVectorOrConstantInt(SDValue N) {
679 if (isa<ConstantSDNode>(N))
680 return N.getNode();
681 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
682 if (BV && BV->isConstant())
683 return BV;
684 return nullptr;
685 }
687 // \brief Returns the SDNode if it is a constant splat BuildVector or constant
688 // int.
689 static ConstantSDNode *isConstOrConstSplat(SDValue N) {
690 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
691 return CN;
693 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
694 BitVector UndefElements;
695 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
697 // BuildVectors can truncate their operands. Ignore that case here.
698 // FIXME: We blindly ignore splats which include undef which is overly
699 // pessimistic.
700 if (CN && UndefElements.none() &&
701 CN->getValueType(0) == N.getValueType().getScalarType())
702 return CN;
703 }
705 return nullptr;
706 }
708 // \brief Returns the SDNode if it is a constant splat BuildVector or constant
709 // float.
710 static ConstantFPSDNode *isConstOrConstSplatFP(SDValue N) {
711 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
712 return CN;
714 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
715 BitVector UndefElements;
716 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
718 if (CN && UndefElements.none())
719 return CN;
720 }
722 return nullptr;
723 }
725 SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDLoc DL,
726 SDValue N0, SDValue N1) {
727 EVT VT = N0.getValueType();
728 if (N0.getOpcode() == Opc) {
729 if (SDNode *L = isConstantBuildVectorOrConstantInt(N0.getOperand(1))) {
730 if (SDNode *R = isConstantBuildVectorOrConstantInt(N1)) {
731 // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
732 SDValue OpNode = DAG.FoldConstantArithmetic(Opc, VT, L, R);
733 if (!OpNode.getNode())
734 return SDValue();
735 return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
736 }
737 if (N0.hasOneUse()) {
738 // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one
739 // use
740 SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
741 if (!OpNode.getNode())
742 return SDValue();
743 AddToWorklist(OpNode.getNode());
744 return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
745 }
746 }
747 }
749 if (N1.getOpcode() == Opc) {
750 if (SDNode *R = isConstantBuildVectorOrConstantInt(N1.getOperand(1))) {
751 if (SDNode *L = isConstantBuildVectorOrConstantInt(N0)) {
752 // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
753 SDValue OpNode = DAG.FoldConstantArithmetic(Opc, VT, R, L);
754 if (!OpNode.getNode())
755 return SDValue();
756 return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
757 }
758 if (N1.hasOneUse()) {
759 // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one
760 // use
761 SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N1.getOperand(0), N0);
762 if (!OpNode.getNode())
763 return SDValue();
764 AddToWorklist(OpNode.getNode());
765 return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
766 }
767 }
768 }
770 return SDValue();
771 }
773 SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
774 bool AddTo) {
775 assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
776 ++NodesCombined;
777 DEBUG(dbgs() << "\nReplacing.1 ";
778 N->dump(&DAG);
779 dbgs() << "\nWith: ";
780 To[0].getNode()->dump(&DAG);
781 dbgs() << " and " << NumTo-1 << " other values\n";
782 for (unsigned i = 0, e = NumTo; i != e; ++i)
783 assert((!To[i].getNode() ||
784 N->getValueType(i) == To[i].getValueType()) &&
785 "Cannot combine value to value of different type!"));
786 WorklistRemover DeadNodes(*this);
787 DAG.ReplaceAllUsesWith(N, To);
788 if (AddTo) {
789 // Push the new nodes and any users onto the worklist
790 for (unsigned i = 0, e = NumTo; i != e; ++i) {
791 if (To[i].getNode()) {
792 AddToWorklist(To[i].getNode());
793 AddUsersToWorklist(To[i].getNode());
794 }
795 }
796 }
798 // Finally, if the node is now dead, remove it from the graph. The node
799 // may not be dead if the replacement process recursively simplified to
800 // something else needing this node.
801 if (N->use_empty())
802 deleteAndRecombine(N);
803 return SDValue(N, 0);
804 }
806 void DAGCombiner::
807 CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
808 // Replace all uses. If any nodes become isomorphic to other nodes and
809 // are deleted, make sure to remove them from our worklist.
810 WorklistRemover DeadNodes(*this);
811 DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);
813 // Push the new node and any (possibly new) users onto the worklist.
814 AddToWorklist(TLO.New.getNode());
815 AddUsersToWorklist(TLO.New.getNode());
817 // Finally, if the node is now dead, remove it from the graph. The node
818 // may not be dead if the replacement process recursively simplified to
819 // something else needing this node.
820 if (TLO.Old.getNode()->use_empty())
821 deleteAndRecombine(TLO.Old.getNode());
822 }
824 /// Check the specified integer node value to see if it can be simplified or if
825 /// things it uses can be simplified by bit propagation. If so, return true.
826 bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
827 TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
828 APInt KnownZero, KnownOne;
829 if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
830 return false;
832 // Revisit the node.
833 AddToWorklist(Op.getNode());
835 // Replace the old value with the new one.
836 ++NodesCombined;
837 DEBUG(dbgs() << "\nReplacing.2 ";
838 TLO.Old.getNode()->dump(&DAG);
839 dbgs() << "\nWith: ";
840 TLO.New.getNode()->dump(&DAG);
841 dbgs() << '\n');
843 CommitTargetLoweringOpt(TLO);
844 return true;
845 }
847 void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
848 SDLoc dl(Load);
849 EVT VT = Load->getValueType(0);
850 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, SDValue(ExtLoad, 0));
852 DEBUG(dbgs() << "\nReplacing.9 ";
853 Load->dump(&DAG);
854 dbgs() << "\nWith: ";
855 Trunc.getNode()->dump(&DAG);
856 dbgs() << '\n');
857 WorklistRemover DeadNodes(*this);
858 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
859 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
860 deleteAndRecombine(Load);
861 AddToWorklist(Trunc.getNode());
862 }
864 SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
865 Replace = false;
866 SDLoc dl(Op);
867 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
868 EVT MemVT = LD->getMemoryVT();
869 ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
870 ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
871 : ISD::EXTLOAD)
872 : LD->getExtensionType();
873 Replace = true;
874 return DAG.getExtLoad(ExtType, dl, PVT,
875 LD->getChain(), LD->getBasePtr(),
876 MemVT, LD->getMemOperand());
877 }
879 unsigned Opc = Op.getOpcode();
880 switch (Opc) {
881 default: break;
882 case ISD::AssertSext:
883 return DAG.getNode(ISD::AssertSext, dl, PVT,
884 SExtPromoteOperand(Op.getOperand(0), PVT),
885 Op.getOperand(1));
886 case ISD::AssertZext:
887 return DAG.getNode(ISD::AssertZext, dl, PVT,
888 ZExtPromoteOperand(Op.getOperand(0), PVT),
889 Op.getOperand(1));
890 case ISD::Constant: {
891 unsigned ExtOpc =
892 Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
893 return DAG.getNode(ExtOpc, dl, PVT, Op);
894 }
895 }
897 if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
898 return SDValue();
899 return DAG.getNode(ISD::ANY_EXTEND, dl, PVT, Op);
900 }
902 SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
903 if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
904 return SDValue();
905 EVT OldVT = Op.getValueType();
906 SDLoc dl(Op);
907 bool Replace = false;
908 SDValue NewOp = PromoteOperand(Op, PVT, Replace);
909 if (!NewOp.getNode())
910 return SDValue();
911 AddToWorklist(NewOp.getNode());
913 if (Replace)
914 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
915 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NewOp.getValueType(), NewOp,
916 DAG.getValueType(OldVT));
917 }
919 SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
920 EVT OldVT = Op.getValueType();
921 SDLoc dl(Op);
922 bool Replace = false;
923 SDValue NewOp = PromoteOperand(Op, PVT, Replace);
924 if (!NewOp.getNode())
925 return SDValue();
926 AddToWorklist(NewOp.getNode());
928 if (Replace)
929 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
930 return DAG.getZeroExtendInReg(NewOp, dl, OldVT);
931 }
933 /// Promote the specified integer binary operation if the target indicates it is
934 /// beneficial. e.g. On x86, it's usually better to promote i16 operations to
935 /// i32 since i16 instructions are longer.
936 SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
937 if (!LegalOperations)
938 return SDValue();
940 EVT VT = Op.getValueType();
941 if (VT.isVector() || !VT.isInteger())
942 return SDValue();
944 // If operation type is 'undesirable', e.g. i16 on x86, consider
945 // promoting it.
946 unsigned Opc = Op.getOpcode();
947 if (TLI.isTypeDesirableForOp(Opc, VT))
948 return SDValue();
950 EVT PVT = VT;
951 // Consult target whether it is a good idea to promote this operation and
952 // what's the right type to promote it to.
953 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
954 assert(PVT != VT && "Don't know what type to promote to!");
956 bool Replace0 = false;
957 SDValue N0 = Op.getOperand(0);
958 SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
959 if (!NN0.getNode())
960 return SDValue();
962 bool Replace1 = false;
963 SDValue N1 = Op.getOperand(1);
964 SDValue NN1;
965 if (N0 == N1)
966 NN1 = NN0;
967 else {
968 NN1 = PromoteOperand(N1, PVT, Replace1);
969 if (!NN1.getNode())
970 return SDValue();
971 }
973 AddToWorklist(NN0.getNode());
974 if (NN1.getNode())
975 AddToWorklist(NN1.getNode());
977 if (Replace0)
978 ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
979 if (Replace1)
980 ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());
982 DEBUG(dbgs() << "\nPromoting ";
983 Op.getNode()->dump(&DAG));
984 SDLoc dl(Op);
985 return DAG.getNode(ISD::TRUNCATE, dl, VT,
986 DAG.getNode(Opc, dl, PVT, NN0, NN1));
987 }
988 return SDValue();
989 }
991 /// Promote the specified integer shift operation if the target indicates it is
992 /// beneficial. e.g. On x86, it's usually better to promote i16 operations to
993 /// i32 since i16 instructions are longer.
994 SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
995 if (!LegalOperations)
996 return SDValue();
998 EVT VT = Op.getValueType();
999 if (VT.isVector() || !VT.isInteger())
1000 return SDValue();
1002 // If operation type is 'undesirable', e.g. i16 on x86, consider
1003 // promoting it.
1004 unsigned Opc = Op.getOpcode();
1005 if (TLI.isTypeDesirableForOp(Opc, VT))
1006 return SDValue();
1008 EVT PVT = VT;
1009 // Consult target whether it is a good idea to promote this operation and
1010 // what's the right type to promote it to.
1011 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1012 assert(PVT != VT && "Don't know what type to promote to!");
1014 bool Replace = false;
1015 SDValue N0 = Op.getOperand(0);
1016 if (Opc == ISD::SRA)
1017 N0 = SExtPromoteOperand(Op.getOperand(0), PVT);
1018 else if (Opc == ISD::SRL)
1019 N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
1020 else
1021 N0 = PromoteOperand(N0, PVT, Replace);
1022 if (!N0.getNode())
1023 return SDValue();
1025 AddToWorklist(N0.getNode());
1026 if (Replace)
1027 ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());
1029 DEBUG(dbgs() << "\nPromoting ";
1030 Op.getNode()->dump(&DAG));
1031 SDLoc dl(Op);
1032 return DAG.getNode(ISD::TRUNCATE, dl, VT,
1033 DAG.getNode(Opc, dl, PVT, N0, Op.getOperand(1)));
1034 }
1035 return SDValue();
1036 }
1038 SDValue DAGCombiner::PromoteExtend(SDValue Op) {
1039 if (!LegalOperations)
1040 return SDValue();
1042 EVT VT = Op.getValueType();
1043 if (VT.isVector() || !VT.isInteger())
1044 return SDValue();
1046 // If operation type is 'undesirable', e.g. i16 on x86, consider
1047 // promoting it.
1048 unsigned Opc = Op.getOpcode();
1049 if (TLI.isTypeDesirableForOp(Opc, VT))
1050 return SDValue();
1052 EVT PVT = VT;
1053 // Consult target whether it is a good idea to promote this operation and
1054 // what's the right type to promote it to.
1055 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1056 assert(PVT != VT && "Don't know what type to promote to!");
1057 // fold (aext (aext x)) -> (aext x)
1058 // fold (aext (zext x)) -> (zext x)
1059 // fold (aext (sext x)) -> (sext x)
1060 DEBUG(dbgs() << "\nPromoting ";
1061 Op.getNode()->dump(&DAG));
1062 return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
1063 }
1064 return SDValue();
1065 }
1067 bool DAGCombiner::PromoteLoad(SDValue Op) {
1068 if (!LegalOperations)
1069 return false;
1071 EVT VT = Op.getValueType();
1072 if (VT.isVector() || !VT.isInteger())
1073 return false;
1075 // If operation type is 'undesirable', e.g. i16 on x86, consider
1076 // promoting it.
1077 unsigned Opc = Op.getOpcode();
1078 if (TLI.isTypeDesirableForOp(Opc, VT))
1079 return false;
1081 EVT PVT = VT;
1082 // Consult target whether it is a good idea to promote this operation and
1083 // what's the right type to promote it to.
1084 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1085 assert(PVT != VT && "Don't know what type to promote to!");
1087 SDLoc dl(Op);
1088 SDNode *N = Op.getNode();
1089 LoadSDNode *LD = cast<LoadSDNode>(N);
1090 EVT MemVT = LD->getMemoryVT();
1091 ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
1092 ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
1093 : ISD::EXTLOAD)
1094 : LD->getExtensionType();
1095 SDValue NewLD = DAG.getExtLoad(ExtType, dl, PVT,
1096 LD->getChain(), LD->getBasePtr(),
1097 MemVT, LD->getMemOperand());
1098 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);
1100 DEBUG(dbgs() << "\nPromoting ";
1101 N->dump(&DAG);
1102 dbgs() << "\nTo: ";
1103 Result.getNode()->dump(&DAG);
1104 dbgs() << '\n');
1105 WorklistRemover DeadNodes(*this);
1106 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
1107 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
1108 deleteAndRecombine(N);
1109 AddToWorklist(Result.getNode());
1110 return true;
1111 }
1112 return false;
1113 }
1115 /// \brief Recursively delete a node which has no uses and any operands for
1116 /// which it is the only use.
1117 ///
1118 /// Note that this both deletes the nodes and removes them from the worklist.
1119 /// It also adds any nodes who have had a user deleted to the worklist as they
1120 /// may now have only one use and subject to other combines.
1121 bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
1122 if (!N->use_empty())
1123 return false;
1125 SmallSetVector<SDNode *, 16> Nodes;
1126 Nodes.insert(N);
1127 do {
1128 N = Nodes.pop_back_val();
1129 if (!N)
1130 continue;
1132 if (N->use_empty()) {
1133 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
1134 Nodes.insert(N->getOperand(i).getNode());
1136 removeFromWorklist(N);
1137 DAG.DeleteNode(N);
1138 } else {
1139 AddToWorklist(N);
1140 }
1141 } while (!Nodes.empty());
1142 return true;
1143 }
1145 //===----------------------------------------------------------------------===//
1146 // Main DAG Combiner implementation
1147 //===----------------------------------------------------------------------===//
1149 void DAGCombiner::Run(CombineLevel AtLevel) {
1150 // set the instance variables, so that the various visit routines may use it.
1151 Level = AtLevel;
1152 LegalOperations = Level >= AfterLegalizeVectorOps;
1153 LegalTypes = Level >= AfterLegalizeTypes;
1155 // Add all the dag nodes to the worklist.
1156 for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
1157 E = DAG.allnodes_end(); I != E; ++I)
1158 AddToWorklist(I);
1160 // Create a dummy node (which is not added to allnodes), that adds a reference
1161 // to the root node, preventing it from being deleted, and tracking any
1162 // changes of the root.
1163 HandleSDNode Dummy(DAG.getRoot());
1165 // while the worklist isn't empty, find a node and
1166 // try and combine it.
1167 while (!WorklistMap.empty()) {
1168 SDNode *N;
1169 // The Worklist holds the SDNodes in order, but it may contain null entries.
1170 do {
1171 N = Worklist.pop_back_val();
1172 } while (!N);
1174 bool GoodWorklistEntry = WorklistMap.erase(N);
1175 (void)GoodWorklistEntry;
1176 assert(GoodWorklistEntry &&
1177 "Found a worklist entry without a corresponding map entry!");
1179 // If N has no uses, it is dead. Make sure to revisit all N's operands once
1180 // N is deleted from the DAG, since they too may now be dead or may have a
1181 // reduced number of uses, allowing other xforms.
1182 if (recursivelyDeleteUnusedNodes(N))
1183 continue;
1185 WorklistRemover DeadNodes(*this);
1187 // If this combine is running after legalizing the DAG, re-legalize any
1188 // nodes pulled off the worklist.
1189 if (Level == AfterLegalizeDAG) {
1190 SmallSetVector<SDNode *, 16> UpdatedNodes;
1191 bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);
1193 for (SDNode *LN : UpdatedNodes) {
1194 AddToWorklist(LN);
1195 AddUsersToWorklist(LN);
1196 }
1197 if (!NIsValid)
1198 continue;
1199 }
1201 DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));
1203 // Add any operands of the new node which have not yet been combined to the
1204 // worklist as well. Because the worklist uniques things already, this
1205 // won't repeatedly process the same operand.
1206 CombinedNodes.insert(N);
1207 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
1208 if (!CombinedNodes.count(N->getOperand(i).getNode()))
1209 AddToWorklist(N->getOperand(i).getNode());
1211 SDValue RV = combine(N);
1213 if (!RV.getNode())
1214 continue;
1216 ++NodesCombined;
1218 // If we get back the same node we passed in, rather than a new node or
1219 // zero, we know that the node must have defined multiple values and
1220 // CombineTo was used. Since CombineTo takes care of the worklist
1221 // mechanics for us, we have no work to do in this case.
1222 if (RV.getNode() == N)
1223 continue;
1225 assert(N->getOpcode() != ISD::DELETED_NODE &&
1226 RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
1227 "Node was deleted but visit returned new node!");
1229 DEBUG(dbgs() << " ... into: ";
1230 RV.getNode()->dump(&DAG));
1232 // Transfer debug value.
1233 DAG.TransferDbgValues(SDValue(N, 0), RV);
1234 if (N->getNumValues() == RV.getNode()->getNumValues())
1235 DAG.ReplaceAllUsesWith(N, RV.getNode());
1236 else {
1237 assert(N->getValueType(0) == RV.getValueType() &&
1238 N->getNumValues() == 1 && "Type mismatch");
1239 SDValue OpV = RV;
1240 DAG.ReplaceAllUsesWith(N, &OpV);
1241 }
1243 // Push the new node and any users onto the worklist
1244 AddToWorklist(RV.getNode());
1245 AddUsersToWorklist(RV.getNode());
1247 // Finally, if the node is now dead, remove it from the graph. The node
1248 // may not be dead if the replacement process recursively simplified to
1249 // something else needing this node. This will also take care of adding any
1250 // operands which have lost a user to the worklist.
1251 recursivelyDeleteUnusedNodes(N);
1252 }
1254 // If the root changed (e.g. it was a dead load, update the root).
1255 DAG.setRoot(Dummy.getValue());
1256 DAG.RemoveDeadNodes();
1257 }
1259 SDValue DAGCombiner::visit(SDNode *N) {
1260 switch (N->getOpcode()) {
1261 default: break;
1262 case ISD::TokenFactor: return visitTokenFactor(N);
1263 case ISD::MERGE_VALUES: return visitMERGE_VALUES(N);
1264 case ISD::ADD: return visitADD(N);
1265 case ISD::SUB: return visitSUB(N);
1266 case ISD::ADDC: return visitADDC(N);
1267 case ISD::SUBC: return visitSUBC(N);
1268 case ISD::ADDE: return visitADDE(N);
1269 case ISD::SUBE: return visitSUBE(N);
1270 case ISD::MUL: return visitMUL(N);
1271 case ISD::SDIV: return visitSDIV(N);
1272 case ISD::UDIV: return visitUDIV(N);
1273 case ISD::SREM: return visitSREM(N);
1274 case ISD::UREM: return visitUREM(N);
1275 case ISD::MULHU: return visitMULHU(N);
1276 case ISD::MULHS: return visitMULHS(N);
1277 case ISD::SMUL_LOHI: return visitSMUL_LOHI(N);
1278 case ISD::UMUL_LOHI: return visitUMUL_LOHI(N);
1279 case ISD::SMULO: return visitSMULO(N);
1280 case ISD::UMULO: return visitUMULO(N);
1281 case ISD::SDIVREM: return visitSDIVREM(N);
1282 case ISD::UDIVREM: return visitUDIVREM(N);
1283 case ISD::AND: return visitAND(N);
1284 case ISD::OR: return visitOR(N);
1285 case ISD::XOR: return visitXOR(N);
1286 case ISD::SHL: return visitSHL(N);
1287 case ISD::SRA: return visitSRA(N);
1288 case ISD::SRL: return visitSRL(N);
1289 case ISD::ROTR:
1290 case ISD::ROTL: return visitRotate(N);
1291 case ISD::CTLZ: return visitCTLZ(N);
1292 case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
1293 case ISD::CTTZ: return visitCTTZ(N);
1294 case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N);
1295 case ISD::CTPOP: return visitCTPOP(N);
1296 case ISD::SELECT: return visitSELECT(N);
1297 case ISD::VSELECT: return visitVSELECT(N);
1298 case ISD::SELECT_CC: return visitSELECT_CC(N);
1299 case ISD::SETCC: return visitSETCC(N);
1300 case ISD::SIGN_EXTEND: return visitSIGN_EXTEND(N);
1301 case ISD::ZERO_EXTEND: return visitZERO_EXTEND(N);
1302 case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
1303 case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
1304 case ISD::TRUNCATE: return visitTRUNCATE(N);
1305 case ISD::BITCAST: return visitBITCAST(N);
1306 case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
1307 case ISD::FADD: return visitFADD(N);
1308 case ISD::FSUB: return visitFSUB(N);
1309 case ISD::FMUL: return visitFMUL(N);
1310 case ISD::FMA: return visitFMA(N);
1311 case ISD::FDIV: return visitFDIV(N);
1312 case ISD::FREM: return visitFREM(N);
1313 case ISD::FSQRT: return visitFSQRT(N);
1314 case ISD::FCOPYSIGN: return visitFCOPYSIGN(N);
1315 case ISD::SINT_TO_FP: return visitSINT_TO_FP(N);
1316 case ISD::UINT_TO_FP: return visitUINT_TO_FP(N);
1317 case ISD::FP_TO_SINT: return visitFP_TO_SINT(N);
1318 case ISD::FP_TO_UINT: return visitFP_TO_UINT(N);
1319 case ISD::FP_ROUND: return visitFP_ROUND(N);
1320 case ISD::FP_ROUND_INREG: return visitFP_ROUND_INREG(N);
1321 case ISD::FP_EXTEND: return visitFP_EXTEND(N);
1322 case ISD::FNEG: return visitFNEG(N);
1323 case ISD::FABS: return visitFABS(N);
1324 case ISD::FFLOOR: return visitFFLOOR(N);
1325 case ISD::FCEIL: return visitFCEIL(N);
1326 case ISD::FTRUNC: return visitFTRUNC(N);
1327 case ISD::BRCOND: return visitBRCOND(N);
1328 case ISD::BR_CC: return visitBR_CC(N);
1329 case ISD::LOAD: return visitLOAD(N);
1330 case ISD::STORE: return visitSTORE(N);
1331 case ISD::INSERT_VECTOR_ELT: return visitINSERT_VECTOR_ELT(N);
1332 case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
1333 case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
1334 case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
1335 case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N);
1336 case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
1337 case ISD::INSERT_SUBVECTOR: return visitINSERT_SUBVECTOR(N);
1338 }
1339 return SDValue();
1340 }
1342 SDValue DAGCombiner::combine(SDNode *N) {
1343 SDValue RV = visit(N);
1345 // If nothing happened, try a target-specific DAG combine.
1346 if (!RV.getNode()) {
1347 assert(N->getOpcode() != ISD::DELETED_NODE &&
1348 "Node was deleted but visit returned NULL!");
1350 if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
1351 TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {
1353 // Expose the DAG combiner to the target combiner impls.
1354 TargetLowering::DAGCombinerInfo
1355 DagCombineInfo(DAG, Level, false, this);
1357 RV = TLI.PerformDAGCombine(N, DagCombineInfo);
1358 }
1359 }
1361 // If nothing happened still, try promoting the operation.
1362 if (!RV.getNode()) {
1363 switch (N->getOpcode()) {
1364 default: break;
1365 case ISD::ADD:
1366 case ISD::SUB:
1367 case ISD::MUL:
1368 case ISD::AND:
1369 case ISD::OR:
1370 case ISD::XOR:
1371 RV = PromoteIntBinOp(SDValue(N, 0));
1372 break;
1373 case ISD::SHL:
1374 case ISD::SRA:
1375 case ISD::SRL:
1376 RV = PromoteIntShiftOp(SDValue(N, 0));
1377 break;
1378 case ISD::SIGN_EXTEND:
1379 case ISD::ZERO_EXTEND:
1380 case ISD::ANY_EXTEND:
1381 RV = PromoteExtend(SDValue(N, 0));
1382 break;
1383 case ISD::LOAD:
1384 if (PromoteLoad(SDValue(N, 0)))
1385 RV = SDValue(N, 0);
1386 break;
1387 }
1388 }
1390 // If N is a commutative binary node, try commuting it to enable more
1391 // sdisel CSE.
1392 if (!RV.getNode() && SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
1393 N->getNumValues() == 1) {
1394 SDValue N0 = N->getOperand(0);
1395 SDValue N1 = N->getOperand(1);
1397 // Constant operands are canonicalized to RHS.
1398 if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
1399 SDValue Ops[] = {N1, N0};
1400 SDNode *CSENode;
1401 if (const BinaryWithFlagsSDNode *BinNode =
1402 dyn_cast<BinaryWithFlagsSDNode>(N)) {
1403 CSENode = DAG.getNodeIfExists(
1404 N->getOpcode(), N->getVTList(), Ops, BinNode->hasNoUnsignedWrap(),
1405 BinNode->hasNoSignedWrap(), BinNode->isExact());
1406 } else {
1407 CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops);
1408 }
1409 if (CSENode)
1410 return SDValue(CSENode, 0);
1411 }
1412 }
1414 return RV;
1415 }
1417 /// Given a node, return its input chain if it has one, otherwise return a null
1418 /// sd operand.
1419 static SDValue getInputChainForNode(SDNode *N) {
1420 if (unsigned NumOps = N->getNumOperands()) {
1421 if (N->getOperand(0).getValueType() == MVT::Other)
1422 return N->getOperand(0);
1423 if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
1424 return N->getOperand(NumOps-1);
1425 for (unsigned i = 1; i < NumOps-1; ++i)
1426 if (N->getOperand(i).getValueType() == MVT::Other)
1427 return N->getOperand(i);
1428 }
1429 return SDValue();
1430 }
1432 SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
1433 // If N has two operands, where one has an input chain equal to the other,
1434 // the 'other' chain is redundant.
1435 if (N->getNumOperands() == 2) {
1436 if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
1437 return N->getOperand(0);
1438 if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
1439 return N->getOperand(1);
1440 }
1442 SmallVector<SDNode *, 8> TFs; // List of token factors to visit.
1443 SmallVector<SDValue, 8> Ops; // Ops for replacing token factor.
1444 SmallPtrSet<SDNode*, 16> SeenOps;
1445 bool Changed = false; // If we should replace this token factor.
1447 // Start out with this token factor.
1448 TFs.push_back(N);
1450 // Iterate through token factors. The TFs grows when new token factors are
1451 // encountered.
1452 for (unsigned i = 0; i < TFs.size(); ++i) {
1453 SDNode *TF = TFs[i];
1455 // Check each of the operands.
1456 for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
1457 SDValue Op = TF->getOperand(i);
1459 switch (Op.getOpcode()) {
1460 case ISD::EntryToken:
1461 // Entry tokens don't need to be added to the list. They are
1462 // rededundant.
1463 Changed = true;
1464 break;
1466 case ISD::TokenFactor:
1467 if (Op.hasOneUse() &&
1468 std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
1469 // Queue up for processing.
1470 TFs.push_back(Op.getNode());
1471 // Clean up in case the token factor is removed.
1472 AddToWorklist(Op.getNode());
1473 Changed = true;
1474 break;
1475 }
1476 // Fall thru
1478 default:
1479 // Only add if it isn't already in the list.
1480 if (SeenOps.insert(Op.getNode()))
1481 Ops.push_back(Op);
1482 else
1483 Changed = true;
1484 break;
1485 }
1486 }
1487 }
1489 SDValue Result;
1491 // If we've change things around then replace token factor.
1492 if (Changed) {
1493 if (Ops.empty()) {
1494 // The entry token is the only possible outcome.
1495 Result = DAG.getEntryNode();
1496 } else {
1497 // New and improved token factor.
1498 Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
1499 }
1501 // Don't add users to work list.
1502 return CombineTo(N, Result, false);
1503 }
1505 return Result;
1506 }
1508 /// MERGE_VALUES can always be eliminated.
1509 SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
1510 WorklistRemover DeadNodes(*this);
1511 // Replacing results may cause a different MERGE_VALUES to suddenly
1512 // be CSE'd with N, and carry its uses with it. Iterate until no
1513 // uses remain, to ensure that the node can be safely deleted.
1514 // First add the users of this node to the work list so that they
1515 // can be tried again once they have new operands.
1516 AddUsersToWorklist(N);
1517 do {
1518 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
1519 DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
1520 } while (!N->use_empty());
1521 deleteAndRecombine(N);
1522 return SDValue(N, 0); // Return N so it doesn't get rechecked!
1523 }
1525 SDValue DAGCombiner::visitADD(SDNode *N) {
1526 SDValue N0 = N->getOperand(0);
1527 SDValue N1 = N->getOperand(1);
1528 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1529 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1530 EVT VT = N0.getValueType();
1532 // fold vector ops
1533 if (VT.isVector()) {
1534 SDValue FoldedVOp = SimplifyVBinOp(N);
1535 if (FoldedVOp.getNode()) return FoldedVOp;
1537 // fold (add x, 0) -> x, vector edition
1538 if (ISD::isBuildVectorAllZeros(N1.getNode()))
1539 return N0;
1540 if (ISD::isBuildVectorAllZeros(N0.getNode()))
1541 return N1;
1542 }
1544 // fold (add x, undef) -> undef
1545 if (N0.getOpcode() == ISD::UNDEF)
1546 return N0;
1547 if (N1.getOpcode() == ISD::UNDEF)
1548 return N1;
1549 // fold (add c1, c2) -> c1+c2
1550 if (N0C && N1C)
1551 return DAG.FoldConstantArithmetic(ISD::ADD, VT, N0C, N1C);
1552 // canonicalize constant to RHS
1553 if (N0C && !N1C)
1554 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N1, N0);
1555 // fold (add x, 0) -> x
1556 if (N1C && N1C->isNullValue())
1557 return N0;
1558 // fold (add Sym, c) -> Sym+c
1559 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
1560 if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
1561 GA->getOpcode() == ISD::GlobalAddress)
1562 return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
1563 GA->getOffset() +
1564 (uint64_t)N1C->getSExtValue());
1565 // fold ((c1-A)+c2) -> (c1+c2)-A
1566 if (N1C && N0.getOpcode() == ISD::SUB)
1567 if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
1568 return DAG.getNode(ISD::SUB, SDLoc(N), VT,
1569 DAG.getConstant(N1C->getAPIntValue()+
1570 N0C->getAPIntValue(), VT),
1571 N0.getOperand(1));
1572 // reassociate add
1573 SDValue RADD = ReassociateOps(ISD::ADD, SDLoc(N), N0, N1);
1574 if (RADD.getNode())
1575 return RADD;
1576 // fold ((0-A) + B) -> B-A
1577 if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
1578 cast<ConstantSDNode>(N0.getOperand(0))->isNullValue())
1579 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1, N0.getOperand(1));
1580 // fold (A + (0-B)) -> A-B
1581 if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) &&
1582 cast<ConstantSDNode>(N1.getOperand(0))->isNullValue())
1583 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, N1.getOperand(1));
1584 // fold (A+(B-A)) -> B
1585 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
1586 return N1.getOperand(0);
1587 // fold ((B-A)+A) -> B
1588 if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
1589 return N0.getOperand(0);
1590 // fold (A+(B-(A+C))) to (B-C)
1591 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1592 N0 == N1.getOperand(1).getOperand(0))
1593 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1.getOperand(0),
1594 N1.getOperand(1).getOperand(1));
1595 // fold (A+(B-(C+A))) to (B-C)
1596 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1597 N0 == N1.getOperand(1).getOperand(1))
1598 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1.getOperand(0),
1599 N1.getOperand(1).getOperand(0));
1600 // fold (A+((B-A)+or-C)) to (B+or-C)
1601 if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
1602 N1.getOperand(0).getOpcode() == ISD::SUB &&
1603 N0 == N1.getOperand(0).getOperand(1))
1604 return DAG.getNode(N1.getOpcode(), SDLoc(N), VT,
1605 N1.getOperand(0).getOperand(0), N1.getOperand(1));
1607 // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
1608 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
1609 SDValue N00 = N0.getOperand(0);
1610 SDValue N01 = N0.getOperand(1);
1611 SDValue N10 = N1.getOperand(0);
1612 SDValue N11 = N1.getOperand(1);
1614 if (isa<ConstantSDNode>(N00) || isa<ConstantSDNode>(N10))
1615 return DAG.getNode(ISD::SUB, SDLoc(N), VT,
1616 DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10),
1617 DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11));
1618 }
1620 if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0)))
1621 return SDValue(N, 0);
1623 // fold (a+b) -> (a|b) iff a and b share no bits.
1624 if (VT.isInteger() && !VT.isVector()) {
1625 APInt LHSZero, LHSOne;
1626 APInt RHSZero, RHSOne;
1627 DAG.computeKnownBits(N0, LHSZero, LHSOne);
1629 if (LHSZero.getBoolValue()) {
1630 DAG.computeKnownBits(N1, RHSZero, RHSOne);
1632 // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1633 // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1634 if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero){
1635 if (!LegalOperations || TLI.isOperationLegal(ISD::OR, VT))
1636 return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1);
1637 }
1638 }
1639 }
1641 // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
1642 if (N1.getOpcode() == ISD::SHL &&
1643 N1.getOperand(0).getOpcode() == ISD::SUB)
1644 if (ConstantSDNode *C =
1645 dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(0)))
1646 if (C->getAPIntValue() == 0)
1647 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0,
1648 DAG.getNode(ISD::SHL, SDLoc(N), VT,
1649 N1.getOperand(0).getOperand(1),
1650 N1.getOperand(1)));
1651 if (N0.getOpcode() == ISD::SHL &&
1652 N0.getOperand(0).getOpcode() == ISD::SUB)
1653 if (ConstantSDNode *C =
1654 dyn_cast<ConstantSDNode>(N0.getOperand(0).getOperand(0)))
1655 if (C->getAPIntValue() == 0)
1656 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1,
1657 DAG.getNode(ISD::SHL, SDLoc(N), VT,
1658 N0.getOperand(0).getOperand(1),
1659 N0.getOperand(1)));
1661 if (N1.getOpcode() == ISD::AND) {
1662 SDValue AndOp0 = N1.getOperand(0);
1663 ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
1664 unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
1665 unsigned DestBits = VT.getScalarType().getSizeInBits();
1667 // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
1668 // and similar xforms where the inner op is either ~0 or 0.
1669 if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
1670 SDLoc DL(N);
1671 return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
1672 }
1673 }
1675 // add (sext i1), X -> sub X, (zext i1)
1676 if (N0.getOpcode() == ISD::SIGN_EXTEND &&
1677 N0.getOperand(0).getValueType() == MVT::i1 &&
1678 !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) {
1679 SDLoc DL(N);
1680 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
1681 return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
1682 }
1684 return SDValue();
1685 }
1687 SDValue DAGCombiner::visitADDC(SDNode *N) {
1688 SDValue N0 = N->getOperand(0);
1689 SDValue N1 = N->getOperand(1);
1690 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1691 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1692 EVT VT = N0.getValueType();
1694 // If the flag result is dead, turn this into an ADD.
1695 if (!N->hasAnyUseOfValue(1))
1696 return CombineTo(N, DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N1),
1697 DAG.getNode(ISD::CARRY_FALSE,
1698 SDLoc(N), MVT::Glue));
1700 // canonicalize constant to RHS.
1701 if (N0C && !N1C)
1702 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N1, N0);
1704 // fold (addc x, 0) -> x + no carry out
1705 if (N1C && N1C->isNullValue())
1706 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
1707 SDLoc(N), MVT::Glue));
1709 // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
1710 APInt LHSZero, LHSOne;
1711 APInt RHSZero, RHSOne;
1712 DAG.computeKnownBits(N0, LHSZero, LHSOne);
1714 if (LHSZero.getBoolValue()) {
1715 DAG.computeKnownBits(N1, RHSZero, RHSOne);
1717 // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1718 // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1719 if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
1720 return CombineTo(N, DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1),
1721 DAG.getNode(ISD::CARRY_FALSE,
1722 SDLoc(N), MVT::Glue));
1723 }
1725 return SDValue();
1726 }
1728 SDValue DAGCombiner::visitADDE(SDNode *N) {
1729 SDValue N0 = N->getOperand(0);
1730 SDValue N1 = N->getOperand(1);
1731 SDValue CarryIn = N->getOperand(2);
1732 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1733 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1735 // canonicalize constant to RHS
1736 if (N0C && !N1C)
1737 return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(),
1738 N1, N0, CarryIn);
1740 // fold (adde x, y, false) -> (addc x, y)
1741 if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
1742 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1);
1744 return SDValue();
1745 }
1747 // Since it may not be valid to emit a fold to zero for vector initializers
1748 // check if we can before folding.
1749 static SDValue tryFoldToZero(SDLoc DL, const TargetLowering &TLI, EVT VT,
1750 SelectionDAG &DAG,
1751 bool LegalOperations, bool LegalTypes) {
1752 if (!VT.isVector())
1753 return DAG.getConstant(0, VT);
1754 if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
1755 return DAG.getConstant(0, VT);
1756 return SDValue();
1757 }
1759 SDValue DAGCombiner::visitSUB(SDNode *N) {
1760 SDValue N0 = N->getOperand(0);
1761 SDValue N1 = N->getOperand(1);
1762 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1763 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1764 ConstantSDNode *N1C1 = N1.getOpcode() != ISD::ADD ? nullptr :
1765 dyn_cast<ConstantSDNode>(N1.getOperand(1).getNode());
1766 EVT VT = N0.getValueType();
1768 // fold vector ops
1769 if (VT.isVector()) {
1770 SDValue FoldedVOp = SimplifyVBinOp(N);
1771 if (FoldedVOp.getNode()) return FoldedVOp;
1773 // fold (sub x, 0) -> x, vector edition
1774 if (ISD::isBuildVectorAllZeros(N1.getNode()))
1775 return N0;
1776 }
1778 // fold (sub x, x) -> 0
1779 // FIXME: Refactor this and xor and other similar operations together.
1780 if (N0 == N1)
1781 return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);
1782 // fold (sub c1, c2) -> c1-c2
1783 if (N0C && N1C)
1784 return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C);
1785 // fold (sub x, c) -> (add x, -c)
1786 if (N1C)
1787 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0,
1788 DAG.getConstant(-N1C->getAPIntValue(), VT));
1789 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
1790 if (N0C && N0C->isAllOnesValue())
1791 return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
1792 // fold A-(A-B) -> B
1793 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
1794 return N1.getOperand(1);
1795 // fold (A+B)-A -> B
1796 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
1797 return N0.getOperand(1);
1798 // fold (A+B)-B -> A
1799 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
1800 return N0.getOperand(0);
1801 // fold C2-(A+C1) -> (C2-C1)-A
1802 if (N1.getOpcode() == ISD::ADD && N0C && N1C1) {
1803 SDValue NewC = DAG.getConstant(N0C->getAPIntValue() - N1C1->getAPIntValue(),
1804 VT);
1805 return DAG.getNode(ISD::SUB, SDLoc(N), VT, NewC,
1806 N1.getOperand(0));
1807 }
1808 // fold ((A+(B+or-C))-B) -> A+or-C
1809 if (N0.getOpcode() == ISD::ADD &&
1810 (N0.getOperand(1).getOpcode() == ISD::SUB ||
1811 N0.getOperand(1).getOpcode() == ISD::ADD) &&
1812 N0.getOperand(1).getOperand(0) == N1)
1813 return DAG.getNode(N0.getOperand(1).getOpcode(), SDLoc(N), VT,
1814 N0.getOperand(0), N0.getOperand(1).getOperand(1));
1815 // fold ((A+(C+B))-B) -> A+C
1816 if (N0.getOpcode() == ISD::ADD &&
1817 N0.getOperand(1).getOpcode() == ISD::ADD &&
1818 N0.getOperand(1).getOperand(1) == N1)
1819 return DAG.getNode(ISD::ADD, SDLoc(N), VT,
1820 N0.getOperand(0), N0.getOperand(1).getOperand(0));
1821 // fold ((A-(B-C))-C) -> A-B
1822 if (N0.getOpcode() == ISD::SUB &&
1823 N0.getOperand(1).getOpcode() == ISD::SUB &&
1824 N0.getOperand(1).getOperand(1) == N1)
1825 return DAG.getNode(ISD::SUB, SDLoc(N), VT,
1826 N0.getOperand(0), N0.getOperand(1).getOperand(0));
1828 // If either operand of a sub is undef, the result is undef
1829 if (N0.getOpcode() == ISD::UNDEF)
1830 return N0;
1831 if (N1.getOpcode() == ISD::UNDEF)
1832 return N1;
1834 // If the relocation model supports it, consider symbol offsets.
1835 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
1836 if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
1837 // fold (sub Sym, c) -> Sym-c
1838 if (N1C && GA->getOpcode() == ISD::GlobalAddress)
1839 return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
1840 GA->getOffset() -
1841 (uint64_t)N1C->getSExtValue());
1842 // fold (sub Sym+c1, Sym+c2) -> c1-c2
1843 if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
1844 if (GA->getGlobal() == GB->getGlobal())
1845 return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
1846 VT);
1847 }
1849 return SDValue();
1850 }
1852 SDValue DAGCombiner::visitSUBC(SDNode *N) {
1853 SDValue N0 = N->getOperand(0);
1854 SDValue N1 = N->getOperand(1);
1855 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1856 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1857 EVT VT = N0.getValueType();
1859 // If the flag result is dead, turn this into an SUB.
1860 if (!N->hasAnyUseOfValue(1))
1861 return CombineTo(N, DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, N1),
1862 DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
1863 MVT::Glue));
1865 // fold (subc x, x) -> 0 + no borrow
1866 if (N0 == N1)
1867 return CombineTo(N, DAG.getConstant(0, VT),
1868 DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
1869 MVT::Glue));
1871 // fold (subc x, 0) -> x + no borrow
1872 if (N1C && N1C->isNullValue())
1873 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
1874 MVT::Glue));
1876 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow
1877 if (N0C && N0C->isAllOnesValue())
1878 return CombineTo(N, DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0),
1879 DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
1880 MVT::Glue));
1882 return SDValue();
1883 }
1885 SDValue DAGCombiner::visitSUBE(SDNode *N) {
1886 SDValue N0 = N->getOperand(0);
1887 SDValue N1 = N->getOperand(1);
1888 SDValue CarryIn = N->getOperand(2);
1890 // fold (sube x, y, false) -> (subc x, y)
1891 if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
1892 return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1);
1894 return SDValue();
1895 }
1897 SDValue DAGCombiner::visitMUL(SDNode *N) {
1898 SDValue N0 = N->getOperand(0);
1899 SDValue N1 = N->getOperand(1);
1900 EVT VT = N0.getValueType();
1902 // fold (mul x, undef) -> 0
1903 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1904 return DAG.getConstant(0, VT);
1906 bool N0IsConst = false;
1907 bool N1IsConst = false;
1908 APInt ConstValue0, ConstValue1;
1909 // fold vector ops
1910 if (VT.isVector()) {
1911 SDValue FoldedVOp = SimplifyVBinOp(N);
1912 if (FoldedVOp.getNode()) return FoldedVOp;
1914 N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0);
1915 N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
1916 } else {
1917 N0IsConst = dyn_cast<ConstantSDNode>(N0) != nullptr;
1918 ConstValue0 = N0IsConst ? (dyn_cast<ConstantSDNode>(N0))->getAPIntValue()
1919 : APInt();
1920 N1IsConst = dyn_cast<ConstantSDNode>(N1) != nullptr;
1921 ConstValue1 = N1IsConst ? (dyn_cast<ConstantSDNode>(N1))->getAPIntValue()
1922 : APInt();
1923 }
1925 // fold (mul c1, c2) -> c1*c2
1926 if (N0IsConst && N1IsConst)
1927 return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0.getNode(), N1.getNode());
1929 // canonicalize constant to RHS
1930 if (N0IsConst && !N1IsConst)
1931 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0);
1932 // fold (mul x, 0) -> 0
1933 if (N1IsConst && ConstValue1 == 0)
1934 return N1;
1935 // We require a splat of the entire scalar bit width for non-contiguous
1936 // bit patterns.
1937 bool IsFullSplat =
1938 ConstValue1.getBitWidth() == VT.getScalarType().getSizeInBits();
1939 // fold (mul x, 1) -> x
1940 if (N1IsConst && ConstValue1 == 1 && IsFullSplat)
1941 return N0;
1942 // fold (mul x, -1) -> 0-x
1943 if (N1IsConst && ConstValue1.isAllOnesValue())
1944 return DAG.getNode(ISD::SUB, SDLoc(N), VT,
1945 DAG.getConstant(0, VT), N0);
1946 // fold (mul x, (1 << c)) -> x << c
1947 if (N1IsConst && ConstValue1.isPowerOf2() && IsFullSplat)
1948 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0,
1949 DAG.getConstant(ConstValue1.logBase2(),
1950 getShiftAmountTy(N0.getValueType())));
1951 // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
1952 if (N1IsConst && (-ConstValue1).isPowerOf2() && IsFullSplat) {
1953 unsigned Log2Val = (-ConstValue1).logBase2();
1954 // FIXME: If the input is something that is easily negated (e.g. a
1955 // single-use add), we should put the negate there.
1956 return DAG.getNode(ISD::SUB, SDLoc(N), VT,
1957 DAG.getConstant(0, VT),
1958 DAG.getNode(ISD::SHL, SDLoc(N), VT, N0,
1959 DAG.getConstant(Log2Val,
1960 getShiftAmountTy(N0.getValueType()))));
1961 }
1963 APInt Val;
1964 // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
1965 if (N1IsConst && N0.getOpcode() == ISD::SHL &&
1966 (isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
1967 isa<ConstantSDNode>(N0.getOperand(1)))) {
1968 SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT,
1969 N1, N0.getOperand(1));
1970 AddToWorklist(C3.getNode());
1971 return DAG.getNode(ISD::MUL, SDLoc(N), VT,
1972 N0.getOperand(0), C3);
1973 }
1975 // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
1976 // use.
1977 {
1978 SDValue Sh(nullptr,0), Y(nullptr,0);
1979 // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
1980 if (N0.getOpcode() == ISD::SHL &&
1981 (isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
1982 isa<ConstantSDNode>(N0.getOperand(1))) &&
1983 N0.getNode()->hasOneUse()) {
1984 Sh = N0; Y = N1;
1985 } else if (N1.getOpcode() == ISD::SHL &&
1986 isa<ConstantSDNode>(N1.getOperand(1)) &&
1987 N1.getNode()->hasOneUse()) {
1988 Sh = N1; Y = N0;
1989 }
1991 if (Sh.getNode()) {
1992 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT,
1993 Sh.getOperand(0), Y);
1994 return DAG.getNode(ISD::SHL, SDLoc(N), VT,
1995 Mul, Sh.getOperand(1));
1996 }
1997 }
1999 // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
2000 if (N1IsConst && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
2001 (isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
2002 isa<ConstantSDNode>(N0.getOperand(1))))
2003 return DAG.getNode(ISD::ADD, SDLoc(N), VT,
2004 DAG.getNode(ISD::MUL, SDLoc(N0), VT,
2005 N0.getOperand(0), N1),
2006 DAG.getNode(ISD::MUL, SDLoc(N1), VT,
2007 N0.getOperand(1), N1));
2009 // reassociate mul
2010 SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1);
2011 if (RMUL.getNode())
2012 return RMUL;
2014 return SDValue();
2015 }
2017 SDValue DAGCombiner::visitSDIV(SDNode *N) {
2018 SDValue N0 = N->getOperand(0);
2019 SDValue N1 = N->getOperand(1);
2020 ConstantSDNode *N0C = isConstOrConstSplat(N0);
2021 ConstantSDNode *N1C = isConstOrConstSplat(N1);
2022 EVT VT = N->getValueType(0);
2024 // fold vector ops
2025 if (VT.isVector()) {
2026 SDValue FoldedVOp = SimplifyVBinOp(N);
2027 if (FoldedVOp.getNode()) return FoldedVOp;
2028 }
2030 // fold (sdiv c1, c2) -> c1/c2
2031 if (N0C && N1C && !N1C->isNullValue())
2032 return DAG.FoldConstantArithmetic(ISD::SDIV, VT, N0C, N1C);
2033 // fold (sdiv X, 1) -> X
2034 if (N1C && N1C->getAPIntValue() == 1LL)
2035 return N0;
2036 // fold (sdiv X, -1) -> 0-X
2037 if (N1C && N1C->isAllOnesValue())
2038 return DAG.getNode(ISD::SUB, SDLoc(N), VT,
2039 DAG.getConstant(0, VT), N0);
2040 // If we know the sign bits of both operands are zero, strength reduce to a
2041 // udiv instead. Handles (X&15) /s 4 -> X&15 >> 2
2042 if (!VT.isVector()) {
2043 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
2044 return DAG.getNode(ISD::UDIV, SDLoc(N), N1.getValueType(),
2045 N0, N1);
2046 }
2048 // fold (sdiv X, pow2) -> simple ops after legalize
2049 if (N1C && !N1C->isNullValue() && (N1C->getAPIntValue().isPowerOf2() ||
2050 (-N1C->getAPIntValue()).isPowerOf2())) {
2051 // If dividing by powers of two is cheap, then don't perform the following
2052 // fold.
2053 if (TLI.isPow2SDivCheap())
2054 return SDValue();
2056 // Target-specific implementation of sdiv x, pow2.
2057 SDValue Res = BuildSDIVPow2(N);
2058 if (Res.getNode())
2059 return Res;
2061 unsigned lg2 = N1C->getAPIntValue().countTrailingZeros();
2063 // Splat the sign bit into the register
2064 SDValue SGN =
2065 DAG.getNode(ISD::SRA, SDLoc(N), VT, N0,
2066 DAG.getConstant(VT.getScalarSizeInBits() - 1,
2067 getShiftAmountTy(N0.getValueType())));
2068 AddToWorklist(SGN.getNode());
2070 // Add (N0 < 0) ? abs2 - 1 : 0;
2071 SDValue SRL =
2072 DAG.getNode(ISD::SRL, SDLoc(N), VT, SGN,
2073 DAG.getConstant(VT.getScalarSizeInBits() - lg2,
2074 getShiftAmountTy(SGN.getValueType())));
2075 SDValue ADD = DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, SRL);
2076 AddToWorklist(SRL.getNode());
2077 AddToWorklist(ADD.getNode()); // Divide by pow2
2078 SDValue SRA = DAG.getNode(ISD::SRA, SDLoc(N), VT, ADD,
2079 DAG.getConstant(lg2, getShiftAmountTy(ADD.getValueType())));
2081 // If we're dividing by a positive value, we're done. Otherwise, we must
2082 // negate the result.
2083 if (N1C->getAPIntValue().isNonNegative())
2084 return SRA;
2086 AddToWorklist(SRA.getNode());
2087 return DAG.getNode(ISD::SUB, SDLoc(N), VT, DAG.getConstant(0, VT), SRA);
2088 }
2090 // if integer divide is expensive and we satisfy the requirements, emit an
2091 // alternate sequence.
2092 if (N1C && !TLI.isIntDivCheap()) {
2093 SDValue Op = BuildSDIV(N);
2094 if (Op.getNode()) return Op;
2095 }
2097 // undef / X -> 0
2098 if (N0.getOpcode() == ISD::UNDEF)
2099 return DAG.getConstant(0, VT);
2100 // X / undef -> undef
2101 if (N1.getOpcode() == ISD::UNDEF)
2102 return N1;
2104 return SDValue();
2105 }
2107 SDValue DAGCombiner::visitUDIV(SDNode *N) {
2108 SDValue N0 = N->getOperand(0);
2109 SDValue N1 = N->getOperand(1);
2110 ConstantSDNode *N0C = isConstOrConstSplat(N0);
2111 ConstantSDNode *N1C = isConstOrConstSplat(N1);
2112 EVT VT = N->getValueType(0);
2114 // fold vector ops
2115 if (VT.isVector()) {
2116 SDValue FoldedVOp = SimplifyVBinOp(N);
2117 if (FoldedVOp.getNode()) return FoldedVOp;
2118 }
2120 // fold (udiv c1, c2) -> c1/c2
2121 if (N0C && N1C && !N1C->isNullValue())
2122 return DAG.FoldConstantArithmetic(ISD::UDIV, VT, N0C, N1C);
2123 // fold (udiv x, (1 << c)) -> x >>u c
2124 if (N1C && N1C->getAPIntValue().isPowerOf2())
2125 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0,
2126 DAG.getConstant(N1C->getAPIntValue().logBase2(),
2127 getShiftAmountTy(N0.getValueType())));
2128 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
2129 if (N1.getOpcode() == ISD::SHL) {
2130 if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
2131 if (SHC->getAPIntValue().isPowerOf2()) {
2132 EVT ADDVT = N1.getOperand(1).getValueType();
2133 SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N), ADDVT,
2134 N1.getOperand(1),
2135 DAG.getConstant(SHC->getAPIntValue()
2136 .logBase2(),
2137 ADDVT));
2138 AddToWorklist(Add.getNode());
2139 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, Add);
2140 }
2141 }
2142 }
2143 // fold (udiv x, c) -> alternate
2144 if (N1C && !TLI.isIntDivCheap()) {
2145 SDValue Op = BuildUDIV(N);
2146 if (Op.getNode()) return Op;
2147 }
2149 // undef / X -> 0
2150 if (N0.getOpcode() == ISD::UNDEF)
2151 return DAG.getConstant(0, VT);
2152 // X / undef -> undef
2153 if (N1.getOpcode() == ISD::UNDEF)
2154 return N1;
2156 return SDValue();
2157 }
2159 SDValue DAGCombiner::visitSREM(SDNode *N) {
2160 SDValue N0 = N->getOperand(0);
2161 SDValue N1 = N->getOperand(1);
2162 ConstantSDNode *N0C = isConstOrConstSplat(N0);
2163 ConstantSDNode *N1C = isConstOrConstSplat(N1);
2164 EVT VT = N->getValueType(0);
2166 // fold (srem c1, c2) -> c1%c2
2167 if (N0C && N1C && !N1C->isNullValue())
2168 return DAG.FoldConstantArithmetic(ISD::SREM, VT, N0C, N1C);
2169 // If we know the sign bits of both operands are zero, strength reduce to a
2170 // urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15
2171 if (!VT.isVector()) {
2172 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
2173 return DAG.getNode(ISD::UREM, SDLoc(N), VT, N0, N1);
2174 }
2176 // If X/C can be simplified by the division-by-constant logic, lower
2177 // X%C to the equivalent of X-X/C*C.
2178 if (N1C && !N1C->isNullValue()) {
2179 SDValue Div = DAG.getNode(ISD::SDIV, SDLoc(N), VT, N0, N1);
2180 AddToWorklist(Div.getNode());
2181 SDValue OptimizedDiv = combine(Div.getNode());
2182 if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
2183 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT,
2184 OptimizedDiv, N1);
2185 SDValue Sub = DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, Mul);
2186 AddToWorklist(Mul.getNode());
2187 return Sub;
2188 }
2189 }
2191 // undef % X -> 0
2192 if (N0.getOpcode() == ISD::UNDEF)
2193 return DAG.getConstant(0, VT);
2194 // X % undef -> undef
2195 if (N1.getOpcode() == ISD::UNDEF)
2196 return N1;
2198 return SDValue();
2199 }
2201 SDValue DAGCombiner::visitUREM(SDNode *N) {
2202 SDValue N0 = N->getOperand(0);
2203 SDValue N1 = N->getOperand(1);
2204 ConstantSDNode *N0C = isConstOrConstSplat(N0);
2205 ConstantSDNode *N1C = isConstOrConstSplat(N1);
2206 EVT VT = N->getValueType(0);
2208 // fold (urem c1, c2) -> c1%c2
2209 if (N0C && N1C && !N1C->isNullValue())
2210 return DAG.FoldConstantArithmetic(ISD::UREM, VT, N0C, N1C);
2211 // fold (urem x, pow2) -> (and x, pow2-1)
2212 if (N1C && !N1C->isNullValue() && N1C->getAPIntValue().isPowerOf2())
2213 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0,
2214 DAG.getConstant(N1C->getAPIntValue()-1,VT));
2215 // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
2216 if (N1.getOpcode() == ISD::SHL) {
2217 if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
2218 if (SHC->getAPIntValue().isPowerOf2()) {
2219 SDValue Add =
2220 DAG.getNode(ISD::ADD, SDLoc(N), VT, N1,
2221 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()),
2222 VT));
2223 AddToWorklist(Add.getNode());
2224 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, Add);
2225 }
2226 }
2227 }
2229 // If X/C can be simplified by the division-by-constant logic, lower
2230 // X%C to the equivalent of X-X/C*C.
2231 if (N1C && !N1C->isNullValue()) {
2232 SDValue Div = DAG.getNode(ISD::UDIV, SDLoc(N), VT, N0, N1);
2233 AddToWorklist(Div.getNode());
2234 SDValue OptimizedDiv = combine(Div.getNode());
2235 if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
2236 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT,
2237 OptimizedDiv, N1);
2238 SDValue Sub = DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, Mul);
2239 AddToWorklist(Mul.getNode());
2240 return Sub;
2241 }
2242 }
2244 // undef % X -> 0
2245 if (N0.getOpcode() == ISD::UNDEF)
2246 return DAG.getConstant(0, VT);
2247 // X % undef -> undef
2248 if (N1.getOpcode() == ISD::UNDEF)
2249 return N1;
2251 return SDValue();
2252 }
2254 SDValue DAGCombiner::visitMULHS(SDNode *N) {
2255 SDValue N0 = N->getOperand(0);
2256 SDValue N1 = N->getOperand(1);
2257 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2258 EVT VT = N->getValueType(0);
2259 SDLoc DL(N);
2261 // fold (mulhs x, 0) -> 0
2262 if (N1C && N1C->isNullValue())
2263 return N1;
2264 // fold (mulhs x, 1) -> (sra x, size(x)-1)
2265 if (N1C && N1C->getAPIntValue() == 1)
2266 return DAG.getNode(ISD::SRA, SDLoc(N), N0.getValueType(), N0,
2267 DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
2268 getShiftAmountTy(N0.getValueType())));
2269 // fold (mulhs x, undef) -> 0
2270 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
2271 return DAG.getConstant(0, VT);
2273 // If the type twice as wide is legal, transform the mulhs to a wider multiply
2274 // plus a shift.
2275 if (VT.isSimple() && !VT.isVector()) {
2276 MVT Simple = VT.getSimpleVT();
2277 unsigned SimpleSize = Simple.getSizeInBits();
2278 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2279 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2280 N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
2281 N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
2282 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
2283 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
2284 DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType())));
2285 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
2286 }
2287 }
2289 return SDValue();
2290 }
2292 SDValue DAGCombiner::visitMULHU(SDNode *N) {
2293 SDValue N0 = N->getOperand(0);
2294 SDValue N1 = N->getOperand(1);
2295 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2296 EVT VT = N->getValueType(0);
2297 SDLoc DL(N);
2299 // fold (mulhu x, 0) -> 0
2300 if (N1C && N1C->isNullValue())
2301 return N1;
2302 // fold (mulhu x, 1) -> 0
2303 if (N1C && N1C->getAPIntValue() == 1)
2304 return DAG.getConstant(0, N0.getValueType());
2305 // fold (mulhu x, undef) -> 0
2306 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
2307 return DAG.getConstant(0, VT);
2309 // If the type twice as wide is legal, transform the mulhu to a wider multiply
2310 // plus a shift.
2311 if (VT.isSimple() && !VT.isVector()) {
2312 MVT Simple = VT.getSimpleVT();
2313 unsigned SimpleSize = Simple.getSizeInBits();
2314 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2315 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2316 N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
2317 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
2318 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
2319 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
2320 DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType())));
2321 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
2322 }
2323 }
2325 return SDValue();
2326 }
2328 /// Perform optimizations common to nodes that compute two values. LoOp and HiOp
2329 /// give the opcodes for the two computations that are being performed. Return
2330 /// true if a simplification was made.
2331 SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
2332 unsigned HiOp) {
2333 // If the high half is not needed, just compute the low half.
2334 bool HiExists = N->hasAnyUseOfValue(1);
2335 if (!HiExists &&
2336 (!LegalOperations ||
2337 TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
2338 SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
2339 return CombineTo(N, Res, Res);
2340 }
2342 // If the low half is not needed, just compute the high half.
2343 bool LoExists = N->hasAnyUseOfValue(0);
2344 if (!LoExists &&
2345 (!LegalOperations ||
2346 TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
2347 SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
2348 return CombineTo(N, Res, Res);
2349 }
2351 // If both halves are used, return as it is.
2352 if (LoExists && HiExists)
2353 return SDValue();
2355 // If the two computed results can be simplified separately, separate them.
2356 if (LoExists) {
2357 SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
2358 AddToWorklist(Lo.getNode());
2359 SDValue LoOpt = combine(Lo.getNode());
2360 if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
2361 (!LegalOperations ||
2362 TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
2363 return CombineTo(N, LoOpt, LoOpt);
2364 }
2366 if (HiExists) {
2367 SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
2368 AddToWorklist(Hi.getNode());
2369 SDValue HiOpt = combine(Hi.getNode());
2370 if (HiOpt.getNode() && HiOpt != Hi &&
2371 (!LegalOperations ||
2372 TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
2373 return CombineTo(N, HiOpt, HiOpt);
2374 }
2376 return SDValue();
2377 }
2379 SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
2380 SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS);
2381 if (Res.getNode()) return Res;
2383 EVT VT = N->getValueType(0);
2384 SDLoc DL(N);
2386 // If the type twice as wide is legal, transform the mulhu to a wider multiply
2387 // plus a shift.
2388 if (VT.isSimple() && !VT.isVector()) {
2389 MVT Simple = VT.getSimpleVT();
2390 unsigned SimpleSize = Simple.getSizeInBits();
2391 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2392 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2393 SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
2394 SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
2395 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
2396 // Compute the high part as N1.
2397 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
2398 DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType())));
2399 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
2400 // Compute the low part as N0.
2401 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
2402 return CombineTo(N, Lo, Hi);
2403 }
2404 }
2406 return SDValue();
2407 }
2409 SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
2410 SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU);
2411 if (Res.getNode()) return Res;
2413 EVT VT = N->getValueType(0);
2414 SDLoc DL(N);
2416 // If the type twice as wide is legal, transform the mulhu to a wider multiply
2417 // plus a shift.
2418 if (VT.isSimple() && !VT.isVector()) {
2419 MVT Simple = VT.getSimpleVT();
2420 unsigned SimpleSize = Simple.getSizeInBits();
2421 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2422 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2423 SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
2424 SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
2425 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
2426 // Compute the high part as N1.
2427 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
2428 DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType())));
2429 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
2430 // Compute the low part as N0.
2431 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
2432 return CombineTo(N, Lo, Hi);
2433 }
2434 }
2436 return SDValue();
2437 }
2439 SDValue DAGCombiner::visitSMULO(SDNode *N) {
2440 // (smulo x, 2) -> (saddo x, x)
2441 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
2442 if (C2->getAPIntValue() == 2)
2443 return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(),
2444 N->getOperand(0), N->getOperand(0));
2446 return SDValue();
2447 }
2449 SDValue DAGCombiner::visitUMULO(SDNode *N) {
2450 // (umulo x, 2) -> (uaddo x, x)
2451 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
2452 if (C2->getAPIntValue() == 2)
2453 return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(),
2454 N->getOperand(0), N->getOperand(0));
2456 return SDValue();
2457 }
2459 SDValue DAGCombiner::visitSDIVREM(SDNode *N) {
2460 SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM);
2461 if (Res.getNode()) return Res;
2463 return SDValue();
2464 }
2466 SDValue DAGCombiner::visitUDIVREM(SDNode *N) {
2467 SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM);
2468 if (Res.getNode()) return Res;
2470 return SDValue();
2471 }
2473 /// If this is a binary operator with two operands of the same opcode, try to
2474 /// simplify it.
2475 SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
2476 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2477 EVT VT = N0.getValueType();
2478 assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
2480 // Bail early if none of these transforms apply.
2481 if (N0.getNode()->getNumOperands() == 0) return SDValue();
2483 // For each of OP in AND/OR/XOR:
2484 // fold (OP (zext x), (zext y)) -> (zext (OP x, y))
2485 // fold (OP (sext x), (sext y)) -> (sext (OP x, y))
2486 // fold (OP (aext x), (aext y)) -> (aext (OP x, y))
2487 // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
2488 //
2489 // do not sink logical op inside of a vector extend, since it may combine
2490 // into a vsetcc.
2491 EVT Op0VT = N0.getOperand(0).getValueType();
2492 if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
2493 N0.getOpcode() == ISD::SIGN_EXTEND ||
2494 // Avoid infinite looping with PromoteIntBinOp.
2495 (N0.getOpcode() == ISD::ANY_EXTEND &&
2496 (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
2497 (N0.getOpcode() == ISD::TRUNCATE &&
2498 (!TLI.isZExtFree(VT, Op0VT) ||
2499 !TLI.isTruncateFree(Op0VT, VT)) &&
2500 TLI.isTypeLegal(Op0VT))) &&
2501 !VT.isVector() &&
2502 Op0VT == N1.getOperand(0).getValueType() &&
2503 (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
2504 SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
2505 N0.getOperand(0).getValueType(),
2506 N0.getOperand(0), N1.getOperand(0));
2507 AddToWorklist(ORNode.getNode());
2508 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, ORNode);
2509 }
2511 // For each of OP in SHL/SRL/SRA/AND...
2512 // fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
2513 // fold (or (OP x, z), (OP y, z)) -> (OP (or x, y), z)
2514 // fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
2515 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
2516 N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
2517 N0.getOperand(1) == N1.getOperand(1)) {
2518 SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
2519 N0.getOperand(0).getValueType(),
2520 N0.getOperand(0), N1.getOperand(0));
2521 AddToWorklist(ORNode.getNode());
2522 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
2523 ORNode, N0.getOperand(1));
2524 }
2526 // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
2527 // Only perform this optimization after type legalization and before
2528 // LegalizeVectorOprs. LegalizeVectorOprs promotes vector operations by
2529 // adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
2530 // we don't want to undo this promotion.
2531 // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
2532 // on scalars.
2533 if ((N0.getOpcode() == ISD::BITCAST ||
2534 N0.getOpcode() == ISD::SCALAR_TO_VECTOR) &&
2535 Level == AfterLegalizeTypes) {
2536 SDValue In0 = N0.getOperand(0);
2537 SDValue In1 = N1.getOperand(0);
2538 EVT In0Ty = In0.getValueType();
2539 EVT In1Ty = In1.getValueType();
2540 SDLoc DL(N);
2541 // If both incoming values are integers, and the original types are the
2542 // same.
2543 if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
2544 SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1);
2545 SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op);
2546 AddToWorklist(Op.getNode());
2547 return BC;
2548 }
2549 }
2551 // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
2552 // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
2553 // If both shuffles use the same mask, and both shuffle within a single
2554 // vector, then it is worthwhile to move the swizzle after the operation.
2555 // The type-legalizer generates this pattern when loading illegal
2556 // vector types from memory. In many cases this allows additional shuffle
2557 // optimizations.
2558 // There are other cases where moving the shuffle after the xor/and/or
2559 // is profitable even if shuffles don't perform a swizzle.
2560 // If both shuffles use the same mask, and both shuffles have the same first
2561 // or second operand, then it might still be profitable to move the shuffle
2562 // after the xor/and/or operation.
2563 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) {
2564 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0);
2565 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1);
2567 assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() &&
2568 "Inputs to shuffles are not the same type");
2570 // Check that both shuffles use the same mask. The masks are known to be of
2571 // the same length because the result vector type is the same.
2572 // Check also that shuffles have only one use to avoid introducing extra
2573 // instructions.
2574 if (SVN0->hasOneUse() && SVN1->hasOneUse() &&
2575 SVN0->getMask().equals(SVN1->getMask())) {
2576 SDValue ShOp = N0->getOperand(1);
2578 // Don't try to fold this node if it requires introducing a
2579 // build vector of all zeros that might be illegal at this stage.
2580 if (N->getOpcode() == ISD::XOR && ShOp.getOpcode() != ISD::UNDEF) {
2581 if (!LegalTypes)
2582 ShOp = DAG.getConstant(0, VT);
2583 else
2584 ShOp = SDValue();
2585 }
2587 // (AND (shuf (A, C), shuf (B, C)) -> shuf (AND (A, B), C)
2588 // (OR (shuf (A, C), shuf (B, C)) -> shuf (OR (A, B), C)
2589 // (XOR (shuf (A, C), shuf (B, C)) -> shuf (XOR (A, B), V_0)
2590 if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) {
2591 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
2592 N0->getOperand(0), N1->getOperand(0));
2593 AddToWorklist(NewNode.getNode());
2594 return DAG.getVectorShuffle(VT, SDLoc(N), NewNode, ShOp,
2595 &SVN0->getMask()[0]);
2596 }
2598 // Don't try to fold this node if it requires introducing a
2599 // build vector of all zeros that might be illegal at this stage.
2600 ShOp = N0->getOperand(0);
2601 if (N->getOpcode() == ISD::XOR && ShOp.getOpcode() != ISD::UNDEF) {
2602 if (!LegalTypes)
2603 ShOp = DAG.getConstant(0, VT);
2604 else
2605 ShOp = SDValue();
2606 }
2608 // (AND (shuf (C, A), shuf (C, B)) -> shuf (C, AND (A, B))
2609 // (OR (shuf (C, A), shuf (C, B)) -> shuf (C, OR (A, B))
2610 // (XOR (shuf (C, A), shuf (C, B)) -> shuf (V_0, XOR (A, B))
2611 if (N0->getOperand(0) == N1->getOperand(0) && ShOp.getNode()) {
2612 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
2613 N0->getOperand(1), N1->getOperand(1));
2614 AddToWorklist(NewNode.getNode());
2615 return DAG.getVectorShuffle(VT, SDLoc(N), ShOp, NewNode,
2616 &SVN0->getMask()[0]);
2617 }
2618 }
2619 }
2621 return SDValue();
2622 }
2624 SDValue DAGCombiner::visitAND(SDNode *N) {
2625 SDValue N0 = N->getOperand(0);
2626 SDValue N1 = N->getOperand(1);
2627 SDValue LL, LR, RL, RR, CC0, CC1;
2628 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2629 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2630 EVT VT = N1.getValueType();
2631 unsigned BitWidth = VT.getScalarType().getSizeInBits();
2633 // fold vector ops
2634 if (VT.isVector()) {
2635 SDValue FoldedVOp = SimplifyVBinOp(N);
2636 if (FoldedVOp.getNode()) return FoldedVOp;
2638 // fold (and x, 0) -> 0, vector edition
2639 if (ISD::isBuildVectorAllZeros(N0.getNode()))
2640 // do not return N0, because undef node may exist in N0
2641 return DAG.getConstant(
2642 APInt::getNullValue(
2643 N0.getValueType().getScalarType().getSizeInBits()),
2644 N0.getValueType());
2645 if (ISD::isBuildVectorAllZeros(N1.getNode()))
2646 // do not return N1, because undef node may exist in N1
2647 return DAG.getConstant(
2648 APInt::getNullValue(
2649 N1.getValueType().getScalarType().getSizeInBits()),
2650 N1.getValueType());
2652 // fold (and x, -1) -> x, vector edition
2653 if (ISD::isBuildVectorAllOnes(N0.getNode()))
2654 return N1;
2655 if (ISD::isBuildVectorAllOnes(N1.getNode()))
2656 return N0;
2657 }
2659 // fold (and x, undef) -> 0
2660 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
2661 return DAG.getConstant(0, VT);
2662 // fold (and c1, c2) -> c1&c2
2663 if (N0C && N1C)
2664 return DAG.FoldConstantArithmetic(ISD::AND, VT, N0C, N1C);
2665 // canonicalize constant to RHS
2666 if (N0C && !N1C)
2667 return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
2668 // fold (and x, -1) -> x
2669 if (N1C && N1C->isAllOnesValue())
2670 return N0;
2671 // if (and x, c) is known to be zero, return 0
2672 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
2673 APInt::getAllOnesValue(BitWidth)))
2674 return DAG.getConstant(0, VT);
2675 // reassociate and
2676 SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1);
2677 if (RAND.getNode())
2678 return RAND;
2679 // fold (and (or x, C), D) -> D if (C & D) == D
2680 if (N1C && N0.getOpcode() == ISD::OR)
2681 if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
2682 if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue())
2683 return N1;
2684 // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
2685 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
2686 SDValue N0Op0 = N0.getOperand(0);
2687 APInt Mask = ~N1C->getAPIntValue();
2688 Mask = Mask.trunc(N0Op0.getValueSizeInBits());
2689 if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
2690 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
2691 N0.getValueType(), N0Op0);
2693 // Replace uses of the AND with uses of the Zero extend node.
2694 CombineTo(N, Zext);
2696 // We actually want to replace all uses of the any_extend with the
2697 // zero_extend, to avoid duplicating things. This will later cause this
2698 // AND to be folded.
2699 CombineTo(N0.getNode(), Zext);
2700 return SDValue(N, 0); // Return N so it doesn't get rechecked!
2701 }
2702 }
2703 // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
2704 // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must
2705 // already be zero by virtue of the width of the base type of the load.
2706 //
2707 // the 'X' node here can either be nothing or an extract_vector_elt to catch
2708 // more cases.
2709 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
2710 N0.getOperand(0).getOpcode() == ISD::LOAD) ||
2711 N0.getOpcode() == ISD::LOAD) {
2712 LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ?
2713 N0 : N0.getOperand(0) );
2715 // Get the constant (if applicable) the zero'th operand is being ANDed with.
2716 // This can be a pure constant or a vector splat, in which case we treat the
2717 // vector as a scalar and use the splat value.
2718 APInt Constant = APInt::getNullValue(1);
2719 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
2720 Constant = C->getAPIntValue();
2721 } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) {
2722 APInt SplatValue, SplatUndef;
2723 unsigned SplatBitSize;
2724 bool HasAnyUndefs;
2725 bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef,
2726 SplatBitSize, HasAnyUndefs);
2727 if (IsSplat) {
2728 // Undef bits can contribute to a possible optimisation if set, so
2729 // set them.
2730 SplatValue |= SplatUndef;
2732 // The splat value may be something like "0x00FFFFFF", which means 0 for
2733 // the first vector value and FF for the rest, repeating. We need a mask
2734 // that will apply equally to all members of the vector, so AND all the
2735 // lanes of the constant together.
2736 EVT VT = Vector->getValueType(0);
2737 unsigned BitWidth = VT.getVectorElementType().getSizeInBits();
2739 // If the splat value has been compressed to a bitlength lower
2740 // than the size of the vector lane, we need to re-expand it to
2741 // the lane size.
2742 if (BitWidth > SplatBitSize)
2743 for (SplatValue = SplatValue.zextOrTrunc(BitWidth);
2744 SplatBitSize < BitWidth;
2745 SplatBitSize = SplatBitSize * 2)
2746 SplatValue |= SplatValue.shl(SplatBitSize);
2748 Constant = APInt::getAllOnesValue(BitWidth);
2749 for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i)
2750 Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth);
2751 }
2752 }
2754 // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is
2755 // actually legal and isn't going to get expanded, else this is a false
2756 // optimisation.
2757 bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD,
2758 Load->getMemoryVT());
2760 // Resize the constant to the same size as the original memory access before
2761 // extension. If it is still the AllOnesValue then this AND is completely
2762 // unneeded.
2763 Constant =
2764 Constant.zextOrTrunc(Load->getMemoryVT().getScalarType().getSizeInBits());
2766 bool B;
2767 switch (Load->getExtensionType()) {
2768 default: B = false; break;
2769 case ISD::EXTLOAD: B = CanZextLoadProfitably; break;
2770 case ISD::ZEXTLOAD:
2771 case ISD::NON_EXTLOAD: B = true; break;
2772 }
2774 if (B && Constant.isAllOnesValue()) {
2775 // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to
2776 // preserve semantics once we get rid of the AND.
2777 SDValue NewLoad(Load, 0);
2778 if (Load->getExtensionType() == ISD::EXTLOAD) {
2779 NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD,
2780 Load->getValueType(0), SDLoc(Load),
2781 Load->getChain(), Load->getBasePtr(),
2782 Load->getOffset(), Load->getMemoryVT(),
2783 Load->getMemOperand());
2784 // Replace uses of the EXTLOAD with the new ZEXTLOAD.
2785 if (Load->getNumValues() == 3) {
2786 // PRE/POST_INC loads have 3 values.
2787 SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1),
2788 NewLoad.getValue(2) };
2789 CombineTo(Load, To, 3, true);
2790 } else {
2791 CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
2792 }
2793 }
2795 // Fold the AND away, taking care not to fold to the old load node if we
2796 // replaced it.
2797 CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0);
2799 return SDValue(N, 0); // Return N so it doesn't get rechecked!
2800 }
2801 }
2802 // fold (and (setcc x), (setcc y)) -> (setcc (and x, y))
2803 if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
2804 ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
2805 ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
2807 if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
2808 LL.getValueType().isInteger()) {
2809 // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0)
2810 if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) {
2811 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
2812 LR.getValueType(), LL, RL);
2813 AddToWorklist(ORNode.getNode());
2814 return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1);
2815 }
2816 // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1)
2817 if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) {
2818 SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(N0),
2819 LR.getValueType(), LL, RL);
2820 AddToWorklist(ANDNode.getNode());
2821 return DAG.getSetCC(SDLoc(N), VT, ANDNode, LR, Op1);
2822 }
2823 // fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1)
2824 if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) {
2825 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
2826 LR.getValueType(), LL, RL);
2827 AddToWorklist(ORNode.getNode());
2828 return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1);
2829 }
2830 }
2831 // Simplify (and (setne X, 0), (setne X, -1)) -> (setuge (add X, 1), 2)
2832 if (LL == RL && isa<ConstantSDNode>(LR) && isa<ConstantSDNode>(RR) &&
2833 Op0 == Op1 && LL.getValueType().isInteger() &&
2834 Op0 == ISD::SETNE && ((cast<ConstantSDNode>(LR)->isNullValue() &&
2835 cast<ConstantSDNode>(RR)->isAllOnesValue()) ||
2836 (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
2837 cast<ConstantSDNode>(RR)->isNullValue()))) {
2838 SDValue ADDNode = DAG.getNode(ISD::ADD, SDLoc(N0), LL.getValueType(),
2839 LL, DAG.getConstant(1, LL.getValueType()));
2840 AddToWorklist(ADDNode.getNode());
2841 return DAG.getSetCC(SDLoc(N), VT, ADDNode,
2842 DAG.getConstant(2, LL.getValueType()), ISD::SETUGE);
2843 }
2844 // canonicalize equivalent to ll == rl
2845 if (LL == RR && LR == RL) {
2846 Op1 = ISD::getSetCCSwappedOperands(Op1);
2847 std::swap(RL, RR);
2848 }
2849 if (LL == RL && LR == RR) {
2850 bool isInteger = LL.getValueType().isInteger();
2851 ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger);
2852 if (Result != ISD::SETCC_INVALID &&
2853 (!LegalOperations ||
2854 (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) &&
2855 TLI.isOperationLegal(ISD::SETCC,
2856 getSetCCResultType(N0.getSimpleValueType())))))
2857 return DAG.getSetCC(SDLoc(N), N0.getValueType(),
2858 LL, LR, Result);
2859 }
2860 }
2862 // Simplify: (and (op x...), (op y...)) -> (op (and x, y))
2863 if (N0.getOpcode() == N1.getOpcode()) {
2864 SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
2865 if (Tmp.getNode()) return Tmp;
2866 }
2868 // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
2869 // fold (and (sra)) -> (and (srl)) when possible.
2870 if (!VT.isVector() &&
2871 SimplifyDemandedBits(SDValue(N, 0)))
2872 return SDValue(N, 0);
2874 // fold (zext_inreg (extload x)) -> (zextload x)
2875 if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) {
2876 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
2877 EVT MemVT = LN0->getMemoryVT();
2878 // If we zero all the possible extended bits, then we can turn this into
2879 // a zextload if we are running before legalize or the operation is legal.
2880 unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits();
2881 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
2882 BitWidth - MemVT.getScalarType().getSizeInBits())) &&
2883 ((!LegalOperations && !LN0->isVolatile()) ||
2884 TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
2885 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
2886 LN0->getChain(), LN0->getBasePtr(),
2887 MemVT, LN0->getMemOperand());
2888 AddToWorklist(N);
2889 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
2890 return SDValue(N, 0); // Return N so it doesn't get rechecked!
2891 }
2892 }
2893 // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
2894 if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
2895 N0.hasOneUse()) {
2896 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
2897 EVT MemVT = LN0->getMemoryVT();
2898 // If we zero all the possible extended bits, then we can turn this into
2899 // a zextload if we are running before legalize or the operation is legal.
2900 unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits();
2901 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
2902 BitWidth - MemVT.getScalarType().getSizeInBits())) &&
2903 ((!LegalOperations && !LN0->isVolatile()) ||
2904 TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
2905 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
2906 LN0->getChain(), LN0->getBasePtr(),
2907 MemVT, LN0->getMemOperand());
2908 AddToWorklist(N);
2909 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
2910 return SDValue(N, 0); // Return N so it doesn't get rechecked!
2911 }
2912 }
2914 // fold (and (load x), 255) -> (zextload x, i8)
2915 // fold (and (extload x, i16), 255) -> (zextload x, i8)
2916 // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
2917 if (N1C && (N0.getOpcode() == ISD::LOAD ||
2918 (N0.getOpcode() == ISD::ANY_EXTEND &&
2919 N0.getOperand(0).getOpcode() == ISD::LOAD))) {
2920 bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND;
2921 LoadSDNode *LN0 = HasAnyExt
2922 ? cast<LoadSDNode>(N0.getOperand(0))
2923 : cast<LoadSDNode>(N0);
2924 if (LN0->getExtensionType() != ISD::SEXTLOAD &&
2925 LN0->isUnindexed() && N0.hasOneUse() && SDValue(LN0, 0).hasOneUse()) {
2926 uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
2927 if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue())){
2928 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
2929 EVT LoadedVT = LN0->getMemoryVT();
2931 if (ExtVT == LoadedVT &&
2932 (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
2933 EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
2935 SDValue NewLoad =
2936 DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy,
2937 LN0->getChain(), LN0->getBasePtr(), ExtVT,
2938 LN0->getMemOperand());
2939 AddToWorklist(N);
2940 CombineTo(LN0, NewLoad, NewLoad.getValue(1));
2941 return SDValue(N, 0); // Return N so it doesn't get rechecked!
2942 }
2944 // Do not change the width of a volatile load.
2945 // Do not generate loads of non-round integer types since these can
2946 // be expensive (and would be wrong if the type is not byte sized).
2947 if (!LN0->isVolatile() && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() &&
2948 (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
2949 EVT PtrType = LN0->getOperand(1).getValueType();
2951 unsigned Alignment = LN0->getAlignment();
2952 SDValue NewPtr = LN0->getBasePtr();
2954 // For big endian targets, we need to add an offset to the pointer
2955 // to load the correct bytes. For little endian systems, we merely
2956 // need to read fewer bytes from the same pointer.
2957 if (TLI.isBigEndian()) {
2958 unsigned LVTStoreBytes = LoadedVT.getStoreSize();
2959 unsigned EVTStoreBytes = ExtVT.getStoreSize();
2960 unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
2961 NewPtr = DAG.getNode(ISD::ADD, SDLoc(LN0), PtrType,
2962 NewPtr, DAG.getConstant(PtrOff, PtrType));
2963 Alignment = MinAlign(Alignment, PtrOff);
2964 }
2966 AddToWorklist(NewPtr.getNode());
2968 EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
2969 SDValue Load =
2970 DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy,
2971 LN0->getChain(), NewPtr,
2972 LN0->getPointerInfo(),
2973 ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
2974 LN0->isInvariant(), Alignment, LN0->getAAInfo());
2975 AddToWorklist(N);
2976 CombineTo(LN0, Load, Load.getValue(1));
2977 return SDValue(N, 0); // Return N so it doesn't get rechecked!
2978 }
2979 }
2980 }
2981 }
2983 if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
2984 VT.getSizeInBits() <= 64) {
2985 if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2986 APInt ADDC = ADDI->getAPIntValue();
2987 if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
2988 // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
2989 // immediate for an add, but it is legal if its top c2 bits are set,
2990 // transform the ADD so the immediate doesn't need to be materialized
2991 // in a register.
2992 if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
2993 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
2994 SRLI->getZExtValue());
2995 if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
2996 ADDC |= Mask;
2997 if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
2998 SDValue NewAdd =
2999 DAG.getNode(ISD::ADD, SDLoc(N0), VT,
3000 N0.getOperand(0), DAG.getConstant(ADDC, VT));
3001 CombineTo(N0.getNode(), NewAdd);
3002 return SDValue(N, 0); // Return N so it doesn't get rechecked!
3003 }
3004 }
3005 }
3006 }
3007 }
3008 }
3010 // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const)
3011 if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) {
3012 SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
3013 N0.getOperand(1), false);
3014 if (BSwap.getNode())
3015 return BSwap;
3016 }
3018 return SDValue();
3019 }
3021 /// Match (a >> 8) | (a << 8) as (bswap a) >> 16.
3022 SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
3023 bool DemandHighBits) {
3024 if (!LegalOperations)
3025 return SDValue();
3027 EVT VT = N->getValueType(0);
3028 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
3029 return SDValue();
3030 if (!TLI.isOperationLegal(ISD::BSWAP, VT))
3031 return SDValue();
3033 // Recognize (and (shl a, 8), 0xff), (and (srl a, 8), 0xff00)
3034 bool LookPassAnd0 = false;
3035 bool LookPassAnd1 = false;
3036 if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
3037 std::swap(N0, N1);
3038 if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
3039 std::swap(N0, N1);
3040 if (N0.getOpcode() == ISD::AND) {
3041 if (!N0.getNode()->hasOneUse())
3042 return SDValue();
3043 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3044 if (!N01C || N01C->getZExtValue() != 0xFF00)
3045 return SDValue();
3046 N0 = N0.getOperand(0);
3047 LookPassAnd0 = true;
3048 }
3050 if (N1.getOpcode() == ISD::AND) {
3051 if (!N1.getNode()->hasOneUse())
3052 return SDValue();
3053 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
3054 if (!N11C || N11C->getZExtValue() != 0xFF)
3055 return SDValue();
3056 N1 = N1.getOperand(0);
3057 LookPassAnd1 = true;
3058 }
3060 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
3061 std::swap(N0, N1);
3062 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
3063 return SDValue();
3064 if (!N0.getNode()->hasOneUse() ||
3065 !N1.getNode()->hasOneUse())
3066 return SDValue();
3068 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3069 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
3070 if (!N01C || !N11C)
3071 return SDValue();
3072 if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
3073 return SDValue();
3075 // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
3076 SDValue N00 = N0->getOperand(0);
3077 if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
3078 if (!N00.getNode()->hasOneUse())
3079 return SDValue();
3080 ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
3081 if (!N001C || N001C->getZExtValue() != 0xFF)
3082 return SDValue();
3083 N00 = N00.getOperand(0);
3084 LookPassAnd0 = true;
3085 }
3087 SDValue N10 = N1->getOperand(0);
3088 if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
3089 if (!N10.getNode()->hasOneUse())
3090 return SDValue();
3091 ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
3092 if (!N101C || N101C->getZExtValue() != 0xFF00)
3093 return SDValue();
3094 N10 = N10.getOperand(0);
3095 LookPassAnd1 = true;
3096 }
3098 if (N00 != N10)
3099 return SDValue();
3101 // Make sure everything beyond the low halfword gets set to zero since the SRL
3102 // 16 will clear the top bits.
3103 unsigned OpSizeInBits = VT.getSizeInBits();
3104 if (DemandHighBits && OpSizeInBits > 16) {
3105 // If the left-shift isn't masked out then the only way this is a bswap is
3106 // if all bits beyond the low 8 are 0. In that case the entire pattern
3107 // reduces to a left shift anyway: leave it for other parts of the combiner.
3108 if (!LookPassAnd0)
3109 return SDValue();
3111 // However, if the right shift isn't masked out then it might be because
3112 // it's not needed. See if we can spot that too.
3113 if (!LookPassAnd1 &&
3114 !DAG.MaskedValueIsZero(
3115 N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16)))
3116 return SDValue();
3117 }
3119 SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00);
3120 if (OpSizeInBits > 16)
3121 Res = DAG.getNode(ISD::SRL, SDLoc(N), VT, Res,
3122 DAG.getConstant(OpSizeInBits-16, getShiftAmountTy(VT)));
3123 return Res;
3124 }
3126 /// Return true if the specified node is an element that makes up a 32-bit
3127 /// packed halfword byteswap.
3128 /// ((x & 0x000000ff) << 8) |
3129 /// ((x & 0x0000ff00) >> 8) |
3130 /// ((x & 0x00ff0000) << 8) |
3131 /// ((x & 0xff000000) >> 8)
3132 static bool isBSwapHWordElement(SDValue N, SmallVectorImpl<SDNode *> &Parts) {
3133 if (!N.getNode()->hasOneUse())
3134 return false;
3136 unsigned Opc = N.getOpcode();
3137 if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
3138 return false;
3140 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
3141 if (!N1C)
3142 return false;
3144 unsigned Num;
3145 switch (N1C->getZExtValue()) {
3146 default:
3147 return false;
3148 case 0xFF: Num = 0; break;
3149 case 0xFF00: Num = 1; break;
3150 case 0xFF0000: Num = 2; break;
3151 case 0xFF000000: Num = 3; break;
3152 }
3154 // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
3155 SDValue N0 = N.getOperand(0);
3156 if (Opc == ISD::AND) {
3157 if (Num == 0 || Num == 2) {
3158 // (x >> 8) & 0xff
3159 // (x >> 8) & 0xff0000
3160 if (N0.getOpcode() != ISD::SRL)
3161 return false;
3162 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3163 if (!C || C->getZExtValue() != 8)
3164 return false;
3165 } else {
3166 // (x << 8) & 0xff00
3167 // (x << 8) & 0xff000000
3168 if (N0.getOpcode() != ISD::SHL)
3169 return false;
3170 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3171 if (!C || C->getZExtValue() != 8)
3172 return false;
3173 }
3174 } else if (Opc == ISD::SHL) {
3175 // (x & 0xff) << 8
3176 // (x & 0xff0000) << 8
3177 if (Num != 0 && Num != 2)
3178 return false;
3179 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
3180 if (!C || C->getZExtValue() != 8)
3181 return false;
3182 } else { // Opc == ISD::SRL
3183 // (x & 0xff00) >> 8
3184 // (x & 0xff000000) >> 8
3185 if (Num != 1 && Num != 3)
3186 return false;
3187 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
3188 if (!C || C->getZExtValue() != 8)
3189 return false;
3190 }
3192 if (Parts[Num])
3193 return false;
3195 Parts[Num] = N0.getOperand(0).getNode();
3196 return true;
3197 }
3199 /// Match a 32-bit packed halfword bswap. That is
3200 /// ((x & 0x000000ff) << 8) |
3201 /// ((x & 0x0000ff00) >> 8) |
3202 /// ((x & 0x00ff0000) << 8) |
3203 /// ((x & 0xff000000) >> 8)
3204 /// => (rotl (bswap x), 16)
3205 SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
3206 if (!LegalOperations)
3207 return SDValue();
3209 EVT VT = N->getValueType(0);
3210 if (VT != MVT::i32)
3211 return SDValue();
3212 if (!TLI.isOperationLegal(ISD::BSWAP, VT))
3213 return SDValue();
3215 SmallVector<SDNode*,4> Parts(4, (SDNode*)nullptr);
3216 // Look for either
3217 // (or (or (and), (and)), (or (and), (and)))
3218 // (or (or (or (and), (and)), (and)), (and))
3219 if (N0.getOpcode() != ISD::OR)
3220 return SDValue();
3221 SDValue N00 = N0.getOperand(0);
3222 SDValue N01 = N0.getOperand(1);
3224 if (N1.getOpcode() == ISD::OR &&
3225 N00.getNumOperands() == 2 && N01.getNumOperands() == 2) {
3226 // (or (or (and), (and)), (or (and), (and)))
3227 SDValue N000 = N00.getOperand(0);
3228 if (!isBSwapHWordElement(N000, Parts))
3229 return SDValue();
3231 SDValue N001 = N00.getOperand(1);
3232 if (!isBSwapHWordElement(N001, Parts))
3233 return SDValue();
3234 SDValue N010 = N01.getOperand(0);
3235 if (!isBSwapHWordElement(N010, Parts))
3236 return SDValue();
3237 SDValue N011 = N01.getOperand(1);
3238 if (!isBSwapHWordElement(N011, Parts))
3239 return SDValue();
3240 } else {
3241 // (or (or (or (and), (and)), (and)), (and))
3242 if (!isBSwapHWordElement(N1, Parts))
3243 return SDValue();
3244 if (!isBSwapHWordElement(N01, Parts))
3245 return SDValue();
3246 if (N00.getOpcode() != ISD::OR)
3247 return SDValue();
3248 SDValue N000 = N00.getOperand(0);
3249 if (!isBSwapHWordElement(N000, Parts))
3250 return SDValue();
3251 SDValue N001 = N00.getOperand(1);
3252 if (!isBSwapHWordElement(N001, Parts))
3253 return SDValue();
3254 }
3256 // Make sure the parts are all coming from the same node.
3257 if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
3258 return SDValue();
3260 SDValue BSwap = DAG.getNode(ISD::BSWAP, SDLoc(N), VT,
3261 SDValue(Parts[0],0));
3263 // Result of the bswap should be rotated by 16. If it's not legal, then
3264 // do (x << 16) | (x >> 16).
3265 SDValue ShAmt = DAG.getConstant(16, getShiftAmountTy(VT));
3266 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
3267 return DAG.getNode(ISD::ROTL, SDLoc(N), VT, BSwap, ShAmt);
3268 if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
3269 return DAG.getNode(ISD::ROTR, SDLoc(N), VT, BSwap, ShAmt);
3270 return DAG.getNode(ISD::OR, SDLoc(N), VT,
3271 DAG.getNode(ISD::SHL, SDLoc(N), VT, BSwap, ShAmt),
3272 DAG.getNode(ISD::SRL, SDLoc(N), VT, BSwap, ShAmt));
3273 }
3275 SDValue DAGCombiner::visitOR(SDNode *N) {
3276 SDValue N0 = N->getOperand(0);
3277 SDValue N1 = N->getOperand(1);
3278 SDValue LL, LR, RL, RR, CC0, CC1;
3279 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
3280 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3281 EVT VT = N1.getValueType();
3283 // fold vector ops
3284 if (VT.isVector()) {
3285 SDValue FoldedVOp = SimplifyVBinOp(N);
3286 if (FoldedVOp.getNode()) return FoldedVOp;
3288 // fold (or x, 0) -> x, vector edition
3289 if (ISD::isBuildVectorAllZeros(N0.getNode()))
3290 return N1;
3291 if (ISD::isBuildVectorAllZeros(N1.getNode()))
3292 return N0;
3294 // fold (or x, -1) -> -1, vector edition
3295 if (ISD::isBuildVectorAllOnes(N0.getNode()))
3296 // do not return N0, because undef node may exist in N0
3297 return DAG.getConstant(
3298 APInt::getAllOnesValue(
3299 N0.getValueType().getScalarType().getSizeInBits()),
3300 N0.getValueType());
3301 if (ISD::isBuildVectorAllOnes(N1.getNode()))
3302 // do not return N1, because undef node may exist in N1
3303 return DAG.getConstant(
3304 APInt::getAllOnesValue(
3305 N1.getValueType().getScalarType().getSizeInBits()),
3306 N1.getValueType());
3308 // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask1)
3309 // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf B, A, Mask2)
3310 // Do this only if the resulting shuffle is legal.
3311 if (isa<ShuffleVectorSDNode>(N0) &&
3312 isa<ShuffleVectorSDNode>(N1) &&
3313 // Avoid folding a node with illegal type.
3314 TLI.isTypeLegal(VT) &&
3315 N0->getOperand(1) == N1->getOperand(1) &&
3316 ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode())) {
3317 bool CanFold = true;
3318 unsigned NumElts = VT.getVectorNumElements();
3319 const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
3320 const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
3321 // We construct two shuffle masks:
3322 // - Mask1 is a shuffle mask for a shuffle with N0 as the first operand
3323 // and N1 as the second operand.
3324 // - Mask2 is a shuffle mask for a shuffle with N1 as the first operand
3325 // and N0 as the second operand.
3326 // We do this because OR is commutable and therefore there might be
3327 // two ways to fold this node into a shuffle.
3328 SmallVector<int,4> Mask1;
3329 SmallVector<int,4> Mask2;
3331 for (unsigned i = 0; i != NumElts && CanFold; ++i) {
3332 int M0 = SV0->getMaskElt(i);
3333 int M1 = SV1->getMaskElt(i);
3335 // Both shuffle indexes are undef. Propagate Undef.
3336 if (M0 < 0 && M1 < 0) {
3337 Mask1.push_back(M0);
3338 Mask2.push_back(M0);
3339 continue;
3340 }
3342 if (M0 < 0 || M1 < 0 ||
3343 (M0 < (int)NumElts && M1 < (int)NumElts) ||
3344 (M0 >= (int)NumElts && M1 >= (int)NumElts)) {
3345 CanFold = false;
3346 break;
3347 }
3349 Mask1.push_back(M0 < (int)NumElts ? M0 : M1 + NumElts);
3350 Mask2.push_back(M1 < (int)NumElts ? M1 : M0 + NumElts);
3351 }
3353 if (CanFold) {
3354 // Fold this sequence only if the resulting shuffle is 'legal'.
3355 if (TLI.isShuffleMaskLegal(Mask1, VT))
3356 return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(0),
3357 N1->getOperand(0), &Mask1[0]);
3358 if (TLI.isShuffleMaskLegal(Mask2, VT))
3359 return DAG.getVectorShuffle(VT, SDLoc(N), N1->getOperand(0),
3360 N0->getOperand(0), &Mask2[0]);
3361 }
3362 }
3363 }
3365 // fold (or x, undef) -> -1
3366 if (!LegalOperations &&
3367 (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) {
3368 EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
3369 return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
3370 }
3371 // fold (or c1, c2) -> c1|c2
3372 if (N0C && N1C)
3373 return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C);
3374 // canonicalize constant to RHS
3375 if (N0C && !N1C)
3376 return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
3377 // fold (or x, 0) -> x
3378 if (N1C && N1C->isNullValue())
3379 return N0;
3380 // fold (or x, -1) -> -1
3381 if (N1C && N1C->isAllOnesValue())
3382 return N1;
3383 // fold (or x, c) -> c iff (x & ~c) == 0
3384 if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
3385 return N1;
3387 // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
3388 SDValue BSwap = MatchBSwapHWord(N, N0, N1);
3389 if (BSwap.getNode())
3390 return BSwap;
3391 BSwap = MatchBSwapHWordLow(N, N0, N1);
3392 if (BSwap.getNode())
3393 return BSwap;
3395 // reassociate or
3396 SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1);
3397 if (ROR.getNode())
3398 return ROR;
3399 // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
3400 // iff (c1 & c2) == 0.
3401 if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
3402 isa<ConstantSDNode>(N0.getOperand(1))) {
3403 ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
3404 if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) {
3405 SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1);
3406 if (!COR.getNode())
3407 return SDValue();
3408 return DAG.getNode(ISD::AND, SDLoc(N), VT,
3409 DAG.getNode(ISD::OR, SDLoc(N0), VT,
3410 N0.getOperand(0), N1), COR);
3411 }
3412 }
3413 // fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
3414 if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
3415 ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
3416 ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
3418 if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
3419 LL.getValueType().isInteger()) {
3420 // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
3421 // fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0)
3422 if (cast<ConstantSDNode>(LR)->isNullValue() &&
3423 (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
3424 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(LR),
3425 LR.getValueType(), LL, RL);
3426 AddToWorklist(ORNode.getNode());
3427 return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1);
3428 }
3429 // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
3430 // fold (or (setgt X, -1), (setgt Y -1)) -> (setgt (and X, Y), -1)
3431 if (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
3432 (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
3433 SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(LR),
3434 LR.getValueType(), LL, RL);
3435 AddToWorklist(ANDNode.getNode());
3436 return DAG.getSetCC(SDLoc(N), VT, ANDNode, LR, Op1);
3437 }
3438 }
3439 // canonicalize equivalent to ll == rl
3440 if (LL == RR && LR == RL) {
3441 Op1 = ISD::getSetCCSwappedOperands(Op1);
3442 std::swap(RL, RR);
3443 }
3444 if (LL == RL && LR == RR) {
3445 bool isInteger = LL.getValueType().isInteger();
3446 ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
3447 if (Result != ISD::SETCC_INVALID &&
3448 (!LegalOperations ||
3449 (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) &&
3450 TLI.isOperationLegal(ISD::SETCC,
3451 getSetCCResultType(N0.getValueType())))))
3452 return DAG.getSetCC(SDLoc(N), N0.getValueType(),
3453 LL, LR, Result);
3454 }
3455 }
3457 // Simplify: (or (op x...), (op y...)) -> (op (or x, y))
3458 if (N0.getOpcode() == N1.getOpcode()) {
3459 SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
3460 if (Tmp.getNode()) return Tmp;
3461 }
3463 // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
3464 if (N0.getOpcode() == ISD::AND &&
3465 N1.getOpcode() == ISD::AND &&
3466 N0.getOperand(1).getOpcode() == ISD::Constant &&
3467 N1.getOperand(1).getOpcode() == ISD::Constant &&
3468 // Don't increase # computations.
3469 (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
3470 // We can only do this xform if we know that bits from X that are set in C2
3471 // but not in C1 are already zero. Likewise for Y.
3472 const APInt &LHSMask =
3473 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
3474 const APInt &RHSMask =
3475 cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue();
3477 if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
3478 DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
3479 SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
3480 N0.getOperand(0), N1.getOperand(0));
3481 return DAG.getNode(ISD::AND, SDLoc(N), VT, X,
3482 DAG.getConstant(LHSMask | RHSMask, VT));
3483 }
3484 }
3486 // See if this is some rotate idiom.
3487 if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N)))
3488 return SDValue(Rot, 0);
3490 // Simplify the operands using demanded-bits information.
3491 if (!VT.isVector() &&
3492 SimplifyDemandedBits(SDValue(N, 0)))
3493 return SDValue(N, 0);
3495 return SDValue();
3496 }
3498 /// Match "(X shl/srl V1) & V2" where V2 may not be present.
3499 static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
3500 if (Op.getOpcode() == ISD::AND) {
3501 if (isa<ConstantSDNode>(Op.getOperand(1))) {
3502 Mask = Op.getOperand(1);
3503 Op = Op.getOperand(0);
3504 } else {
3505 return false;
3506 }
3507 }
3509 if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
3510 Shift = Op;
3511 return true;
3512 }
3514 return false;
3515 }
3517 // Return true if we can prove that, whenever Neg and Pos are both in the
3518 // range [0, OpSize), Neg == (Pos == 0 ? 0 : OpSize - Pos). This means that
3519 // for two opposing shifts shift1 and shift2 and a value X with OpBits bits:
3520 //
3521 // (or (shift1 X, Neg), (shift2 X, Pos))
3522 //
3523 // reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
3524 // in direction shift1 by Neg. The range [0, OpSize) means that we only need
3525 // to consider shift amounts with defined behavior.
3526 static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned OpSize) {
3527 // If OpSize is a power of 2 then:
3528 //
3529 // (a) (Pos == 0 ? 0 : OpSize - Pos) == (OpSize - Pos) & (OpSize - 1)
3530 // (b) Neg == Neg & (OpSize - 1) whenever Neg is in [0, OpSize).
3531 //
3532 // So if OpSize is a power of 2 and Neg is (and Neg', OpSize-1), we check
3533 // for the stronger condition:
3534 //
3535 // Neg & (OpSize - 1) == (OpSize - Pos) & (OpSize - 1) [A]
3536 //
3537 // for all Neg and Pos. Since Neg & (OpSize - 1) == Neg' & (OpSize - 1)
3538 // we can just replace Neg with Neg' for the rest of the function.
3539 //
3540 // In other cases we check for the even stronger condition:
3541 //
3542 // Neg == OpSize - Pos [B]
3543 //
3544 // for all Neg and Pos. Note that the (or ...) then invokes undefined
3545 // behavior if Pos == 0 (and consequently Neg == OpSize).
3546 //
3547 // We could actually use [A] whenever OpSize is a power of 2, but the
3548 // only extra cases that it would match are those uninteresting ones
3549 // where Neg and Pos are never in range at the same time. E.g. for
3550 // OpSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
3551 // as well as (sub 32, Pos), but:
3552 //
3553 // (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
3554 //
3555 // always invokes undefined behavior for 32-bit X.
3556 //
3557 // Below, Mask == OpSize - 1 when using [A] and is all-ones otherwise.
3558 unsigned MaskLoBits = 0;
3559 if (Neg.getOpcode() == ISD::AND &&
3560 isPowerOf2_64(OpSize) &&
3561 Neg.getOperand(1).getOpcode() == ISD::Constant &&
3562 cast<ConstantSDNode>(Neg.getOperand(1))->getAPIntValue() == OpSize - 1) {
3563 Neg = Neg.getOperand(0);
3564 MaskLoBits = Log2_64(OpSize);
3565 }
3567 // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
3568 if (Neg.getOpcode() != ISD::SUB)
3569 return 0;
3570 ConstantSDNode *NegC = dyn_cast<ConstantSDNode>(Neg.getOperand(0));
3571 if (!NegC)
3572 return 0;
3573 SDValue NegOp1 = Neg.getOperand(1);
3575 // On the RHS of [A], if Pos is Pos' & (OpSize - 1), just replace Pos with
3576 // Pos'. The truncation is redundant for the purpose of the equality.
3577 if (MaskLoBits &&
3578 Pos.getOpcode() == ISD::AND &&
3579 Pos.getOperand(1).getOpcode() == ISD::Constant &&
3580 cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() == OpSize - 1)
3581 Pos = Pos.getOperand(0);
3583 // The condition we need is now:
3584 //
3585 // (NegC - NegOp1) & Mask == (OpSize - Pos) & Mask
3586 //
3587 // If NegOp1 == Pos then we need:
3588 //
3589 // OpSize & Mask == NegC & Mask
3590 //
3591 // (because "x & Mask" is a truncation and distributes through subtraction).
3592 APInt Width;
3593 if (Pos == NegOp1)
3594 Width = NegC->getAPIntValue();
3595 // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC.
3596 // Then the condition we want to prove becomes:
3597 //
3598 // (NegC - NegOp1) & Mask == (OpSize - (NegOp1 + PosC)) & Mask
3599 //
3600 // which, again because "x & Mask" is a truncation, becomes:
3601 //
3602 // NegC & Mask == (OpSize - PosC) & Mask
3603 // OpSize & Mask == (NegC + PosC) & Mask
3604 else if (Pos.getOpcode() == ISD::ADD &&
3605 Pos.getOperand(0) == NegOp1 &&
3606 Pos.getOperand(1).getOpcode() == ISD::Constant)
3607 Width = (cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() +
3608 NegC->getAPIntValue());
3609 else
3610 return false;
3612 // Now we just need to check that OpSize & Mask == Width & Mask.
3613 if (MaskLoBits)
3614 // Opsize & Mask is 0 since Mask is Opsize - 1.
3615 return Width.getLoBits(MaskLoBits) == 0;
3616 return Width == OpSize;
3617 }
3619 // A subroutine of MatchRotate used once we have found an OR of two opposite
3620 // shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces
3621 // to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
3622 // former being preferred if supported. InnerPos and InnerNeg are Pos and
3623 // Neg with outer conversions stripped away.
3624 SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
3625 SDValue Neg, SDValue InnerPos,
3626 SDValue InnerNeg, unsigned PosOpcode,
3627 unsigned NegOpcode, SDLoc DL) {
3628 // fold (or (shl x, (*ext y)),
3629 // (srl x, (*ext (sub 32, y)))) ->
3630 // (rotl x, y) or (rotr x, (sub 32, y))
3631 //
3632 // fold (or (shl x, (*ext (sub 32, y))),
3633 // (srl x, (*ext y))) ->
3634 // (rotr x, y) or (rotl x, (sub 32, y))
3635 EVT VT = Shifted.getValueType();
3636 if (matchRotateSub(InnerPos, InnerNeg, VT.getSizeInBits())) {
3637 bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
3638 return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
3639 HasPos ? Pos : Neg).getNode();
3640 }
3642 return nullptr;
3643 }
3645 // MatchRotate - Handle an 'or' of two operands. If this is one of the many
3646 // idioms for rotate, and if the target supports rotation instructions, generate
3647 // a rot[lr].
3648 SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
3649 // Must be a legal type. Expanded 'n promoted things won't work with rotates.
3650 EVT VT = LHS.getValueType();
3651 if (!TLI.isTypeLegal(VT)) return nullptr;
3653 // The target must have at least one rotate flavor.
3654 bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT);
3655 bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT);
3656 if (!HasROTL && !HasROTR) return nullptr;
3658 // Match "(X shl/srl V1) & V2" where V2 may not be present.
3659 SDValue LHSShift; // The shift.
3660 SDValue LHSMask; // AND value if any.
3661 if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
3662 return nullptr; // Not part of a rotate.
3664 SDValue RHSShift; // The shift.
3665 SDValue RHSMask; // AND value if any.
3666 if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
3667 return nullptr; // Not part of a rotate.
3669 if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
3670 return nullptr; // Not shifting the same value.
3672 if (LHSShift.getOpcode() == RHSShift.getOpcode())
3673 return nullptr; // Shifts must disagree.
3675 // Canonicalize shl to left side in a shl/srl pair.
3676 if (RHSShift.getOpcode() == ISD::SHL) {
3677 std::swap(LHS, RHS);
3678 std::swap(LHSShift, RHSShift);
3679 std::swap(LHSMask , RHSMask );
3680 }
3682 unsigned OpSizeInBits = VT.getSizeInBits();
3683 SDValue LHSShiftArg = LHSShift.getOperand(0);
3684 SDValue LHSShiftAmt = LHSShift.getOperand(1);
3685 SDValue RHSShiftArg = RHSShift.getOperand(0);
3686 SDValue RHSShiftAmt = RHSShift.getOperand(1);
3688 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
3689 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
3690 if (LHSShiftAmt.getOpcode() == ISD::Constant &&
3691 RHSShiftAmt.getOpcode() == ISD::Constant) {
3692 uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue();
3693 uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue();
3694 if ((LShVal + RShVal) != OpSizeInBits)
3695 return nullptr;
3697 SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
3698 LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);
3700 // If there is an AND of either shifted operand, apply it to the result.
3701 if (LHSMask.getNode() || RHSMask.getNode()) {
3702 APInt Mask = APInt::getAllOnesValue(OpSizeInBits);
3704 if (LHSMask.getNode()) {
3705 APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal);
3706 Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits;
3707 }
3708 if (RHSMask.getNode()) {
3709 APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal);
3710 Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits;
3711 }
3713 Rot = DAG.getNode(ISD::AND, DL, VT, Rot, DAG.getConstant(Mask, VT));
3714 }
3716 return Rot.getNode();
3717 }
3719 // If there is a mask here, and we have a variable shift, we can't be sure
3720 // that we're masking out the right stuff.
3721 if (LHSMask.getNode() || RHSMask.getNode())
3722 return nullptr;
3724 // If the shift amount is sign/zext/any-extended just peel it off.
3725 SDValue LExtOp0 = LHSShiftAmt;
3726 SDValue RExtOp0 = RHSShiftAmt;
3727 if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
3728 LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
3729 LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
3730 LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
3731 (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
3732 RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
3733 RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
3734 RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
3735 LExtOp0 = LHSShiftAmt.getOperand(0);
3736 RExtOp0 = RHSShiftAmt.getOperand(0);
3737 }
3739 SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt,
3740 LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL);
3741 if (TryL)
3742 return TryL;
3744 SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt,
3745 RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL);
3746 if (TryR)
3747 return TryR;
3749 return nullptr;
3750 }
3752 SDValue DAGCombiner::visitXOR(SDNode *N) {
3753 SDValue N0 = N->getOperand(0);
3754 SDValue N1 = N->getOperand(1);
3755 SDValue LHS, RHS, CC;
3756 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
3757 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3758 EVT VT = N0.getValueType();
3760 // fold vector ops
3761 if (VT.isVector()) {
3762 SDValue FoldedVOp = SimplifyVBinOp(N);
3763 if (FoldedVOp.getNode()) return FoldedVOp;
3765 // fold (xor x, 0) -> x, vector edition
3766 if (ISD::isBuildVectorAllZeros(N0.getNode()))
3767 return N1;
3768 if (ISD::isBuildVectorAllZeros(N1.getNode()))
3769 return N0;
3770 }
3772 // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
3773 if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
3774 return DAG.getConstant(0, VT);
3775 // fold (xor x, undef) -> undef
3776 if (N0.getOpcode() == ISD::UNDEF)
3777 return N0;
3778 if (N1.getOpcode() == ISD::UNDEF)
3779 return N1;
3780 // fold (xor c1, c2) -> c1^c2
3781 if (N0C && N1C)
3782 return DAG.FoldConstantArithmetic(ISD::XOR, VT, N0C, N1C);
3783 // canonicalize constant to RHS
3784 if (N0C && !N1C)
3785 return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
3786 // fold (xor x, 0) -> x
3787 if (N1C && N1C->isNullValue())
3788 return N0;
3789 // reassociate xor
3790 SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1);
3791 if (RXOR.getNode())
3792 return RXOR;
3794 // fold !(x cc y) -> (x !cc y)
3795 if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) {
3796 bool isInt = LHS.getValueType().isInteger();
3797 ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
3798 isInt);
3800 if (!LegalOperations ||
3801 TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
3802 switch (N0.getOpcode()) {
3803 default:
3804 llvm_unreachable("Unhandled SetCC Equivalent!");
3805 case ISD::SETCC:
3806 return DAG.getSetCC(SDLoc(N), VT, LHS, RHS, NotCC);
3807 case ISD::SELECT_CC:
3808 return DAG.getSelectCC(SDLoc(N), LHS, RHS, N0.getOperand(2),
3809 N0.getOperand(3), NotCC);
3810 }
3811 }
3812 }
3814 // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
3815 if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND &&
3816 N0.getNode()->hasOneUse() &&
3817 isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
3818 SDValue V = N0.getOperand(0);
3819 V = DAG.getNode(ISD::XOR, SDLoc(N0), V.getValueType(), V,
3820 DAG.getConstant(1, V.getValueType()));
3821 AddToWorklist(V.getNode());
3822 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, V);
3823 }
3825 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
3826 if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
3827 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
3828 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
3829 if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
3830 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
3831 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
3832 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
3833 AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
3834 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
3835 }
3836 }
3837 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
3838 if (N1C && N1C->isAllOnesValue() &&
3839 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
3840 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
3841 if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
3842 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
3843 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
3844 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
3845 AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
3846 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
3847 }
3848 }
3849 // fold (xor (and x, y), y) -> (and (not x), y)
3850 if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
3851 N0->getOperand(1) == N1) {
3852 SDValue X = N0->getOperand(0);
3853 SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
3854 AddToWorklist(NotX.getNode());
3855 return DAG.getNode(ISD::AND, SDLoc(N), VT, NotX, N1);
3856 }
3857 // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
3858 if (N1C && N0.getOpcode() == ISD::XOR) {
3859 ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0));
3860 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3861 if (N00C)
3862 return DAG.getNode(ISD::XOR, SDLoc(N), VT, N0.getOperand(1),
3863 DAG.getConstant(N1C->getAPIntValue() ^
3864 N00C->getAPIntValue(), VT));
3865 if (N01C)
3866 return DAG.getNode(ISD::XOR, SDLoc(N), VT, N0.getOperand(0),
3867 DAG.getConstant(N1C->getAPIntValue() ^
3868 N01C->getAPIntValue(), VT));
3869 }
3870 // fold (xor x, x) -> 0
3871 if (N0 == N1)
3872 return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);
3874 // Simplify: xor (op x...), (op y...) -> (op (xor x, y))
3875 if (N0.getOpcode() == N1.getOpcode()) {
3876 SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
3877 if (Tmp.getNode()) return Tmp;
3878 }
3880 // Simplify the expression using non-local knowledge.
3881 if (!VT.isVector() &&
3882 SimplifyDemandedBits(SDValue(N, 0)))
3883 return SDValue(N, 0);
3885 return SDValue();
3886 }
3888 /// Handle transforms common to the three shifts, when the shift amount is a
3889 /// constant.
3890 SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
3891 // We can't and shouldn't fold opaque constants.
3892 if (Amt->isOpaque())
3893 return SDValue();
3895 SDNode *LHS = N->getOperand(0).getNode();
3896 if (!LHS->hasOneUse()) return SDValue();
3898 // We want to pull some binops through shifts, so that we have (and (shift))
3899 // instead of (shift (and)), likewise for add, or, xor, etc. This sort of
3900 // thing happens with address calculations, so it's important to canonicalize
3901 // it.
3902 bool HighBitSet = false; // Can we transform this if the high bit is set?
3904 switch (LHS->getOpcode()) {
3905 default: return SDValue();
3906 case ISD::OR:
3907 case ISD::XOR:
3908 HighBitSet = false; // We can only transform sra if the high bit is clear.
3909 break;
3910 case ISD::AND:
3911 HighBitSet = true; // We can only transform sra if the high bit is set.
3912 break;
3913 case ISD::ADD:
3914 if (N->getOpcode() != ISD::SHL)
3915 return SDValue(); // only shl(add) not sr[al](add).
3916 HighBitSet = false; // We can only transform sra if the high bit is clear.
3917 break;
3918 }
3920 // We require the RHS of the binop to be a constant and not opaque as well.
3921 ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
3922 if (!BinOpCst || BinOpCst->isOpaque()) return SDValue();
3924 // FIXME: disable this unless the input to the binop is a shift by a constant.
3925 // If it is not a shift, it pessimizes some common cases like:
3926 //
3927 // void foo(int *X, int i) { X[i & 1235] = 1; }
3928 // int bar(int *X, int i) { return X[i & 255]; }
3929 SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
3930 if ((BinOpLHSVal->getOpcode() != ISD::SHL &&
3931 BinOpLHSVal->getOpcode() != ISD::SRA &&
3932 BinOpLHSVal->getOpcode() != ISD::SRL) ||
3933 !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
3934 return SDValue();
3936 EVT VT = N->getValueType(0);
3938 // If this is a signed shift right, and the high bit is modified by the
3939 // logical operation, do not perform the transformation. The highBitSet
3940 // boolean indicates the value of the high bit of the constant which would
3941 // cause it to be modified for this operation.
3942 if (N->getOpcode() == ISD::SRA) {
3943 bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
3944 if (BinOpRHSSignSet != HighBitSet)
3945 return SDValue();
3946 }
3948 if (!TLI.isDesirableToCommuteWithShift(LHS))
3949 return SDValue();
3951 // Fold the constants, shifting the binop RHS by the shift amount.
3952 SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
3953 N->getValueType(0),
3954 LHS->getOperand(1), N->getOperand(1));
3955 assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");
3957 // Create the new shift.
3958 SDValue NewShift = DAG.getNode(N->getOpcode(),
3959 SDLoc(LHS->getOperand(0)),
3960 VT, LHS->getOperand(0), N->getOperand(1));
3962 // Create the new binop.
3963 return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS);
3964 }
3966 SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
3967 assert(N->getOpcode() == ISD::TRUNCATE);
3968 assert(N->getOperand(0).getOpcode() == ISD::AND);
3970 // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
3971 if (N->hasOneUse() && N->getOperand(0).hasOneUse()) {
3972 SDValue N01 = N->getOperand(0).getOperand(1);
3974 if (ConstantSDNode *N01C = isConstOrConstSplat(N01)) {
3975 EVT TruncVT = N->getValueType(0);
3976 SDValue N00 = N->getOperand(0).getOperand(0);
3977 APInt TruncC = N01C->getAPIntValue();
3978 TruncC = TruncC.trunc(TruncVT.getScalarSizeInBits());
3980 return DAG.getNode(ISD::AND, SDLoc(N), TruncVT,
3981 DAG.getNode(ISD::TRUNCATE, SDLoc(N), TruncVT, N00),
3982 DAG.getConstant(TruncC, TruncVT));
3983 }
3984 }
3986 return SDValue();
3987 }
3989 SDValue DAGCombiner::visitRotate(SDNode *N) {
3990 // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
3991 if (N->getOperand(1).getOpcode() == ISD::TRUNCATE &&
3992 N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) {
3993 SDValue NewOp1 = distributeTruncateThroughAnd(N->getOperand(1).getNode());
3994 if (NewOp1.getNode())
3995 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
3996 N->getOperand(0), NewOp1);
3997 }
3998 return SDValue();
3999 }
4001 SDValue DAGCombiner::visitSHL(SDNode *N) {
4002 SDValue N0 = N->getOperand(0);
4003 SDValue N1 = N->getOperand(1);
4004 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
4005 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4006 EVT VT = N0.getValueType();
4007 unsigned OpSizeInBits = VT.getScalarSizeInBits();
4009 // fold vector ops
4010 if (VT.isVector()) {
4011 SDValue FoldedVOp = SimplifyVBinOp(N);
4012 if (FoldedVOp.getNode()) return FoldedVOp;
4014 BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
4015 // If setcc produces all-one true value then:
4016 // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
4017 if (N1CV && N1CV->isConstant()) {
4018 if (N0.getOpcode() == ISD::AND) {
4019 SDValue N00 = N0->getOperand(0);
4020 SDValue N01 = N0->getOperand(1);
4021 BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01);
4023 if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC &&
4024 TLI.getBooleanContents(N00.getOperand(0).getValueType()) ==
4025 TargetLowering::ZeroOrNegativeOneBooleanContent) {
4026 SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, VT, N01CV, N1CV);
4027 if (C.getNode())
4028 return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C);
4029 }
4030 } else {
4031 N1C = isConstOrConstSplat(N1);
4032 }
4033 }
4034 }
4036 // fold (shl c1, c2) -> c1<<c2
4037 if (N0C && N1C)
4038 return DAG.FoldConstantArithmetic(ISD::SHL, VT, N0C, N1C);
4039 // fold (shl 0, x) -> 0
4040 if (N0C && N0C->isNullValue())
4041 return N0;
4042 // fold (shl x, c >= size(x)) -> undef
4043 if (N1C && N1C->getZExtValue() >= OpSizeInBits)
4044 return DAG.getUNDEF(VT);
4045 // fold (shl x, 0) -> x
4046 if (N1C && N1C->isNullValue())
4047 return N0;
4048 // fold (shl undef, x) -> 0
4049 if (N0.getOpcode() == ISD::UNDEF)
4050 return DAG.getConstant(0, VT);
4051 // if (shl x, c) is known to be zero, return 0
4052 if (DAG.MaskedValueIsZero(SDValue(N, 0),
4053 APInt::getAllOnesValue(OpSizeInBits)))
4054 return DAG.getConstant(0, VT);
4055 // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
4056 if (N1.getOpcode() == ISD::TRUNCATE &&
4057 N1.getOperand(0).getOpcode() == ISD::AND) {
4058 SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
4059 if (NewOp1.getNode())
4060 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
4061 }
4063 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
4064 return SDValue(N, 0);
4066 // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
4067 if (N1C && N0.getOpcode() == ISD::SHL) {
4068 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
4069 uint64_t c1 = N0C1->getZExtValue();
4070 uint64_t c2 = N1C->getZExtValue();
4071 if (c1 + c2 >= OpSizeInBits)
4072 return DAG.getConstant(0, VT);
4073 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0.getOperand(0),
4074 DAG.getConstant(c1 + c2, N1.getValueType()));
4075 }
4076 }
4078 // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
4079 // For this to be valid, the second form must not preserve any of the bits
4080 // that are shifted out by the inner shift in the first form. This means
4081 // the outer shift size must be >= the number of bits added by the ext.
4082 // As a corollary, we don't care what kind of ext it is.
4083 if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
4084 N0.getOpcode() == ISD::ANY_EXTEND ||
4085 N0.getOpcode() == ISD::SIGN_EXTEND) &&
4086 N0.getOperand(0).getOpcode() == ISD::SHL) {
4087 SDValue N0Op0 = N0.getOperand(0);
4088 if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
4089 uint64_t c1 = N0Op0C1->getZExtValue();
4090 uint64_t c2 = N1C->getZExtValue();
4091 EVT InnerShiftVT = N0Op0.getValueType();
4092 uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
4093 if (c2 >= OpSizeInBits - InnerShiftSize) {
4094 if (c1 + c2 >= OpSizeInBits)
4095 return DAG.getConstant(0, VT);
4096 return DAG.getNode(ISD::SHL, SDLoc(N0), VT,
4097 DAG.getNode(N0.getOpcode(), SDLoc(N0), VT,
4098 N0Op0->getOperand(0)),
4099 DAG.getConstant(c1 + c2, N1.getValueType()));
4100 }
4101 }
4102 }
4104 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
4105 // Only fold this if the inner zext has no other uses to avoid increasing
4106 // the total number of instructions.
4107 if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
4108 N0.getOperand(0).getOpcode() == ISD::SRL) {
4109 SDValue N0Op0 = N0.getOperand(0);
4110 if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
4111 uint64_t c1 = N0Op0C1->getZExtValue();
4112 if (c1 < VT.getScalarSizeInBits()) {
4113 uint64_t c2 = N1C->getZExtValue();
4114 if (c1 == c2) {
4115 SDValue NewOp0 = N0.getOperand(0);
4116 EVT CountVT = NewOp0.getOperand(1).getValueType();
4117 SDValue NewSHL = DAG.getNode(ISD::SHL, SDLoc(N), NewOp0.getValueType(),
4118 NewOp0, DAG.getConstant(c2, CountVT));
4119 AddToWorklist(NewSHL.getNode());
4120 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
4121 }
4122 }
4123 }
4124 }
4126 // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1), MASK) or
4127 // (and (srl x, (sub c1, c2), MASK)
4128 // Only fold this if the inner shift has no other uses -- if it does, folding
4129 // this will increase the total number of instructions.
4130 if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
4131 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
4132 uint64_t c1 = N0C1->getZExtValue();
4133 if (c1 < OpSizeInBits) {
4134 uint64_t c2 = N1C->getZExtValue();
4135 APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
4136 SDValue Shift;
4137 if (c2 > c1) {
4138 Mask = Mask.shl(c2 - c1);
4139 Shift = DAG.getNode(ISD::SHL, SDLoc(N), VT, N0.getOperand(0),
4140 DAG.getConstant(c2 - c1, N1.getValueType()));
4141 } else {
4142 Mask = Mask.lshr(c1 - c2);
4143 Shift = DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0),
4144 DAG.getConstant(c1 - c2, N1.getValueType()));
4145 }
4146 return DAG.getNode(ISD::AND, SDLoc(N0), VT, Shift,
4147 DAG.getConstant(Mask, VT));
4148 }
4149 }
4150 }
4151 // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
4152 if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
4153 unsigned BitSize = VT.getScalarSizeInBits();
4154 SDValue HiBitsMask =
4155 DAG.getConstant(APInt::getHighBitsSet(BitSize,
4156 BitSize - N1C->getZExtValue()), VT);
4157 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0.getOperand(0),
4158 HiBitsMask);
4159 }
4161 // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
4162 // Variant of version done on multiply, except mul by a power of 2 is turned
4163 // into a shift.
4164 APInt Val;
4165 if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
4166 (isa<ConstantSDNode>(N0.getOperand(1)) ||
4167 isConstantSplatVector(N0.getOperand(1).getNode(), Val))) {
4168 SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
4169 SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
4170 return DAG.getNode(ISD::ADD, SDLoc(N), VT, Shl0, Shl1);
4171 }
4173 if (N1C) {
4174 SDValue NewSHL = visitShiftByConstant(N, N1C);
4175 if (NewSHL.getNode())
4176 return NewSHL;
4177 }
4179 return SDValue();
4180 }
4182 SDValue DAGCombiner::visitSRA(SDNode *N) {
4183 SDValue N0 = N->getOperand(0);
4184 SDValue N1 = N->getOperand(1);
4185 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
4186 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4187 EVT VT = N0.getValueType();
4188 unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
4190 // fold vector ops
4191 if (VT.isVector()) {
4192 SDValue FoldedVOp = SimplifyVBinOp(N);
4193 if (FoldedVOp.getNode()) return FoldedVOp;
4195 N1C = isConstOrConstSplat(N1);
4196 }
4198 // fold (sra c1, c2) -> (sra c1, c2)
4199 if (N0C && N1C)
4200 return DAG.FoldConstantArithmetic(ISD::SRA, VT, N0C, N1C);
4201 // fold (sra 0, x) -> 0
4202 if (N0C && N0C->isNullValue())
4203 return N0;
4204 // fold (sra -1, x) -> -1
4205 if (N0C && N0C->isAllOnesValue())
4206 return N0;
4207 // fold (sra x, (setge c, size(x))) -> undef
4208 if (N1C && N1C->getZExtValue() >= OpSizeInBits)
4209 return DAG.getUNDEF(VT);
4210 // fold (sra x, 0) -> x
4211 if (N1C && N1C->isNullValue())
4212 return N0;
4213 // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
4214 // sext_inreg.
4215 if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
4216 unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
4217 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
4218 if (VT.isVector())
4219 ExtVT = EVT::getVectorVT(*DAG.getContext(),
4220 ExtVT, VT.getVectorNumElements());
4221 if ((!LegalOperations ||
4222 TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
4223 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
4224 N0.getOperand(0), DAG.getValueType(ExtVT));
4225 }
4227 // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
4228 if (N1C && N0.getOpcode() == ISD::SRA) {
4229 if (ConstantSDNode *C1 = isConstOrConstSplat(N0.getOperand(1))) {
4230 unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
4231 if (Sum >= OpSizeInBits)
4232 Sum = OpSizeInBits - 1;
4233 return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0.getOperand(0),
4234 DAG.getConstant(Sum, N1.getValueType()));
4235 }
4236 }
4238 // fold (sra (shl X, m), (sub result_size, n))
4239 // -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
4240 // result_size - n != m.
4241 // If truncate is free for the target sext(shl) is likely to result in better
4242 // code.
4243 if (N0.getOpcode() == ISD::SHL && N1C) {
4244 // Get the two constanst of the shifts, CN0 = m, CN = n.
4245 const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
4246 if (N01C) {
4247 LLVMContext &Ctx = *DAG.getContext();
4248 // Determine what the truncate's result bitsize and type would be.
4249 EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());
4251 if (VT.isVector())
4252 TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());
4254 // Determine the residual right-shift amount.
4255 signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
4257 // If the shift is not a no-op (in which case this should be just a sign
4258 // extend already), the truncated to type is legal, sign_extend is legal
4259 // on that type, and the truncate to that type is both legal and free,
4260 // perform the transform.
4261 if ((ShiftAmt > 0) &&
4262 TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
4263 TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
4264 TLI.isTruncateFree(VT, TruncVT)) {
4266 SDValue Amt = DAG.getConstant(ShiftAmt,
4267 getShiftAmountTy(N0.getOperand(0).getValueType()));
4268 SDValue Shift = DAG.getNode(ISD::SRL, SDLoc(N0), VT,
4269 N0.getOperand(0), Amt);
4270 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), TruncVT,
4271 Shift);
4272 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N),
4273 N->getValueType(0), Trunc);
4274 }
4275 }
4276 }
4278 // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
4279 if (N1.getOpcode() == ISD::TRUNCATE &&
4280 N1.getOperand(0).getOpcode() == ISD::AND) {
4281 SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
4282 if (NewOp1.getNode())
4283 return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
4284 }
4286 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
4287 // if c1 is equal to the number of bits the trunc removes
4288 if (N0.getOpcode() == ISD::TRUNCATE &&
4289 (N0.getOperand(0).getOpcode() == ISD::SRL ||
4290 N0.getOperand(0).getOpcode() == ISD::SRA) &&
4291 N0.getOperand(0).hasOneUse() &&
4292 N0.getOperand(0).getOperand(1).hasOneUse() &&
4293 N1C) {
4294 SDValue N0Op0 = N0.getOperand(0);
4295 if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) {
4296 unsigned LargeShiftVal = LargeShift->getZExtValue();
4297 EVT LargeVT = N0Op0.getValueType();
4299 if (LargeVT.getScalarSizeInBits() - OpSizeInBits == LargeShiftVal) {
4300 SDValue Amt =
4301 DAG.getConstant(LargeShiftVal + N1C->getZExtValue(),
4302 getShiftAmountTy(N0Op0.getOperand(0).getValueType()));
4303 SDValue SRA = DAG.getNode(ISD::SRA, SDLoc(N), LargeVT,
4304 N0Op0.getOperand(0), Amt);
4305 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, SRA);
4306 }
4307 }
4308 }
4310 // Simplify, based on bits shifted out of the LHS.
4311 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
4312 return SDValue(N, 0);
4315 // If the sign bit is known to be zero, switch this to a SRL.
4316 if (DAG.SignBitIsZero(N0))
4317 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);
4319 if (N1C) {
4320 SDValue NewSRA = visitShiftByConstant(N, N1C);
4321 if (NewSRA.getNode())
4322 return NewSRA;
4323 }
4325 return SDValue();
4326 }
4328 SDValue DAGCombiner::visitSRL(SDNode *N) {
4329 SDValue N0 = N->getOperand(0);
4330 SDValue N1 = N->getOperand(1);
4331 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
4332 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4333 EVT VT = N0.getValueType();
4334 unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
4336 // fold vector ops
4337 if (VT.isVector()) {
4338 SDValue FoldedVOp = SimplifyVBinOp(N);
4339 if (FoldedVOp.getNode()) return FoldedVOp;
4341 N1C = isConstOrConstSplat(N1);
4342 }
4344 // fold (srl c1, c2) -> c1 >>u c2
4345 if (N0C && N1C)
4346 return DAG.FoldConstantArithmetic(ISD::SRL, VT, N0C, N1C);
4347 // fold (srl 0, x) -> 0
4348 if (N0C && N0C->isNullValue())
4349 return N0;
4350 // fold (srl x, c >= size(x)) -> undef
4351 if (N1C && N1C->getZExtValue() >= OpSizeInBits)
4352 return DAG.getUNDEF(VT);
4353 // fold (srl x, 0) -> x
4354 if (N1C && N1C->isNullValue())
4355 return N0;
4356 // if (srl x, c) is known to be zero, return 0
4357 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
4358 APInt::getAllOnesValue(OpSizeInBits)))
4359 return DAG.getConstant(0, VT);
4361 // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
4362 if (N1C && N0.getOpcode() == ISD::SRL) {
4363 if (ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1))) {
4364 uint64_t c1 = N01C->getZExtValue();
4365 uint64_t c2 = N1C->getZExtValue();
4366 if (c1 + c2 >= OpSizeInBits)
4367 return DAG.getConstant(0, VT);
4368 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0),
4369 DAG.getConstant(c1 + c2, N1.getValueType()));
4370 }
4371 }
4373 // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
4374 if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
4375 N0.getOperand(0).getOpcode() == ISD::SRL &&
4376 isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
4377 uint64_t c1 =
4378 cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
4379 uint64_t c2 = N1C->getZExtValue();
4380 EVT InnerShiftVT = N0.getOperand(0).getValueType();
4381 EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
4382 uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
4383 // This is only valid if the OpSizeInBits + c1 = size of inner shift.
4384 if (c1 + OpSizeInBits == InnerShiftSize) {
4385 if (c1 + c2 >= InnerShiftSize)
4386 return DAG.getConstant(0, VT);
4387 return DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT,
4388 DAG.getNode(ISD::SRL, SDLoc(N0), InnerShiftVT,
4389 N0.getOperand(0)->getOperand(0),
4390 DAG.getConstant(c1 + c2, ShiftCountVT)));
4391 }
4392 }
4394 // fold (srl (shl x, c), c) -> (and x, cst2)
4395 if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1) {
4396 unsigned BitSize = N0.getScalarValueSizeInBits();
4397 if (BitSize <= 64) {
4398 uint64_t ShAmt = N1C->getZExtValue() + 64 - BitSize;
4399 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0.getOperand(0),
4400 DAG.getConstant(~0ULL >> ShAmt, VT));
4401 }
4402 }
4404 // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
4405 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
4406 // Shifting in all undef bits?
4407 EVT SmallVT = N0.getOperand(0).getValueType();
4408 unsigned BitSize = SmallVT.getScalarSizeInBits();
4409 if (N1C->getZExtValue() >= BitSize)
4410 return DAG.getUNDEF(VT);
4412 if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
4413 uint64_t ShiftAmt = N1C->getZExtValue();
4414 SDValue SmallShift = DAG.getNode(ISD::SRL, SDLoc(N0), SmallVT,
4415 N0.getOperand(0),
4416 DAG.getConstant(ShiftAmt, getShiftAmountTy(SmallVT)));
4417 AddToWorklist(SmallShift.getNode());
4418 APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
4419 return DAG.getNode(ISD::AND, SDLoc(N), VT,
4420 DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, SmallShift),
4421 DAG.getConstant(Mask, VT));
4422 }
4423 }
4425 // fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign
4426 // bit, which is unmodified by sra.
4427 if (N1C && N1C->getZExtValue() + 1 == OpSizeInBits) {
4428 if (N0.getOpcode() == ISD::SRA)
4429 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
4430 }
4432 // fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
4433 if (N1C && N0.getOpcode() == ISD::CTLZ &&
4434 N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
4435 APInt KnownZero, KnownOne;
4436 DAG.computeKnownBits(N0.getOperand(0), KnownZero, KnownOne);
4438 // If any of the input bits are KnownOne, then the input couldn't be all
4439 // zeros, thus the result of the srl will always be zero.
4440 if (KnownOne.getBoolValue()) return DAG.getConstant(0, VT);
4442 // If all of the bits input the to ctlz node are known to be zero, then
4443 // the result of the ctlz is "32" and the result of the shift is one.
4444 APInt UnknownBits = ~KnownZero;
4445 if (UnknownBits == 0) return DAG.getConstant(1, VT);
4447 // Otherwise, check to see if there is exactly one bit input to the ctlz.
4448 if ((UnknownBits & (UnknownBits - 1)) == 0) {
4449 // Okay, we know that only that the single bit specified by UnknownBits
4450 // could be set on input to the CTLZ node. If this bit is set, the SRL
4451 // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
4452 // to an SRL/XOR pair, which is likely to simplify more.
4453 unsigned ShAmt = UnknownBits.countTrailingZeros();
4454 SDValue Op = N0.getOperand(0);
4456 if (ShAmt) {
4457 Op = DAG.getNode(ISD::SRL, SDLoc(N0), VT, Op,
4458 DAG.getConstant(ShAmt, getShiftAmountTy(Op.getValueType())));
4459 AddToWorklist(Op.getNode());
4460 }
4462 return DAG.getNode(ISD::XOR, SDLoc(N), VT,
4463 Op, DAG.getConstant(1, VT));
4464 }
4465 }
4467 // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
4468 if (N1.getOpcode() == ISD::TRUNCATE &&
4469 N1.getOperand(0).getOpcode() == ISD::AND) {
4470 SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
4471 if (NewOp1.getNode())
4472 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
4473 }
4475 // fold operands of srl based on knowledge that the low bits are not
4476 // demanded.
4477 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
4478 return SDValue(N, 0);
4480 if (N1C) {
4481 SDValue NewSRL = visitShiftByConstant(N, N1C);
4482 if (NewSRL.getNode())
4483 return NewSRL;
4484 }
4486 // Attempt to convert a srl of a load into a narrower zero-extending load.
4487 SDValue NarrowLoad = ReduceLoadWidth(N);
4488 if (NarrowLoad.getNode())
4489 return NarrowLoad;
4491 // Here is a common situation. We want to optimize:
4492 //
4493 // %a = ...
4494 // %b = and i32 %a, 2
4495 // %c = srl i32 %b, 1
4496 // brcond i32 %c ...
4497 //
4498 // into
4499 //
4500 // %a = ...
4501 // %b = and %a, 2
4502 // %c = setcc eq %b, 0
4503 // brcond %c ...
4504 //
4505 // However when after the source operand of SRL is optimized into AND, the SRL
4506 // itself may not be optimized further. Look for it and add the BRCOND into
4507 // the worklist.
4508 if (N->hasOneUse()) {
4509 SDNode *Use = *N->use_begin();
4510 if (Use->getOpcode() == ISD::BRCOND)
4511 AddToWorklist(Use);
4512 else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
4513 // Also look pass the truncate.
4514 Use = *Use->use_begin();
4515 if (Use->getOpcode() == ISD::BRCOND)
4516 AddToWorklist(Use);
4517 }
4518 }
4520 return SDValue();
4521 }
4523 SDValue DAGCombiner::visitCTLZ(SDNode *N) {
4524 SDValue N0 = N->getOperand(0);
4525 EVT VT = N->getValueType(0);
4527 // fold (ctlz c1) -> c2
4528 if (isa<ConstantSDNode>(N0))
4529 return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0);
4530 return SDValue();
4531 }
4533 SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
4534 SDValue N0 = N->getOperand(0);
4535 EVT VT = N->getValueType(0);
4537 // fold (ctlz_zero_undef c1) -> c2
4538 if (isa<ConstantSDNode>(N0))
4539 return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
4540 return SDValue();
4541 }
4543 SDValue DAGCombiner::visitCTTZ(SDNode *N) {
4544 SDValue N0 = N->getOperand(0);
4545 EVT VT = N->getValueType(0);
4547 // fold (cttz c1) -> c2
4548 if (isa<ConstantSDNode>(N0))
4549 return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);
4550 return SDValue();
4551 }
4553 SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
4554 SDValue N0 = N->getOperand(0);
4555 EVT VT = N->getValueType(0);
4557 // fold (cttz_zero_undef c1) -> c2
4558 if (isa<ConstantSDNode>(N0))
4559 return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
4560 return SDValue();
4561 }
4563 SDValue DAGCombiner::visitCTPOP(SDNode *N) {
4564 SDValue N0 = N->getOperand(0);
4565 EVT VT = N->getValueType(0);
4567 // fold (ctpop c1) -> c2
4568 if (isa<ConstantSDNode>(N0))
4569 return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0);
4570 return SDValue();
4571 }
4573 SDValue DAGCombiner::visitSELECT(SDNode *N) {
4574 SDValue N0 = N->getOperand(0);
4575 SDValue N1 = N->getOperand(1);
4576 SDValue N2 = N->getOperand(2);
4577 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
4578 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4579 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
4580 EVT VT = N->getValueType(0);
4581 EVT VT0 = N0.getValueType();
4583 // fold (select C, X, X) -> X
4584 if (N1 == N2)
4585 return N1;
4586 // fold (select true, X, Y) -> X
4587 if (N0C && !N0C->isNullValue())
4588 return N1;
4589 // fold (select false, X, Y) -> Y
4590 if (N0C && N0C->isNullValue())
4591 return N2;
4592 // fold (select C, 1, X) -> (or C, X)
4593 if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
4594 return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
4595 // fold (select C, 0, 1) -> (xor C, 1)
4596 // We can't do this reliably if integer based booleans have different contents
4597 // to floating point based booleans. This is because we can't tell whether we
4598 // have an integer-based boolean or a floating-point-based boolean unless we
4599 // can find the SETCC that produced it and inspect its operands. This is
4600 // fairly easy if C is the SETCC node, but it can potentially be
4601 // undiscoverable (or not reasonably discoverable). For example, it could be
4602 // in another basic block or it could require searching a complicated
4603 // expression.
4604 if (VT.isInteger() &&
4605 (VT0 == MVT::i1 || (VT0.isInteger() &&
4606 TLI.getBooleanContents(false, false) ==
4607 TLI.getBooleanContents(false, true) &&
4608 TLI.getBooleanContents(false, false) ==
4609 TargetLowering::ZeroOrOneBooleanContent)) &&
4610 N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
4611 SDValue XORNode;
4612 if (VT == VT0)
4613 return DAG.getNode(ISD::XOR, SDLoc(N), VT0,
4614 N0, DAG.getConstant(1, VT0));
4615 XORNode = DAG.getNode(ISD::XOR, SDLoc(N0), VT0,
4616 N0, DAG.getConstant(1, VT0));
4617 AddToWorklist(XORNode.getNode());
4618 if (VT.bitsGT(VT0))
4619 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, XORNode);
4620 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, XORNode);
4621 }
4622 // fold (select C, 0, X) -> (and (not C), X)
4623 if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
4624 SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
4625 AddToWorklist(NOTNode.getNode());
4626 return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2);
4627 }
4628 // fold (select C, X, 1) -> (or (not C), X)
4629 if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
4630 SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
4631 AddToWorklist(NOTNode.getNode());
4632 return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
4633 }
4634 // fold (select C, X, 0) -> (and C, X)
4635 if (VT == MVT::i1 && N2C && N2C->isNullValue())
4636 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
4637 // fold (select X, X, Y) -> (or X, Y)
4638 // fold (select X, 1, Y) -> (or X, Y)
4639 if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
4640 return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
4641 // fold (select X, Y, X) -> (and X, Y)
4642 // fold (select X, Y, 0) -> (and X, Y)
4643 if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
4644 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
4646 // If we can fold this based on the true/false value, do so.
4647 if (SimplifySelectOps(N, N1, N2))
4648 return SDValue(N, 0); // Don't revisit N.
4650 // fold selects based on a setcc into other things, such as min/max/abs
4651 if (N0.getOpcode() == ISD::SETCC) {
4652 if ((!LegalOperations &&
4653 TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
4654 TLI.isOperationLegal(ISD::SELECT_CC, VT))
4655 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
4656 N0.getOperand(0), N0.getOperand(1),
4657 N1, N2, N0.getOperand(2));
4658 return SimplifySelect(SDLoc(N), N0, N1, N2);
4659 }
4661 return SDValue();
4662 }
4664 static
4665 std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
4666 SDLoc DL(N);
4667 EVT LoVT, HiVT;
4668 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
4670 // Split the inputs.
4671 SDValue Lo, Hi, LL, LH, RL, RH;
4672 std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
4673 std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
4675 Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
4676 Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
4678 return std::make_pair(Lo, Hi);
4679 }
4681 // This function assumes all the vselect's arguments are CONCAT_VECTOR
4682 // nodes and that the condition is a BV of ConstantSDNodes (or undefs).
4683 static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
4684 SDLoc dl(N);
4685 SDValue Cond = N->getOperand(0);
4686 SDValue LHS = N->getOperand(1);
4687 SDValue RHS = N->getOperand(2);
4688 EVT VT = N->getValueType(0);
4689 int NumElems = VT.getVectorNumElements();
4690 assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
4691 RHS.getOpcode() == ISD::CONCAT_VECTORS &&
4692 Cond.getOpcode() == ISD::BUILD_VECTOR);
4694 // CONCAT_VECTOR can take an arbitrary number of arguments. We only care about
4695 // binary ones here.
4696 if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
4697 return SDValue();
4699 // We're sure we have an even number of elements due to the
4700 // concat_vectors we have as arguments to vselect.
4701 // Skip BV elements until we find one that's not an UNDEF
4702 // After we find an UNDEF element, keep looping until we get to half the
4703 // length of the BV and see if all the non-undef nodes are the same.
4704 ConstantSDNode *BottomHalf = nullptr;
4705 for (int i = 0; i < NumElems / 2; ++i) {
4706 if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF)
4707 continue;
4709 if (BottomHalf == nullptr)
4710 BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
4711 else if (Cond->getOperand(i).getNode() != BottomHalf)
4712 return SDValue();
4713 }
4715 // Do the same for the second half of the BuildVector
4716 ConstantSDNode *TopHalf = nullptr;
4717 for (int i = NumElems / 2; i < NumElems; ++i) {
4718 if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF)
4719 continue;
4721 if (TopHalf == nullptr)
4722 TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
4723 else if (Cond->getOperand(i).getNode() != TopHalf)
4724 return SDValue();
4725 }
4727 assert(TopHalf && BottomHalf &&
4728 "One half of the selector was all UNDEFs and the other was all the "
4729 "same value. This should have been addressed before this function.");
4730 return DAG.getNode(
4731 ISD::CONCAT_VECTORS, dl, VT,
4732 BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
4733 TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
4734 }
4736 SDValue DAGCombiner::visitVSELECT(SDNode *N) {
4737 SDValue N0 = N->getOperand(0);
4738 SDValue N1 = N->getOperand(1);
4739 SDValue N2 = N->getOperand(2);
4740 SDLoc DL(N);
4742 // Canonicalize integer abs.
4743 // vselect (setg[te] X, 0), X, -X ->
4744 // vselect (setgt X, -1), X, -X ->
4745 // vselect (setl[te] X, 0), -X, X ->
4746 // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
4747 if (N0.getOpcode() == ISD::SETCC) {
4748 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
4749 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
4750 bool isAbs = false;
4751 bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
4753 if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
4754 (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) &&
4755 N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1))
4756 isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode());
4757 else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) &&
4758 N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1))
4759 isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
4761 if (isAbs) {
4762 EVT VT = LHS.getValueType();
4763 SDValue Shift = DAG.getNode(
4764 ISD::SRA, DL, VT, LHS,
4765 DAG.getConstant(VT.getScalarType().getSizeInBits() - 1, VT));
4766 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
4767 AddToWorklist(Shift.getNode());
4768 AddToWorklist(Add.getNode());
4769 return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
4770 }
4771 }
4773 // If the VSELECT result requires splitting and the mask is provided by a
4774 // SETCC, then split both nodes and its operands before legalization. This
4775 // prevents the type legalizer from unrolling SETCC into scalar comparisons
4776 // and enables future optimizations (e.g. min/max pattern matching on X86).
4777 if (N0.getOpcode() == ISD::SETCC) {
4778 EVT VT = N->getValueType(0);
4780 // Check if any splitting is required.
4781 if (TLI.getTypeAction(*DAG.getContext(), VT) !=
4782 TargetLowering::TypeSplitVector)
4783 return SDValue();
4785 SDValue Lo, Hi, CCLo, CCHi, LL, LH, RL, RH;
4786 std::tie(CCLo, CCHi) = SplitVSETCC(N0.getNode(), DAG);
4787 std::tie(LL, LH) = DAG.SplitVectorOperand(N, 1);
4788 std::tie(RL, RH) = DAG.SplitVectorOperand(N, 2);
4790 Lo = DAG.getNode(N->getOpcode(), DL, LL.getValueType(), CCLo, LL, RL);
4791 Hi = DAG.getNode(N->getOpcode(), DL, LH.getValueType(), CCHi, LH, RH);
4793 // Add the new VSELECT nodes to the work list in case they need to be split
4794 // again.
4795 AddToWorklist(Lo.getNode());
4796 AddToWorklist(Hi.getNode());
4798 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
4799 }
4801 // Fold (vselect (build_vector all_ones), N1, N2) -> N1
4802 if (ISD::isBuildVectorAllOnes(N0.getNode()))
4803 return N1;
4804 // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
4805 if (ISD::isBuildVectorAllZeros(N0.getNode()))
4806 return N2;
4808 // The ConvertSelectToConcatVector function is assuming both the above
4809 // checks for (vselect (build_vector all{ones,zeros) ...) have been made
4810 // and addressed.
4811 if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
4812 N2.getOpcode() == ISD::CONCAT_VECTORS &&
4813 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
4814 SDValue CV = ConvertSelectToConcatVector(N, DAG);
4815 if (CV.getNode())
4816 return CV;
4817 }
4819 return SDValue();
4820 }
4822 SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
4823 SDValue N0 = N->getOperand(0);
4824 SDValue N1 = N->getOperand(1);
4825 SDValue N2 = N->getOperand(2);
4826 SDValue N3 = N->getOperand(3);
4827 SDValue N4 = N->getOperand(4);
4828 ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
4830 // fold select_cc lhs, rhs, x, x, cc -> x
4831 if (N2 == N3)
4832 return N2;
4834 // Determine if the condition we're dealing with is constant
4835 SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
4836 N0, N1, CC, SDLoc(N), false);
4837 if (SCC.getNode()) {
4838 AddToWorklist(SCC.getNode());
4840 if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
4841 if (!SCCC->isNullValue())
4842 return N2; // cond always true -> true val
4843 else
4844 return N3; // cond always false -> false val
4845 }
4847 // Fold to a simpler select_cc
4848 if (SCC.getOpcode() == ISD::SETCC)
4849 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(),
4850 SCC.getOperand(0), SCC.getOperand(1), N2, N3,
4851 SCC.getOperand(2));
4852 }
4854 // If we can fold this based on the true/false value, do so.
4855 if (SimplifySelectOps(N, N2, N3))
4856 return SDValue(N, 0); // Don't revisit N.
4858 // fold select_cc into other things, such as min/max/abs
4859 return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
4860 }
4862 SDValue DAGCombiner::visitSETCC(SDNode *N) {
4863 return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
4864 cast<CondCodeSDNode>(N->getOperand(2))->get(),
4865 SDLoc(N));
4866 }
4868 // tryToFoldExtendOfConstant - Try to fold a sext/zext/aext
4869 // dag node into a ConstantSDNode or a build_vector of constants.
4870 // This function is called by the DAGCombiner when visiting sext/zext/aext
4871 // dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
4872 // Vector extends are not folded if operations are legal; this is to
4873 // avoid introducing illegal build_vector dag nodes.
4874 static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
4875 SelectionDAG &DAG, bool LegalTypes,
4876 bool LegalOperations) {
4877 unsigned Opcode = N->getOpcode();
4878 SDValue N0 = N->getOperand(0);
4879 EVT VT = N->getValueType(0);
4881 assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
4882 Opcode == ISD::ANY_EXTEND) && "Expected EXTEND dag node in input!");
4884 // fold (sext c1) -> c1
4885 // fold (zext c1) -> c1
4886 // fold (aext c1) -> c1
4887 if (isa<ConstantSDNode>(N0))
4888 return DAG.getNode(Opcode, SDLoc(N), VT, N0).getNode();
4890 // fold (sext (build_vector AllConstants) -> (build_vector AllConstants)
4891 // fold (zext (build_vector AllConstants) -> (build_vector AllConstants)
4892 // fold (aext (build_vector AllConstants) -> (build_vector AllConstants)
4893 EVT SVT = VT.getScalarType();
4894 if (!(VT.isVector() &&
4895 (!LegalTypes || (!LegalOperations && TLI.isTypeLegal(SVT))) &&
4896 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
4897 return nullptr;
4899 // We can fold this node into a build_vector.
4900 unsigned VTBits = SVT.getSizeInBits();
4901 unsigned EVTBits = N0->getValueType(0).getScalarType().getSizeInBits();
4902 unsigned ShAmt = VTBits - EVTBits;
4903 SmallVector<SDValue, 8> Elts;
4904 unsigned NumElts = N0->getNumOperands();
4905 SDLoc DL(N);
4907 for (unsigned i=0; i != NumElts; ++i) {
4908 SDValue Op = N0->getOperand(i);
4909 if (Op->getOpcode() == ISD::UNDEF) {
4910 Elts.push_back(DAG.getUNDEF(SVT));
4911 continue;
4912 }
4914 ConstantSDNode *CurrentND = cast<ConstantSDNode>(Op);
4915 const APInt &C = APInt(VTBits, CurrentND->getAPIntValue().getZExtValue());
4916 if (Opcode == ISD::SIGN_EXTEND)
4917 Elts.push_back(DAG.getConstant(C.shl(ShAmt).ashr(ShAmt).getZExtValue(),
4918 SVT));
4919 else
4920 Elts.push_back(DAG.getConstant(C.shl(ShAmt).lshr(ShAmt).getZExtValue(),
4921 SVT));
4922 }
4924 return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Elts).getNode();
4925 }
4927 // ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this:
4928 // "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
4929 // transformation. Returns true if extension are possible and the above
4930 // mentioned transformation is profitable.
4931 static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
4932 unsigned ExtOpc,
4933 SmallVectorImpl<SDNode *> &ExtendNodes,
4934 const TargetLowering &TLI) {
4935 bool HasCopyToRegUses = false;
4936 bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
4937 for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
4938 UE = N0.getNode()->use_end();
4939 UI != UE; ++UI) {
4940 SDNode *User = *UI;
4941 if (User == N)
4942 continue;
4943 if (UI.getUse().getResNo() != N0.getResNo())
4944 continue;
4945 // FIXME: Only extend SETCC N, N and SETCC N, c for now.
4946 if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
4947 ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
4948 if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
4949 // Sign bits will be lost after a zext.
4950 return false;
4951 bool Add = false;
4952 for (unsigned i = 0; i != 2; ++i) {
4953 SDValue UseOp = User->getOperand(i);
4954 if (UseOp == N0)
4955 continue;
4956 if (!isa<ConstantSDNode>(UseOp))
4957 return false;
4958 Add = true;
4959 }
4960 if (Add)
4961 ExtendNodes.push_back(User);
4962 continue;
4963 }
4964 // If truncates aren't free and there are users we can't
4965 // extend, it isn't worthwhile.
4966 if (!isTruncFree)
4967 return false;
4968 // Remember if this value is live-out.
4969 if (User->getOpcode() == ISD::CopyToReg)
4970 HasCopyToRegUses = true;
4971 }
4973 if (HasCopyToRegUses) {
4974 bool BothLiveOut = false;
4975 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
4976 UI != UE; ++UI) {
4977 SDUse &Use = UI.getUse();
4978 if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
4979 BothLiveOut = true;
4980 break;
4981 }
4982 }
4983 if (BothLiveOut)
4984 // Both unextended and extended values are live out. There had better be
4985 // a good reason for the transformation.
4986 return ExtendNodes.size();
4987 }
4988 return true;
4989 }
4991 void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
4992 SDValue Trunc, SDValue ExtLoad, SDLoc DL,
4993 ISD::NodeType ExtType) {
4994 // Extend SetCC uses if necessary.
4995 for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
4996 SDNode *SetCC = SetCCs[i];
4997 SmallVector<SDValue, 4> Ops;
4999 for (unsigned j = 0; j != 2; ++j) {
5000 SDValue SOp = SetCC->getOperand(j);
5001 if (SOp == Trunc)
5002 Ops.push_back(ExtLoad);
5003 else
5004 Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp));
5005 }
5007 Ops.push_back(SetCC->getOperand(2));
5008 CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
5009 }
5010 }
5012 SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
5013 SDValue N0 = N->getOperand(0);
5014 EVT VT = N->getValueType(0);
5016 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
5017 LegalOperations))
5018 return SDValue(Res, 0);
5020 // fold (sext (sext x)) -> (sext x)
5021 // fold (sext (aext x)) -> (sext x)
5022 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
5023 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT,
5024 N0.getOperand(0));
5026 if (N0.getOpcode() == ISD::TRUNCATE) {
5027 // fold (sext (truncate (load x))) -> (sext (smaller load x))
5028 // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
5029 SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
5030 if (NarrowLoad.getNode()) {
5031 SDNode* oye = N0.getNode()->getOperand(0).getNode();
5032 if (NarrowLoad.getNode() != N0.getNode()) {
5033 CombineTo(N0.getNode(), NarrowLoad);
5034 // CombineTo deleted the truncate, if needed, but not what's under it.
5035 AddToWorklist(oye);
5036 }
5037 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5038 }
5040 // See if the value being truncated is already sign extended. If so, just
5041 // eliminate the trunc/sext pair.
5042 SDValue Op = N0.getOperand(0);
5043 unsigned OpBits = Op.getValueType().getScalarType().getSizeInBits();
5044 unsigned MidBits = N0.getValueType().getScalarType().getSizeInBits();
5045 unsigned DestBits = VT.getScalarType().getSizeInBits();
5046 unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
5048 if (OpBits == DestBits) {
5049 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
5050 // bits, it is already ready.
5051 if (NumSignBits > DestBits-MidBits)
5052 return Op;
5053 } else if (OpBits < DestBits) {
5054 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
5055 // bits, just sext from i32.
5056 if (NumSignBits > OpBits-MidBits)
5057 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, Op);
5058 } else {
5059 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
5060 // bits, just truncate to i32.
5061 if (NumSignBits > OpBits-MidBits)
5062 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
5063 }
5065 // fold (sext (truncate x)) -> (sextinreg x).
5066 if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
5067 N0.getValueType())) {
5068 if (OpBits < DestBits)
5069 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op);
5070 else if (OpBits > DestBits)
5071 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op);
5072 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, Op,
5073 DAG.getValueType(N0.getValueType()));
5074 }
5075 }
5077 // fold (sext (load x)) -> (sext (truncate (sextload x)))
5078 // None of the supported targets knows how to perform load and sign extend
5079 // on vectors in one instruction. We only perform this transformation on
5080 // scalars.
5081 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
5082 ISD::isUNINDEXEDLoad(N0.getNode()) &&
5083 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
5084 TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) {
5085 bool DoXform = true;
5086 SmallVector<SDNode*, 4> SetCCs;
5087 if (!N0.hasOneUse())
5088 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
5089 if (DoXform) {
5090 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5091 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
5092 LN0->getChain(),
5093 LN0->getBasePtr(), N0.getValueType(),
5094 LN0->getMemOperand());
5095 CombineTo(N, ExtLoad);
5096 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
5097 N0.getValueType(), ExtLoad);
5098 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
5099 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
5100 ISD::SIGN_EXTEND);
5101 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5102 }
5103 }
5105 // fold (sext (sextload x)) -> (sext (truncate (sextload x)))
5106 // fold (sext ( extload x)) -> (sext (truncate (sextload x)))
5107 if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
5108 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
5109 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5110 EVT MemVT = LN0->getMemoryVT();
5111 if ((!LegalOperations && !LN0->isVolatile()) ||
5112 TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) {
5113 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
5114 LN0->getChain(),
5115 LN0->getBasePtr(), MemVT,
5116 LN0->getMemOperand());
5117 CombineTo(N, ExtLoad);
5118 CombineTo(N0.getNode(),
5119 DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
5120 N0.getValueType(), ExtLoad),
5121 ExtLoad.getValue(1));
5122 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5123 }
5124 }
5126 // fold (sext (and/or/xor (load x), cst)) ->
5127 // (and/or/xor (sextload x), (sext cst))
5128 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
5129 N0.getOpcode() == ISD::XOR) &&
5130 isa<LoadSDNode>(N0.getOperand(0)) &&
5131 N0.getOperand(1).getOpcode() == ISD::Constant &&
5132 TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()) &&
5133 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
5134 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0));
5135 if (LN0->getExtensionType() != ISD::ZEXTLOAD && LN0->isUnindexed()) {
5136 bool DoXform = true;
5137 SmallVector<SDNode*, 4> SetCCs;
5138 if (!N0.hasOneUse())
5139 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::SIGN_EXTEND,
5140 SetCCs, TLI);
5141 if (DoXform) {
5142 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN0), VT,
5143 LN0->getChain(), LN0->getBasePtr(),
5144 LN0->getMemoryVT(),
5145 LN0->getMemOperand());
5146 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
5147 Mask = Mask.sext(VT.getSizeInBits());
5148 SDValue And = DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
5149 ExtLoad, DAG.getConstant(Mask, VT));
5150 SDValue Trunc = DAG.getNode(ISD::TRUNCATE,
5151 SDLoc(N0.getOperand(0)),
5152 N0.getOperand(0).getValueType(), ExtLoad);
5153 CombineTo(N, And);
5154 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1));
5155 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
5156 ISD::SIGN_EXTEND);
5157 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5158 }
5159 }
5160 }
5162 if (N0.getOpcode() == ISD::SETCC) {
5163 EVT N0VT = N0.getOperand(0).getValueType();
5164 // sext(setcc) -> sext_in_reg(vsetcc) for vectors.
5165 // Only do this before legalize for now.
5166 if (VT.isVector() && !LegalOperations &&
5167 TLI.getBooleanContents(N0VT) ==
5168 TargetLowering::ZeroOrNegativeOneBooleanContent) {
5169 // On some architectures (such as SSE/NEON/etc) the SETCC result type is
5170 // of the same size as the compared operands. Only optimize sext(setcc())
5171 // if this is the case.
5172 EVT SVT = getSetCCResultType(N0VT);
5174 // We know that the # elements of the results is the same as the
5175 // # elements of the compare (and the # elements of the compare result
5176 // for that matter). Check to see that they are the same size. If so,
5177 // we know that the element size of the sext'd result matches the
5178 // element size of the compare operands.
5179 if (VT.getSizeInBits() == SVT.getSizeInBits())
5180 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
5181 N0.getOperand(1),
5182 cast<CondCodeSDNode>(N0.getOperand(2))->get());
5184 // If the desired elements are smaller or larger than the source
5185 // elements we can use a matching integer vector type and then
5186 // truncate/sign extend
5187 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger();
5188 if (SVT == MatchingVectorType) {
5189 SDValue VsetCC = DAG.getSetCC(SDLoc(N), MatchingVectorType,
5190 N0.getOperand(0), N0.getOperand(1),
5191 cast<CondCodeSDNode>(N0.getOperand(2))->get());
5192 return DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT);
5193 }
5194 }
5196 // sext(setcc x, y, cc) -> (select (setcc x, y, cc), -1, 0)
5197 unsigned ElementWidth = VT.getScalarType().getSizeInBits();
5198 SDValue NegOne =
5199 DAG.getConstant(APInt::getAllOnesValue(ElementWidth), VT);
5200 SDValue SCC =
5201 SimplifySelectCC(SDLoc(N), N0.getOperand(0), N0.getOperand(1),
5202 NegOne, DAG.getConstant(0, VT),
5203 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
5204 if (SCC.getNode()) return SCC;
5206 if (!VT.isVector()) {
5207 EVT SetCCVT = getSetCCResultType(N0.getOperand(0).getValueType());
5208 if (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, SetCCVT)) {
5209 SDLoc DL(N);
5210 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
5211 SDValue SetCC = DAG.getSetCC(DL,
5212 SetCCVT,
5213 N0.getOperand(0), N0.getOperand(1), CC);
5214 EVT SelectVT = getSetCCResultType(VT);
5215 return DAG.getSelect(DL, VT,
5216 DAG.getSExtOrTrunc(SetCC, DL, SelectVT),
5217 NegOne, DAG.getConstant(0, VT));
5219 }
5220 }
5221 }
5223 // fold (sext x) -> (zext x) if the sign bit is known zero.
5224 if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
5225 DAG.SignBitIsZero(N0))
5226 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0);
5228 return SDValue();
5229 }
5231 // isTruncateOf - If N is a truncate of some other value, return true, record
5232 // the value being truncated in Op and which of Op's bits are zero in KnownZero.
5233 // This function computes KnownZero to avoid a duplicated call to
5234 // computeKnownBits in the caller.
5235 static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
5236 APInt &KnownZero) {
5237 APInt KnownOne;
5238 if (N->getOpcode() == ISD::TRUNCATE) {
5239 Op = N->getOperand(0);
5240 DAG.computeKnownBits(Op, KnownZero, KnownOne);
5241 return true;
5242 }
5244 if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 ||
5245 cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE)
5246 return false;
5248 SDValue Op0 = N->getOperand(0);
5249 SDValue Op1 = N->getOperand(1);
5250 assert(Op0.getValueType() == Op1.getValueType());
5252 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
5253 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
5254 if (COp0 && COp0->isNullValue())
5255 Op = Op1;
5256 else if (COp1 && COp1->isNullValue())
5257 Op = Op0;
5258 else
5259 return false;
5261 DAG.computeKnownBits(Op, KnownZero, KnownOne);
5263 if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue())
5264 return false;
5266 return true;
5267 }
5269 SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
5270 SDValue N0 = N->getOperand(0);
5271 EVT VT = N->getValueType(0);
5273 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
5274 LegalOperations))
5275 return SDValue(Res, 0);
5277 // fold (zext (zext x)) -> (zext x)
5278 // fold (zext (aext x)) -> (zext x)
5279 if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
5280 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT,
5281 N0.getOperand(0));
5283 // fold (zext (truncate x)) -> (zext x) or
5284 // (zext (truncate x)) -> (truncate x)
5285 // This is valid when the truncated bits of x are already zero.
5286 // FIXME: We should extend this to work for vectors too.
5287 SDValue Op;
5288 APInt KnownZero;
5289 if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) {
5290 APInt TruncatedBits =
5291 (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ?
5292 APInt(Op.getValueSizeInBits(), 0) :
5293 APInt::getBitsSet(Op.getValueSizeInBits(),
5294 N0.getValueSizeInBits(),
5295 std::min(Op.getValueSizeInBits(),
5296 VT.getSizeInBits()));
5297 if (TruncatedBits == (KnownZero & TruncatedBits)) {
5298 if (VT.bitsGT(Op.getValueType()))
5299 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Op);
5300 if (VT.bitsLT(Op.getValueType()))
5301 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
5303 return Op;
5304 }
5305 }
5307 // fold (zext (truncate (load x))) -> (zext (smaller load x))
5308 // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
5309 if (N0.getOpcode() == ISD::TRUNCATE) {
5310 SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
5311 if (NarrowLoad.getNode()) {
5312 SDNode* oye = N0.getNode()->getOperand(0).getNode();
5313 if (NarrowLoad.getNode() != N0.getNode()) {
5314 CombineTo(N0.getNode(), NarrowLoad);
5315 // CombineTo deleted the truncate, if needed, but not what's under it.
5316 AddToWorklist(oye);
5317 }
5318 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5319 }
5320 }
5322 // fold (zext (truncate x)) -> (and x, mask)
5323 if (N0.getOpcode() == ISD::TRUNCATE &&
5324 (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
5326 // fold (zext (truncate (load x))) -> (zext (smaller load x))
5327 // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
5328 SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
5329 if (NarrowLoad.getNode()) {
5330 SDNode* oye = N0.getNode()->getOperand(0).getNode();
5331 if (NarrowLoad.getNode() != N0.getNode()) {
5332 CombineTo(N0.getNode(), NarrowLoad);
5333 // CombineTo deleted the truncate, if needed, but not what's under it.
5334 AddToWorklist(oye);
5335 }
5336 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5337 }
5339 SDValue Op = N0.getOperand(0);
5340 if (Op.getValueType().bitsLT(VT)) {
5341 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op);
5342 AddToWorklist(Op.getNode());
5343 } else if (Op.getValueType().bitsGT(VT)) {
5344 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
5345 AddToWorklist(Op.getNode());
5346 }
5347 return DAG.getZeroExtendInReg(Op, SDLoc(N),
5348 N0.getValueType().getScalarType());
5349 }
5351 // Fold (zext (and (trunc x), cst)) -> (and x, cst),
5352 // if either of the casts is not free.
5353 if (N0.getOpcode() == ISD::AND &&
5354 N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
5355 N0.getOperand(1).getOpcode() == ISD::Constant &&
5356 (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
5357 N0.getValueType()) ||
5358 !TLI.isZExtFree(N0.getValueType(), VT))) {
5359 SDValue X = N0.getOperand(0).getOperand(0);
5360 if (X.getValueType().bitsLT(VT)) {
5361 X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(X), VT, X);
5362 } else if (X.getValueType().bitsGT(VT)) {
5363 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
5364 }
5365 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
5366 Mask = Mask.zext(VT.getSizeInBits());
5367 return DAG.getNode(ISD::AND, SDLoc(N), VT,
5368 X, DAG.getConstant(Mask, VT));
5369 }
5371 // fold (zext (load x)) -> (zext (truncate (zextload x)))
5372 // None of the supported targets knows how to perform load and vector_zext
5373 // on vectors in one instruction. We only perform this transformation on
5374 // scalars.
5375 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
5376 ISD::isUNINDEXEDLoad(N0.getNode()) &&
5377 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
5378 TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
5379 bool DoXform = true;
5380 SmallVector<SDNode*, 4> SetCCs;
5381 if (!N0.hasOneUse())
5382 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
5383 if (DoXform) {
5384 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5385 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT,
5386 LN0->getChain(),
5387 LN0->getBasePtr(), N0.getValueType(),
5388 LN0->getMemOperand());
5389 CombineTo(N, ExtLoad);
5390 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
5391 N0.getValueType(), ExtLoad);
5392 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
5394 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
5395 ISD::ZERO_EXTEND);
5396 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5397 }
5398 }
5400 // fold (zext (and/or/xor (load x), cst)) ->
5401 // (and/or/xor (zextload x), (zext cst))
5402 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
5403 N0.getOpcode() == ISD::XOR) &&
5404 isa<LoadSDNode>(N0.getOperand(0)) &&
5405 N0.getOperand(1).getOpcode() == ISD::Constant &&
5406 TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()) &&
5407 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
5408 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0));
5409 if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed()) {
5410 bool DoXform = true;
5411 SmallVector<SDNode*, 4> SetCCs;
5412 if (!N0.hasOneUse())
5413 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::ZERO_EXTEND,
5414 SetCCs, TLI);
5415 if (DoXform) {
5416 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), VT,
5417 LN0->getChain(), LN0->getBasePtr(),
5418 LN0->getMemoryVT(),
5419 LN0->getMemOperand());
5420 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
5421 Mask = Mask.zext(VT.getSizeInBits());
5422 SDValue And = DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
5423 ExtLoad, DAG.getConstant(Mask, VT));
5424 SDValue Trunc = DAG.getNode(ISD::TRUNCATE,
5425 SDLoc(N0.getOperand(0)),
5426 N0.getOperand(0).getValueType(), ExtLoad);
5427 CombineTo(N, And);
5428 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1));
5429 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
5430 ISD::ZERO_EXTEND);
5431 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5432 }
5433 }
5434 }
5436 // fold (zext (zextload x)) -> (zext (truncate (zextload x)))
5437 // fold (zext ( extload x)) -> (zext (truncate (zextload x)))
5438 if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
5439 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
5440 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5441 EVT MemVT = LN0->getMemoryVT();
5442 if ((!LegalOperations && !LN0->isVolatile()) ||
5443 TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
5444 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT,
5445 LN0->getChain(),
5446 LN0->getBasePtr(), MemVT,
5447 LN0->getMemOperand());
5448 CombineTo(N, ExtLoad);
5449 CombineTo(N0.getNode(),
5450 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(),
5451 ExtLoad),
5452 ExtLoad.getValue(1));
5453 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5454 }
5455 }
5457 if (N0.getOpcode() == ISD::SETCC) {
5458 if (!LegalOperations && VT.isVector() &&
5459 N0.getValueType().getVectorElementType() == MVT::i1) {
5460 EVT N0VT = N0.getOperand(0).getValueType();
5461 if (getSetCCResultType(N0VT) == N0.getValueType())
5462 return SDValue();
5464 // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors.
5465 // Only do this before legalize for now.
5466 EVT EltVT = VT.getVectorElementType();
5467 SmallVector<SDValue,8> OneOps(VT.getVectorNumElements(),
5468 DAG.getConstant(1, EltVT));
5469 if (VT.getSizeInBits() == N0VT.getSizeInBits())
5470 // We know that the # elements of the results is the same as the
5471 // # elements of the compare (and the # elements of the compare result
5472 // for that matter). Check to see that they are the same size. If so,
5473 // we know that the element size of the sext'd result matches the
5474 // element size of the compare operands.
5475 return DAG.getNode(ISD::AND, SDLoc(N), VT,
5476 DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
5477 N0.getOperand(1),
5478 cast<CondCodeSDNode>(N0.getOperand(2))->get()),
5479 DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT,
5480 OneOps));
5482 // If the desired elements are smaller or larger than the source
5483 // elements we can use a matching integer vector type and then
5484 // truncate/sign extend
5485 EVT MatchingElementType =
5486 EVT::getIntegerVT(*DAG.getContext(),
5487 N0VT.getScalarType().getSizeInBits());
5488 EVT MatchingVectorType =
5489 EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
5490 N0VT.getVectorNumElements());
5491 SDValue VsetCC =
5492 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
5493 N0.getOperand(1),
5494 cast<CondCodeSDNode>(N0.getOperand(2))->get());
5495 return DAG.getNode(ISD::AND, SDLoc(N), VT,
5496 DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT),
5497 DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, OneOps));
5498 }
5500 // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
5501 SDValue SCC =
5502 SimplifySelectCC(SDLoc(N), N0.getOperand(0), N0.getOperand(1),
5503 DAG.getConstant(1, VT), DAG.getConstant(0, VT),
5504 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
5505 if (SCC.getNode()) return SCC;
5506 }
5508 // (zext (shl (zext x), cst)) -> (shl (zext x), cst)
5509 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) &&
5510 isa<ConstantSDNode>(N0.getOperand(1)) &&
5511 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
5512 N0.hasOneUse()) {
5513 SDValue ShAmt = N0.getOperand(1);
5514 unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue();
5515 if (N0.getOpcode() == ISD::SHL) {
5516 SDValue InnerZExt = N0.getOperand(0);
5517 // If the original shl may be shifting out bits, do not perform this
5518 // transformation.
5519 unsigned KnownZeroBits = InnerZExt.getValueType().getSizeInBits() -
5520 InnerZExt.getOperand(0).getValueType().getSizeInBits();
5521 if (ShAmtVal > KnownZeroBits)
5522 return SDValue();
5523 }
5525 SDLoc DL(N);
5527 // Ensure that the shift amount is wide enough for the shifted value.
5528 if (VT.getSizeInBits() >= 256)
5529 ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);
5531 return DAG.getNode(N0.getOpcode(), DL, VT,
5532 DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
5533 ShAmt);
5534 }
5536 return SDValue();
5537 }
5539 SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
5540 SDValue N0 = N->getOperand(0);
5541 EVT VT = N->getValueType(0);
5543 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
5544 LegalOperations))
5545 return SDValue(Res, 0);
5547 // fold (aext (aext x)) -> (aext x)
5548 // fold (aext (zext x)) -> (zext x)
5549 // fold (aext (sext x)) -> (sext x)
5550 if (N0.getOpcode() == ISD::ANY_EXTEND ||
5551 N0.getOpcode() == ISD::ZERO_EXTEND ||
5552 N0.getOpcode() == ISD::SIGN_EXTEND)
5553 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
5555 // fold (aext (truncate (load x))) -> (aext (smaller load x))
5556 // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
5557 if (N0.getOpcode() == ISD::TRUNCATE) {
5558 SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
5559 if (NarrowLoad.getNode()) {
5560 SDNode* oye = N0.getNode()->getOperand(0).getNode();
5561 if (NarrowLoad.getNode() != N0.getNode()) {
5562 CombineTo(N0.getNode(), NarrowLoad);
5563 // CombineTo deleted the truncate, if needed, but not what's under it.
5564 AddToWorklist(oye);
5565 }
5566 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5567 }
5568 }
5570 // fold (aext (truncate x))
5571 if (N0.getOpcode() == ISD::TRUNCATE) {
5572 SDValue TruncOp = N0.getOperand(0);
5573 if (TruncOp.getValueType() == VT)
5574 return TruncOp; // x iff x size == zext size.
5575 if (TruncOp.getValueType().bitsGT(VT))
5576 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, TruncOp);
5577 return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, TruncOp);
5578 }
5580 // Fold (aext (and (trunc x), cst)) -> (and x, cst)
5581 // if the trunc is not free.
5582 if (N0.getOpcode() == ISD::AND &&
5583 N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
5584 N0.getOperand(1).getOpcode() == ISD::Constant &&
5585 !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
5586 N0.getValueType())) {
5587 SDValue X = N0.getOperand(0).getOperand(0);
5588 if (X.getValueType().bitsLT(VT)) {
5589 X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, X);
5590 } else if (X.getValueType().bitsGT(VT)) {
5591 X = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, X);
5592 }
5593 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
5594 Mask = Mask.zext(VT.getSizeInBits());
5595 return DAG.getNode(ISD::AND, SDLoc(N), VT,
5596 X, DAG.getConstant(Mask, VT));
5597 }
5599 // fold (aext (load x)) -> (aext (truncate (extload x)))
5600 // None of the supported targets knows how to perform load and any_ext
5601 // on vectors in one instruction. We only perform this transformation on
5602 // scalars.
5603 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
5604 ISD::isUNINDEXEDLoad(N0.getNode()) &&
5605 TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType())) {
5606 bool DoXform = true;
5607 SmallVector<SDNode*, 4> SetCCs;
5608 if (!N0.hasOneUse())
5609 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
5610 if (DoXform) {
5611 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5612 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
5613 LN0->getChain(),
5614 LN0->getBasePtr(), N0.getValueType(),
5615 LN0->getMemOperand());
5616 CombineTo(N, ExtLoad);
5617 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
5618 N0.getValueType(), ExtLoad);
5619 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
5620 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
5621 ISD::ANY_EXTEND);
5622 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5623 }
5624 }
5626 // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
5627 // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
5628 // fold (aext ( extload x)) -> (aext (truncate (extload x)))
5629 if (N0.getOpcode() == ISD::LOAD &&
5630 !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
5631 N0.hasOneUse()) {
5632 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5633 ISD::LoadExtType ExtType = LN0->getExtensionType();
5634 EVT MemVT = LN0->getMemoryVT();
5635 if (!LegalOperations || TLI.isLoadExtLegal(ExtType, MemVT)) {
5636 SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N),
5637 VT, LN0->getChain(), LN0->getBasePtr(),
5638 MemVT, LN0->getMemOperand());
5639 CombineTo(N, ExtLoad);
5640 CombineTo(N0.getNode(),
5641 DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
5642 N0.getValueType(), ExtLoad),
5643 ExtLoad.getValue(1));
5644 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5645 }
5646 }
5648 if (N0.getOpcode() == ISD::SETCC) {
5649 // For vectors:
5650 // aext(setcc) -> vsetcc
5651 // aext(setcc) -> truncate(vsetcc)
5652 // aext(setcc) -> aext(vsetcc)
5653 // Only do this before legalize for now.
5654 if (VT.isVector() && !LegalOperations) {
5655 EVT N0VT = N0.getOperand(0).getValueType();
5656 // We know that the # elements of the results is the same as the
5657 // # elements of the compare (and the # elements of the compare result
5658 // for that matter). Check to see that they are the same size. If so,
5659 // we know that the element size of the sext'd result matches the
5660 // element size of the compare operands.
5661 if (VT.getSizeInBits() == N0VT.getSizeInBits())
5662 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
5663 N0.getOperand(1),
5664 cast<CondCodeSDNode>(N0.getOperand(2))->get());
5665 // If the desired elements are smaller or larger than the source
5666 // elements we can use a matching integer vector type and then
5667 // truncate/any extend
5668 else {
5669 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger();
5670 SDValue VsetCC =
5671 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
5672 N0.getOperand(1),
5673 cast<CondCodeSDNode>(N0.getOperand(2))->get());
5674 return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
5675 }
5676 }
5678 // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
5679 SDValue SCC =
5680 SimplifySelectCC(SDLoc(N), N0.getOperand(0), N0.getOperand(1),
5681 DAG.getConstant(1, VT), DAG.getConstant(0, VT),
5682 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
5683 if (SCC.getNode())
5684 return SCC;
5685 }
5687 return SDValue();
5688 }
5690 /// See if the specified operand can be simplified with the knowledge that only
5691 /// the bits specified by Mask are used. If so, return the simpler operand,
5692 /// otherwise return a null SDValue.
5693 SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
5694 switch (V.getOpcode()) {
5695 default: break;
5696 case ISD::Constant: {
5697 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
5698 assert(CV && "Const value should be ConstSDNode.");
5699 const APInt &CVal = CV->getAPIntValue();
5700 APInt NewVal = CVal & Mask;
5701 if (NewVal != CVal)
5702 return DAG.getConstant(NewVal, V.getValueType());
5703 break;
5704 }
5705 case ISD::OR:
5706 case ISD::XOR:
5707 // If the LHS or RHS don't contribute bits to the or, drop them.
5708 if (DAG.MaskedValueIsZero(V.getOperand(0), Mask))
5709 return V.getOperand(1);
5710 if (DAG.MaskedValueIsZero(V.getOperand(1), Mask))
5711 return V.getOperand(0);
5712 break;
5713 case ISD::SRL:
5714 // Only look at single-use SRLs.
5715 if (!V.getNode()->hasOneUse())
5716 break;
5717 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
5718 // See if we can recursively simplify the LHS.
5719 unsigned Amt = RHSC->getZExtValue();
5721 // Watch out for shift count overflow though.
5722 if (Amt >= Mask.getBitWidth()) break;
5723 APInt NewMask = Mask << Amt;
5724 SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask);
5725 if (SimplifyLHS.getNode())
5726 return DAG.getNode(ISD::SRL, SDLoc(V), V.getValueType(),
5727 SimplifyLHS, V.getOperand(1));
5728 }
5729 }
5730 return SDValue();
5731 }
5733 /// If the result of a wider load is shifted to right of N bits and then
5734 /// truncated to a narrower type and where N is a multiple of number of bits of
5735 /// the narrower type, transform it to a narrower load from address + N / num of
5736 /// bits of new type. If the result is to be extended, also fold the extension
5737 /// to form a extending load.
5738 SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
5739 unsigned Opc = N->getOpcode();
5741 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
5742 SDValue N0 = N->getOperand(0);
5743 EVT VT = N->getValueType(0);
5744 EVT ExtVT = VT;
5746 // This transformation isn't valid for vector loads.
5747 if (VT.isVector())
5748 return SDValue();
5750 // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
5751 // extended to VT.
5752 if (Opc == ISD::SIGN_EXTEND_INREG) {
5753 ExtType = ISD::SEXTLOAD;
5754 ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
5755 } else if (Opc == ISD::SRL) {
5756 // Another special-case: SRL is basically zero-extending a narrower value.
5757 ExtType = ISD::ZEXTLOAD;
5758 N0 = SDValue(N, 0);
5759 ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5760 if (!N01) return SDValue();
5761 ExtVT = EVT::getIntegerVT(*DAG.getContext(),
5762 VT.getSizeInBits() - N01->getZExtValue());
5763 }
5764 if (LegalOperations && !TLI.isLoadExtLegal(ExtType, ExtVT))
5765 return SDValue();
5767 unsigned EVTBits = ExtVT.getSizeInBits();
5769 // Do not generate loads of non-round integer types since these can
5770 // be expensive (and would be wrong if the type is not byte sized).
5771 if (!ExtVT.isRound())
5772 return SDValue();
5774 unsigned ShAmt = 0;
5775 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
5776 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
5777 ShAmt = N01->getZExtValue();
5778 // Is the shift amount a multiple of size of VT?
5779 if ((ShAmt & (EVTBits-1)) == 0) {
5780 N0 = N0.getOperand(0);
5781 // Is the load width a multiple of size of VT?
5782 if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
5783 return SDValue();
5784 }
5786 // At this point, we must have a load or else we can't do the transform.
5787 if (!isa<LoadSDNode>(N0)) return SDValue();
5789 // Because a SRL must be assumed to *need* to zero-extend the high bits
5790 // (as opposed to anyext the high bits), we can't combine the zextload
5791 // lowering of SRL and an sextload.
5792 if (cast<LoadSDNode>(N0)->getExtensionType() == ISD::SEXTLOAD)
5793 return SDValue();
5795 // If the shift amount is larger than the input type then we're not
5796 // accessing any of the loaded bytes. If the load was a zextload/extload
5797 // then the result of the shift+trunc is zero/undef (handled elsewhere).
5798 if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits())
5799 return SDValue();
5800 }
5801 }
5803 // If the load is shifted left (and the result isn't shifted back right),
5804 // we can fold the truncate through the shift.
5805 unsigned ShLeftAmt = 0;
5806 if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
5807 ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
5808 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
5809 ShLeftAmt = N01->getZExtValue();
5810 N0 = N0.getOperand(0);
5811 }
5812 }
5814 // If we haven't found a load, we can't narrow it. Don't transform one with
5815 // multiple uses, this would require adding a new load.
5816 if (!isa<LoadSDNode>(N0) || !N0.hasOneUse())
5817 return SDValue();
5819 // Don't change the width of a volatile load.
5820 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5821 if (LN0->isVolatile())
5822 return SDValue();
5824 // Verify that we are actually reducing a load width here.
5825 if (LN0->getMemoryVT().getSizeInBits() < EVTBits)
5826 return SDValue();
5828 // For the transform to be legal, the load must produce only two values
5829 // (the value loaded and the chain). Don't transform a pre-increment
5830 // load, for example, which produces an extra value. Otherwise the
5831 // transformation is not equivalent, and the downstream logic to replace
5832 // uses gets things wrong.
5833 if (LN0->getNumValues() > 2)
5834 return SDValue();
5836 // If the load that we're shrinking is an extload and we're not just
5837 // discarding the extension we can't simply shrink the load. Bail.
5838 // TODO: It would be possible to merge the extensions in some cases.
5839 if (LN0->getExtensionType() != ISD::NON_EXTLOAD &&
5840 LN0->getMemoryVT().getSizeInBits() < ExtVT.getSizeInBits() + ShAmt)
5841 return SDValue();
5843 EVT PtrType = N0.getOperand(1).getValueType();
5845 if (PtrType == MVT::Untyped || PtrType.isExtended())
5846 // It's not possible to generate a constant of extended or untyped type.
5847 return SDValue();
5849 // For big endian targets, we need to adjust the offset to the pointer to
5850 // load the correct bytes.
5851 if (TLI.isBigEndian()) {
5852 unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
5853 unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
5854 ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
5855 }
5857 uint64_t PtrOff = ShAmt / 8;
5858 unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
5859 SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LN0),
5860 PtrType, LN0->getBasePtr(),
5861 DAG.getConstant(PtrOff, PtrType));
5862 AddToWorklist(NewPtr.getNode());
5864 SDValue Load;
5865 if (ExtType == ISD::NON_EXTLOAD)
5866 Load = DAG.getLoad(VT, SDLoc(N0), LN0->getChain(), NewPtr,
5867 LN0->getPointerInfo().getWithOffset(PtrOff),
5868 LN0->isVolatile(), LN0->isNonTemporal(),
5869 LN0->isInvariant(), NewAlign, LN0->getAAInfo());
5870 else
5871 Load = DAG.getExtLoad(ExtType, SDLoc(N0), VT, LN0->getChain(),NewPtr,
5872 LN0->getPointerInfo().getWithOffset(PtrOff),
5873 ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
5874 LN0->isInvariant(), NewAlign, LN0->getAAInfo());
5876 // Replace the old load's chain with the new load's chain.
5877 WorklistRemover DeadNodes(*this);
5878 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
5880 // Shift the result left, if we've swallowed a left shift.
5881 SDValue Result = Load;
5882 if (ShLeftAmt != 0) {
5883 EVT ShImmTy = getShiftAmountTy(Result.getValueType());
5884 if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt))
5885 ShImmTy = VT;
5886 // If the shift amount is as large as the result size (but, presumably,
5887 // no larger than the source) then the useful bits of the result are
5888 // zero; we can't simply return the shortened shift, because the result
5889 // of that operation is undefined.
5890 if (ShLeftAmt >= VT.getSizeInBits())
5891 Result = DAG.getConstant(0, VT);
5892 else
5893 Result = DAG.getNode(ISD::SHL, SDLoc(N0), VT,
5894 Result, DAG.getConstant(ShLeftAmt, ShImmTy));
5895 }
5897 // Return the new loaded value.
5898 return Result;
5899 }
5901 SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
5902 SDValue N0 = N->getOperand(0);
5903 SDValue N1 = N->getOperand(1);
5904 EVT VT = N->getValueType(0);
5905 EVT EVT = cast<VTSDNode>(N1)->getVT();
5906 unsigned VTBits = VT.getScalarType().getSizeInBits();
5907 unsigned EVTBits = EVT.getScalarType().getSizeInBits();
5909 // fold (sext_in_reg c1) -> c1
5910 if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)
5911 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1);
5913 // If the input is already sign extended, just drop the extension.
5914 if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1)
5915 return N0;
5917 // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
5918 if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
5919 EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT()))
5920 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
5921 N0.getOperand(0), N1);
5923 // fold (sext_in_reg (sext x)) -> (sext x)
5924 // fold (sext_in_reg (aext x)) -> (sext x)
5925 // if x is small enough.
5926 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
5927 SDValue N00 = N0.getOperand(0);
5928 if (N00.getValueType().getScalarType().getSizeInBits() <= EVTBits &&
5929 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
5930 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
5931 }
5933 // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
5934 if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits)))
5935 return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT);
5937 // fold operands of sext_in_reg based on knowledge that the top bits are not
5938 // demanded.
5939 if (SimplifyDemandedBits(SDValue(N, 0)))
5940 return SDValue(N, 0);
5942 // fold (sext_in_reg (load x)) -> (smaller sextload x)
5943 // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
5944 SDValue NarrowLoad = ReduceLoadWidth(N);
5945 if (NarrowLoad.getNode())
5946 return NarrowLoad;
5948 // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
5949 // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
5950 // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
5951 if (N0.getOpcode() == ISD::SRL) {
5952 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
5953 if (ShAmt->getZExtValue()+EVTBits <= VTBits) {
5954 // We can turn this into an SRA iff the input to the SRL is already sign
5955 // extended enough.
5956 unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
5957 if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits)
5958 return DAG.getNode(ISD::SRA, SDLoc(N), VT,
5959 N0.getOperand(0), N0.getOperand(1));
5960 }
5961 }
5963 // fold (sext_inreg (extload x)) -> (sextload x)
5964 if (ISD::isEXTLoad(N0.getNode()) &&
5965 ISD::isUNINDEXEDLoad(N0.getNode()) &&
5966 EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
5967 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
5968 TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
5969 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5970 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
5971 LN0->getChain(),
5972 LN0->getBasePtr(), EVT,
5973 LN0->getMemOperand());
5974 CombineTo(N, ExtLoad);
5975 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
5976 AddToWorklist(ExtLoad.getNode());
5977 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5978 }
5979 // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
5980 if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
5981 N0.hasOneUse() &&
5982 EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
5983 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
5984 TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
5985 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5986 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
5987 LN0->getChain(),
5988 LN0->getBasePtr(), EVT,
5989 LN0->getMemOperand());
5990 CombineTo(N, ExtLoad);
5991 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
5992 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5993 }
5995 // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
5996 if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
5997 SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
5998 N0.getOperand(1), false);
5999 if (BSwap.getNode())
6000 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
6001 BSwap, N1);
6002 }
6004 // Fold a sext_inreg of a build_vector of ConstantSDNodes or undefs
6005 // into a build_vector.
6006 if (ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
6007 SmallVector<SDValue, 8> Elts;
6008 unsigned NumElts = N0->getNumOperands();
6009 unsigned ShAmt = VTBits - EVTBits;
6011 for (unsigned i = 0; i != NumElts; ++i) {
6012 SDValue Op = N0->getOperand(i);
6013 if (Op->getOpcode() == ISD::UNDEF) {
6014 Elts.push_back(Op);
6015 continue;
6016 }
6018 ConstantSDNode *CurrentND = cast<ConstantSDNode>(Op);
6019 const APInt &C = APInt(VTBits, CurrentND->getAPIntValue().getZExtValue());
6020 Elts.push_back(DAG.getConstant(C.shl(ShAmt).ashr(ShAmt).getZExtValue(),
6021 Op.getValueType()));
6022 }
6024 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Elts);
6025 }
6027 return SDValue();
6028 }
6030 SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
6031 SDValue N0 = N->getOperand(0);
6032 EVT VT = N->getValueType(0);
6033 bool isLE = TLI.isLittleEndian();
6035 // noop truncate
6036 if (N0.getValueType() == N->getValueType(0))
6037 return N0;
6038 // fold (truncate c1) -> c1
6039 if (isa<ConstantSDNode>(N0))
6040 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0);
6041 // fold (truncate (truncate x)) -> (truncate x)
6042 if (N0.getOpcode() == ISD::TRUNCATE)
6043 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
6044 // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
6045 if (N0.getOpcode() == ISD::ZERO_EXTEND ||
6046 N0.getOpcode() == ISD::SIGN_EXTEND ||
6047 N0.getOpcode() == ISD::ANY_EXTEND) {
6048 if (N0.getOperand(0).getValueType().bitsLT(VT))
6049 // if the source is smaller than the dest, we still need an extend
6050 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
6051 N0.getOperand(0));
6052 if (N0.getOperand(0).getValueType().bitsGT(VT))
6053 // if the source is larger than the dest, than we just need the truncate
6054 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
6055 // if the source and dest are the same type, we can drop both the extend
6056 // and the truncate.
6057 return N0.getOperand(0);
6058 }
6060 // Fold extract-and-trunc into a narrow extract. For example:
6061 // i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
6062 // i32 y = TRUNCATE(i64 x)
6063 // -- becomes --
6064 // v16i8 b = BITCAST (v2i64 val)
6065 // i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
6066 //
6067 // Note: We only run this optimization after type legalization (which often
6068 // creates this pattern) and before operation legalization after which
6069 // we need to be more careful about the vector instructions that we generate.
6070 if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6071 LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
6073 EVT VecTy = N0.getOperand(0).getValueType();
6074 EVT ExTy = N0.getValueType();
6075 EVT TrTy = N->getValueType(0);
6077 unsigned NumElem = VecTy.getVectorNumElements();
6078 unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
6080 EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
6081 assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
6083 SDValue EltNo = N0->getOperand(1);
6084 if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
6085 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
6086 EVT IndexTy = TLI.getVectorIdxTy();
6087 int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
6089 SDValue V = DAG.getNode(ISD::BITCAST, SDLoc(N),
6090 NVT, N0.getOperand(0));
6092 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
6093 SDLoc(N), TrTy, V,
6094 DAG.getConstant(Index, IndexTy));
6095 }
6096 }
6098 // trunc (select c, a, b) -> select c, (trunc a), (trunc b)
6099 if (N0.getOpcode() == ISD::SELECT) {
6100 EVT SrcVT = N0.getValueType();
6101 if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) &&
6102 TLI.isTruncateFree(SrcVT, VT)) {
6103 SDLoc SL(N0);
6104 SDValue Cond = N0.getOperand(0);
6105 SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
6106 SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2));
6107 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1);
6108 }
6109 }
6111 // Fold a series of buildvector, bitcast, and truncate if possible.
6112 // For example fold
6113 // (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
6114 // (2xi32 (buildvector x, y)).
6115 if (Level == AfterLegalizeVectorOps && VT.isVector() &&
6116 N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
6117 N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
6118 N0.getOperand(0).hasOneUse()) {
6120 SDValue BuildVect = N0.getOperand(0);
6121 EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType();
6122 EVT TruncVecEltTy = VT.getVectorElementType();
6124 // Check that the element types match.
6125 if (BuildVectEltTy == TruncVecEltTy) {
6126 // Now we only need to compute the offset of the truncated elements.
6127 unsigned BuildVecNumElts = BuildVect.getNumOperands();
6128 unsigned TruncVecNumElts = VT.getVectorNumElements();
6129 unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts;
6131 assert((BuildVecNumElts % TruncVecNumElts) == 0 &&
6132 "Invalid number of elements");
6134 SmallVector<SDValue, 8> Opnds;
6135 for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
6136 Opnds.push_back(BuildVect.getOperand(i));
6138 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
6139 }
6140 }
6142 // See if we can simplify the input to this truncate through knowledge that
6143 // only the low bits are being used.
6144 // For example "trunc (or (shl x, 8), y)" // -> trunc y
6145 // Currently we only perform this optimization on scalars because vectors
6146 // may have different active low bits.
6147 if (!VT.isVector()) {
6148 SDValue Shorter =
6149 GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
6150 VT.getSizeInBits()));
6151 if (Shorter.getNode())
6152 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter);
6153 }
6154 // fold (truncate (load x)) -> (smaller load x)
6155 // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
6156 if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
6157 SDValue Reduced = ReduceLoadWidth(N);
6158 if (Reduced.getNode())
6159 return Reduced;
6160 // Handle the case where the load remains an extending load even
6161 // after truncation.
6162 if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
6163 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
6164 if (!LN0->isVolatile() &&
6165 LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
6166 SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
6167 VT, LN0->getChain(), LN0->getBasePtr(),
6168 LN0->getMemoryVT(),
6169 LN0->getMemOperand());
6170 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
6171 return NewLoad;
6172 }
6173 }
6174 }
6175 // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...)),
6176 // where ... are all 'undef'.
6177 if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
6178 SmallVector<EVT, 8> VTs;
6179 SDValue V;
6180 unsigned Idx = 0;
6181 unsigned NumDefs = 0;
6183 for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
6184 SDValue X = N0.getOperand(i);
6185 if (X.getOpcode() != ISD::UNDEF) {
6186 V = X;
6187 Idx = i;
6188 NumDefs++;
6189 }
6190 // Stop if more than one members are non-undef.
6191 if (NumDefs > 1)
6192 break;
6193 VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
6194 VT.getVectorElementType(),
6195 X.getValueType().getVectorNumElements()));
6196 }
6198 if (NumDefs == 0)
6199 return DAG.getUNDEF(VT);
6201 if (NumDefs == 1) {
6202 assert(V.getNode() && "The single defined operand is empty!");
6203 SmallVector<SDValue, 8> Opnds;
6204 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
6205 if (i != Idx) {
6206 Opnds.push_back(DAG.getUNDEF(VTs[i]));
6207 continue;
6208 }
6209 SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
6210 AddToWorklist(NV.getNode());
6211 Opnds.push_back(NV);
6212 }
6213 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
6214 }
6215 }
6217 // Simplify the operands using demanded-bits information.
6218 if (!VT.isVector() &&
6219 SimplifyDemandedBits(SDValue(N, 0)))
6220 return SDValue(N, 0);
6222 return SDValue();
6223 }
6225 static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
6226 SDValue Elt = N->getOperand(i);
6227 if (Elt.getOpcode() != ISD::MERGE_VALUES)
6228 return Elt.getNode();
6229 return Elt.getOperand(Elt.getResNo()).getNode();
6230 }
6232 /// build_pair (load, load) -> load
6233 /// if load locations are consecutive.
6234 SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
6235 assert(N->getOpcode() == ISD::BUILD_PAIR);
6237 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
6238 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
6239 if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
6240 LD1->getAddressSpace() != LD2->getAddressSpace())
6241 return SDValue();
6242 EVT LD1VT = LD1->getValueType(0);
6244 if (ISD::isNON_EXTLoad(LD2) &&
6245 LD2->hasOneUse() &&
6246 // If both are volatile this would reduce the number of volatile loads.
6247 // If one is volatile it might be ok, but play conservative and bail out.
6248 !LD1->isVolatile() &&
6249 !LD2->isVolatile() &&
6250 DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
6251 unsigned Align = LD1->getAlignment();
6252 unsigned NewAlign = TLI.getDataLayout()->
6253 getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
6255 if (NewAlign <= Align &&
6256 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
6257 return DAG.getLoad(VT, SDLoc(N), LD1->getChain(),
6258 LD1->getBasePtr(), LD1->getPointerInfo(),
6259 false, false, false, Align);
6260 }
6262 return SDValue();
6263 }
6265 SDValue DAGCombiner::visitBITCAST(SDNode *N) {
6266 SDValue N0 = N->getOperand(0);
6267 EVT VT = N->getValueType(0);
6269 // If the input is a BUILD_VECTOR with all constant elements, fold this now.
6270 // Only do this before legalize, since afterward the target may be depending
6271 // on the bitconvert.
6272 // First check to see if this is all constant.
6273 if (!LegalTypes &&
6274 N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
6275 VT.isVector()) {
6276 bool isSimple = cast<BuildVectorSDNode>(N0)->isConstant();
6278 EVT DestEltVT = N->getValueType(0).getVectorElementType();
6279 assert(!DestEltVT.isVector() &&
6280 "Element type of vector ValueType must not be vector!");
6281 if (isSimple)
6282 return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
6283 }
6285 // If the input is a constant, let getNode fold it.
6286 if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
6287 SDValue Res = DAG.getNode(ISD::BITCAST, SDLoc(N), VT, N0);
6288 if (Res.getNode() != N) {
6289 if (!LegalOperations ||
6290 TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
6291 return Res;
6293 // Folding it resulted in an illegal node, and it's too late to
6294 // do that. Clean up the old node and forego the transformation.
6295 // Ideally this won't happen very often, because instcombine
6296 // and the earlier dagcombine runs (where illegal nodes are
6297 // permitted) should have folded most of them already.
6298 deleteAndRecombine(Res.getNode());
6299 }
6300 }
6302 // (conv (conv x, t1), t2) -> (conv x, t2)
6303 if (N0.getOpcode() == ISD::BITCAST)
6304 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT,
6305 N0.getOperand(0));
6307 // fold (conv (load x)) -> (load (conv*)x)
6308 // If the resultant load doesn't need a higher alignment than the original!
6309 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
6310 // Do not change the width of a volatile load.
6311 !cast<LoadSDNode>(N0)->isVolatile() &&
6312 // Do not remove the cast if the types differ in endian layout.
6313 TLI.hasBigEndianPartOrdering(N0.getValueType()) ==
6314 TLI.hasBigEndianPartOrdering(VT) &&
6315 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
6316 TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
6317 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
6318 unsigned Align = TLI.getDataLayout()->
6319 getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
6320 unsigned OrigAlign = LN0->getAlignment();
6322 if (Align <= OrigAlign) {
6323 SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(),
6324 LN0->getBasePtr(), LN0->getPointerInfo(),
6325 LN0->isVolatile(), LN0->isNonTemporal(),
6326 LN0->isInvariant(), OrigAlign,
6327 LN0->getAAInfo());
6328 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
6329 return Load;
6330 }
6331 }
6333 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6334 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6335 // This often reduces constant pool loads.
6336 if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
6337 (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
6338 N0.getNode()->hasOneUse() && VT.isInteger() &&
6339 !VT.isVector() && !N0.getValueType().isVector()) {
6340 SDValue NewConv = DAG.getNode(ISD::BITCAST, SDLoc(N0), VT,
6341 N0.getOperand(0));
6342 AddToWorklist(NewConv.getNode());
6344 APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
6345 if (N0.getOpcode() == ISD::FNEG)
6346 return DAG.getNode(ISD::XOR, SDLoc(N), VT,
6347 NewConv, DAG.getConstant(SignBit, VT));
6348 assert(N0.getOpcode() == ISD::FABS);
6349 return DAG.getNode(ISD::AND, SDLoc(N), VT,
6350 NewConv, DAG.getConstant(~SignBit, VT));
6351 }
6353 // fold (bitconvert (fcopysign cst, x)) ->
6354 // (or (and (bitconvert x), sign), (and cst, (not sign)))
6355 // Note that we don't handle (copysign x, cst) because this can always be
6356 // folded to an fneg or fabs.
6357 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
6358 isa<ConstantFPSDNode>(N0.getOperand(0)) &&
6359 VT.isInteger() && !VT.isVector()) {
6360 unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
6361 EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
6362 if (isTypeLegal(IntXVT)) {
6363 SDValue X = DAG.getNode(ISD::BITCAST, SDLoc(N0),
6364 IntXVT, N0.getOperand(1));
6365 AddToWorklist(X.getNode());
6367 // If X has a different width than the result/lhs, sext it or truncate it.
6368 unsigned VTWidth = VT.getSizeInBits();
6369 if (OrigXWidth < VTWidth) {
6370 X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X);
6371 AddToWorklist(X.getNode());
6372 } else if (OrigXWidth > VTWidth) {
6373 // To get the sign bit in the right place, we have to shift it right
6374 // before truncating.
6375 X = DAG.getNode(ISD::SRL, SDLoc(X),
6376 X.getValueType(), X,
6377 DAG.getConstant(OrigXWidth-VTWidth, X.getValueType()));
6378 AddToWorklist(X.getNode());
6379 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
6380 AddToWorklist(X.getNode());
6381 }
6383 APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
6384 X = DAG.getNode(ISD::AND, SDLoc(X), VT,
6385 X, DAG.getConstant(SignBit, VT));
6386 AddToWorklist(X.getNode());
6388 SDValue Cst = DAG.getNode(ISD::BITCAST, SDLoc(N0),
6389 VT, N0.getOperand(0));
6390 Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
6391 Cst, DAG.getConstant(~SignBit, VT));
6392 AddToWorklist(Cst.getNode());
6394 return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst);
6395 }
6396 }
6398 // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
6399 if (N0.getOpcode() == ISD::BUILD_PAIR) {
6400 SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
6401 if (CombineLD.getNode())
6402 return CombineLD;
6403 }
6405 return SDValue();
6406 }
6408 SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
6409 EVT VT = N->getValueType(0);
6410 return CombineConsecutiveLoads(N, VT);
6411 }
6413 /// We know that BV is a build_vector node with Constant, ConstantFP or Undef
6414 /// operands. DstEltVT indicates the destination element value type.
6415 SDValue DAGCombiner::
6416 ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
6417 EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
6419 // If this is already the right type, we're done.
6420 if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
6422 unsigned SrcBitSize = SrcEltVT.getSizeInBits();
6423 unsigned DstBitSize = DstEltVT.getSizeInBits();
6425 // If this is a conversion of N elements of one type to N elements of another
6426 // type, convert each element. This handles FP<->INT cases.
6427 if (SrcBitSize == DstBitSize) {
6428 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
6429 BV->getValueType(0).getVectorNumElements());
6431 // Due to the FP element handling below calling this routine recursively,
6432 // we can end up with a scalar-to-vector node here.
6433 if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
6434 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
6435 DAG.getNode(ISD::BITCAST, SDLoc(BV),
6436 DstEltVT, BV->getOperand(0)));
6438 SmallVector<SDValue, 8> Ops;
6439 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
6440 SDValue Op = BV->getOperand(i);
6441 // If the vector element type is not legal, the BUILD_VECTOR operands
6442 // are promoted and implicitly truncated. Make that explicit here.
6443 if (Op.getValueType() != SrcEltVT)
6444 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
6445 Ops.push_back(DAG.getNode(ISD::BITCAST, SDLoc(BV),
6446 DstEltVT, Op));
6447 AddToWorklist(Ops.back().getNode());
6448 }
6449 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
6450 }
6452 // Otherwise, we're growing or shrinking the elements. To avoid having to
6453 // handle annoying details of growing/shrinking FP values, we convert them to
6454 // int first.
6455 if (SrcEltVT.isFloatingPoint()) {
6456 // Convert the input float vector to a int vector where the elements are the
6457 // same sizes.
6458 assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
6459 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
6460 BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
6461 SrcEltVT = IntVT;
6462 }
6464 // Now we know the input is an integer vector. If the output is a FP type,
6465 // convert to integer first, then to FP of the right size.
6466 if (DstEltVT.isFloatingPoint()) {
6467 assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
6468 EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
6469 SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
6471 // Next, convert to FP elements of the same size.
6472 return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
6473 }
6475 // Okay, we know the src/dst types are both integers of differing types.
6476 // Handling growing first.
6477 assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
6478 if (SrcBitSize < DstBitSize) {
6479 unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
6481 SmallVector<SDValue, 8> Ops;
6482 for (unsigned i = 0, e = BV->getNumOperands(); i != e;
6483 i += NumInputsPerOutput) {
6484 bool isLE = TLI.isLittleEndian();
6485 APInt NewBits = APInt(DstBitSize, 0);
6486 bool EltIsUndef = true;
6487 for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
6488 // Shift the previously computed bits over.
6489 NewBits <<= SrcBitSize;
6490 SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
6491 if (Op.getOpcode() == ISD::UNDEF) continue;
6492 EltIsUndef = false;
6494 NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
6495 zextOrTrunc(SrcBitSize).zext(DstBitSize);
6496 }
6498 if (EltIsUndef)
6499 Ops.push_back(DAG.getUNDEF(DstEltVT));
6500 else
6501 Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
6502 }
6504 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
6505 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
6506 }
6508 // Finally, this must be the case where we are shrinking elements: each input
6509 // turns into multiple outputs.
6510 bool isS2V = ISD::isScalarToVector(BV);
6511 unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
6512 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
6513 NumOutputsPerInput*BV->getNumOperands());
6514 SmallVector<SDValue, 8> Ops;
6516 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
6517 if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
6518 for (unsigned j = 0; j != NumOutputsPerInput; ++j)
6519 Ops.push_back(DAG.getUNDEF(DstEltVT));
6520 continue;
6521 }
6523 APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->
6524 getAPIntValue().zextOrTrunc(SrcBitSize);
6526 for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
6527 APInt ThisVal = OpVal.trunc(DstBitSize);
6528 Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
6529 if (isS2V && i == 0 && j == 0 && ThisVal.zext(SrcBitSize) == OpVal)
6530 // Simply turn this into a SCALAR_TO_VECTOR of the new type.
6531 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
6532 Ops[0]);
6533 OpVal = OpVal.lshr(DstBitSize);
6534 }
6536 // For big endian targets, swap the order of the pieces of each element.
6537 if (TLI.isBigEndian())
6538 std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
6539 }
6541 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
6542 }
6544 SDValue DAGCombiner::visitFADD(SDNode *N) {
6545 SDValue N0 = N->getOperand(0);
6546 SDValue N1 = N->getOperand(1);
6547 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
6548 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
6549 EVT VT = N->getValueType(0);
6550 const TargetOptions &Options = DAG.getTarget().Options;
6552 // fold vector ops
6553 if (VT.isVector()) {
6554 SDValue FoldedVOp = SimplifyVBinOp(N);
6555 if (FoldedVOp.getNode()) return FoldedVOp;
6556 }
6558 // fold (fadd c1, c2) -> c1 + c2
6559 if (N0CFP && N1CFP)
6560 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N1);
6562 // canonicalize constant to RHS
6563 if (N0CFP && !N1CFP)
6564 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N0);
6566 // fold (fadd A, (fneg B)) -> (fsub A, B)
6567 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
6568 isNegatibleForFree(N1, LegalOperations, TLI, &Options) == 2)
6569 return DAG.getNode(ISD::FSUB, SDLoc(N), VT, N0,
6570 GetNegatedExpression(N1, DAG, LegalOperations));
6572 // fold (fadd (fneg A), B) -> (fsub B, A)
6573 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
6574 isNegatibleForFree(N0, LegalOperations, TLI, &Options) == 2)
6575 return DAG.getNode(ISD::FSUB, SDLoc(N), VT, N1,
6576 GetNegatedExpression(N0, DAG, LegalOperations));
6578 // If 'unsafe math' is enabled, fold lots of things.
6579 if (Options.UnsafeFPMath) {
6580 // No FP constant should be created after legalization as Instruction
6581 // Selection pass has a hard time dealing with FP constants.
6582 bool AllowNewConst = (Level < AfterLegalizeDAG);
6584 // fold (fadd A, 0) -> A
6585 if (N1CFP && N1CFP->getValueAPF().isZero())
6586 return N0;
6588 // fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
6589 if (N1CFP && N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() &&
6590 isa<ConstantFPSDNode>(N0.getOperand(1)))
6591 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0.getOperand(0),
6592 DAG.getNode(ISD::FADD, SDLoc(N), VT,
6593 N0.getOperand(1), N1));
6595 // If allowed, fold (fadd (fneg x), x) -> 0.0
6596 if (AllowNewConst && N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1)
6597 return DAG.getConstantFP(0.0, VT);
6599 // If allowed, fold (fadd x, (fneg x)) -> 0.0
6600 if (AllowNewConst && N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0)
6601 return DAG.getConstantFP(0.0, VT);
6603 // We can fold chains of FADD's of the same value into multiplications.
6604 // This transform is not safe in general because we are reducing the number
6605 // of rounding steps.
6606 if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) {
6607 if (N0.getOpcode() == ISD::FMUL) {
6608 ConstantFPSDNode *CFP00 = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
6609 ConstantFPSDNode *CFP01 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
6611 // (fadd (fmul x, c), x) -> (fmul x, c+1)
6612 if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
6613 SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
6614 SDValue(CFP01, 0),
6615 DAG.getConstantFP(1.0, VT));
6616 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N1, NewCFP);
6617 }
6619 // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
6620 if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
6621 N1.getOperand(0) == N1.getOperand(1) &&
6622 N0.getOperand(0) == N1.getOperand(0)) {
6623 SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
6624 SDValue(CFP01, 0),
6625 DAG.getConstantFP(2.0, VT));
6626 return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
6627 N0.getOperand(0), NewCFP);
6628 }
6629 }
6631 if (N1.getOpcode() == ISD::FMUL) {
6632 ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
6633 ConstantFPSDNode *CFP11 = dyn_cast<ConstantFPSDNode>(N1.getOperand(1));
6635 // (fadd x, (fmul x, c)) -> (fmul x, c+1)
6636 if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
6637 SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
6638 SDValue(CFP11, 0),
6639 DAG.getConstantFP(1.0, VT));
6640 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0, NewCFP);
6641 }
6643 // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
6644 if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD &&
6645 N0.getOperand(0) == N0.getOperand(1) &&
6646 N1.getOperand(0) == N0.getOperand(0)) {
6647 SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
6648 SDValue(CFP11, 0),
6649 DAG.getConstantFP(2.0, VT));
6650 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N1.getOperand(0), NewCFP);
6651 }
6652 }
6654 if (N0.getOpcode() == ISD::FADD && AllowNewConst) {
6655 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
6656 // (fadd (fadd x, x), x) -> (fmul x, 3.0)
6657 if (!CFP && N0.getOperand(0) == N0.getOperand(1) &&
6658 (N0.getOperand(0) == N1))
6659 return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
6660 N1, DAG.getConstantFP(3.0, VT));
6661 }
6663 if (N1.getOpcode() == ISD::FADD && AllowNewConst) {
6664 ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
6665 // (fadd x, (fadd x, x)) -> (fmul x, 3.0)
6666 if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
6667 N1.getOperand(0) == N0)
6668 return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
6669 N0, DAG.getConstantFP(3.0, VT));
6670 }
6672 // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0)
6673 if (AllowNewConst &&
6674 N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
6675 N0.getOperand(0) == N0.getOperand(1) &&
6676 N1.getOperand(0) == N1.getOperand(1) &&
6677 N0.getOperand(0) == N1.getOperand(0))
6678 return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
6679 N0.getOperand(0), DAG.getConstantFP(4.0, VT));
6680 }
6681 } // enable-unsafe-fp-math
6683 // FADD -> FMA combines:
6684 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) &&
6685 DAG.getTarget()
6686 .getSubtargetImpl()
6687 ->getTargetLowering()
6688 ->isFMAFasterThanFMulAndFAdd(VT) &&
6689 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT))) {
6691 // fold (fadd (fmul x, y), z) -> (fma x, y, z)
6692 if (N0.getOpcode() == ISD::FMUL &&
6693 (N0->hasOneUse() || TLI.enableAggressiveFMAFusion(VT)))
6694 return DAG.getNode(ISD::FMA, SDLoc(N), VT,
6695 N0.getOperand(0), N0.getOperand(1), N1);
6697 // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
6698 // Note: Commutes FADD operands.
6699 if (N1.getOpcode() == ISD::FMUL &&
6700 (N1->hasOneUse() || TLI.enableAggressiveFMAFusion(VT)))
6701 return DAG.getNode(ISD::FMA, SDLoc(N), VT,
6702 N1.getOperand(0), N1.getOperand(1), N0);
6703 }
6705 return SDValue();
6706 }
6708 SDValue DAGCombiner::visitFSUB(SDNode *N) {
6709 SDValue N0 = N->getOperand(0);
6710 SDValue N1 = N->getOperand(1);
6711 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
6712 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
6713 EVT VT = N->getValueType(0);
6714 SDLoc dl(N);
6715 const TargetOptions &Options = DAG.getTarget().Options;
6717 // fold vector ops
6718 if (VT.isVector()) {
6719 SDValue FoldedVOp = SimplifyVBinOp(N);
6720 if (FoldedVOp.getNode()) return FoldedVOp;
6721 }
6723 // fold (fsub c1, c2) -> c1-c2
6724 if (N0CFP && N1CFP)
6725 return DAG.getNode(ISD::FSUB, SDLoc(N), VT, N0, N1);
6727 // fold (fsub A, (fneg B)) -> (fadd A, B)
6728 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
6729 return DAG.getNode(ISD::FADD, dl, VT, N0,
6730 GetNegatedExpression(N1, DAG, LegalOperations));
6732 // If 'unsafe math' is enabled, fold lots of things.
6733 if (Options.UnsafeFPMath) {
6734 // (fsub A, 0) -> A
6735 if (N1CFP && N1CFP->getValueAPF().isZero())
6736 return N0;
6738 // (fsub 0, B) -> -B
6739 if (N0CFP && N0CFP->getValueAPF().isZero()) {
6740 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
6741 return GetNegatedExpression(N1, DAG, LegalOperations);
6742 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
6743 return DAG.getNode(ISD::FNEG, dl, VT, N1);
6744 }
6746 // (fsub x, x) -> 0.0
6747 if (N0 == N1)
6748 return DAG.getConstantFP(0.0f, VT);
6750 // (fsub x, (fadd x, y)) -> (fneg y)
6751 // (fsub x, (fadd y, x)) -> (fneg y)
6752 if (N1.getOpcode() == ISD::FADD) {
6753 SDValue N10 = N1->getOperand(0);
6754 SDValue N11 = N1->getOperand(1);
6756 if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI, &Options))
6757 return GetNegatedExpression(N11, DAG, LegalOperations);
6759 if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI, &Options))
6760 return GetNegatedExpression(N10, DAG, LegalOperations);
6761 }
6762 }
6764 // FSUB -> FMA combines:
6765 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) &&
6766 DAG.getTarget().getSubtargetImpl()
6767 ->getTargetLowering()
6768 ->isFMAFasterThanFMulAndFAdd(VT) &&
6769 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT))) {
6771 // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
6772 if (N0.getOpcode() == ISD::FMUL &&
6773 (N0->hasOneUse() || TLI.enableAggressiveFMAFusion(VT)))
6774 return DAG.getNode(ISD::FMA, dl, VT,
6775 N0.getOperand(0), N0.getOperand(1),
6776 DAG.getNode(ISD::FNEG, dl, VT, N1));
6778 // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
6779 // Note: Commutes FSUB operands.
6780 if (N1.getOpcode() == ISD::FMUL &&
6781 (N1->hasOneUse() || TLI.enableAggressiveFMAFusion(VT)))
6782 return DAG.getNode(ISD::FMA, dl, VT,
6783 DAG.getNode(ISD::FNEG, dl, VT,
6784 N1.getOperand(0)),
6785 N1.getOperand(1), N0);
6787 // fold (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
6788 if (N0.getOpcode() == ISD::FNEG &&
6789 N0.getOperand(0).getOpcode() == ISD::FMUL &&
6790 ((N0->hasOneUse() && N0.getOperand(0).hasOneUse()) ||
6791 TLI.enableAggressiveFMAFusion(VT))) {
6792 SDValue N00 = N0.getOperand(0).getOperand(0);
6793 SDValue N01 = N0.getOperand(0).getOperand(1);
6794 return DAG.getNode(ISD::FMA, dl, VT,
6795 DAG.getNode(ISD::FNEG, dl, VT, N00), N01,
6796 DAG.getNode(ISD::FNEG, dl, VT, N1));
6797 }
6798 }
6800 return SDValue();
6801 }
6803 SDValue DAGCombiner::visitFMUL(SDNode *N) {
6804 SDValue N0 = N->getOperand(0);
6805 SDValue N1 = N->getOperand(1);
6806 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
6807 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
6808 EVT VT = N->getValueType(0);
6809 const TargetOptions &Options = DAG.getTarget().Options;
6811 // fold vector ops
6812 if (VT.isVector()) {
6813 // This just handles C1 * C2 for vectors. Other vector folds are below.
6814 SDValue FoldedVOp = SimplifyVBinOp(N);
6815 if (FoldedVOp.getNode())
6816 return FoldedVOp;
6817 // Canonicalize vector constant to RHS.
6818 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
6819 N1.getOpcode() != ISD::BUILD_VECTOR)
6820 if (auto *BV0 = dyn_cast<BuildVectorSDNode>(N0))
6821 if (BV0->isConstant())
6822 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
6823 }
6825 // fold (fmul c1, c2) -> c1*c2
6826 if (N0CFP && N1CFP)
6827 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0, N1);
6829 // canonicalize constant to RHS
6830 if (N0CFP && !N1CFP)
6831 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N1, N0);
6833 // fold (fmul A, 1.0) -> A
6834 if (N1CFP && N1CFP->isExactlyValue(1.0))
6835 return N0;
6837 if (Options.UnsafeFPMath) {
6838 // fold (fmul A, 0) -> 0
6839 if (N1CFP && N1CFP->getValueAPF().isZero())
6840 return N1;
6842 // fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
6843 if (N0.getOpcode() == ISD::FMUL) {
6844 // Fold scalars or any vector constants (not just splats).
6845 // This fold is done in general by InstCombine, but extra fmul insts
6846 // may have been generated during lowering.
6847 SDValue N01 = N0.getOperand(1);
6848 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
6849 auto *BV01 = dyn_cast<BuildVectorSDNode>(N01);
6850 if ((N1CFP && isConstOrConstSplatFP(N01)) ||
6851 (BV1 && BV01 && BV1->isConstant() && BV01->isConstant())) {
6852 SDLoc SL(N);
6853 SDValue MulConsts = DAG.getNode(ISD::FMUL, SL, VT, N01, N1);
6854 return DAG.getNode(ISD::FMUL, SL, VT, N0.getOperand(0), MulConsts);
6855 }
6856 }
6858 // fold (fmul (fadd x, x), c) -> (fmul x, (fmul 2.0, c))
6859 // Undo the fmul 2.0, x -> fadd x, x transformation, since if it occurs
6860 // during an early run of DAGCombiner can prevent folding with fmuls
6861 // inserted during lowering.
6862 if (N0.getOpcode() == ISD::FADD && N0.getOperand(0) == N0.getOperand(1)) {
6863 SDLoc SL(N);
6864 const SDValue Two = DAG.getConstantFP(2.0, VT);
6865 SDValue MulConsts = DAG.getNode(ISD::FMUL, SL, VT, Two, N1);
6866 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0.getOperand(0), MulConsts);
6867 }
6868 }
6870 // fold (fmul X, 2.0) -> (fadd X, X)
6871 if (N1CFP && N1CFP->isExactlyValue(+2.0))
6872 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N0);
6874 // fold (fmul X, -1.0) -> (fneg X)
6875 if (N1CFP && N1CFP->isExactlyValue(-1.0))
6876 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
6877 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0);
6879 // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
6880 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) {
6881 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) {
6882 // Both can be negated for free, check to see if at least one is cheaper
6883 // negated.
6884 if (LHSNeg == 2 || RHSNeg == 2)
6885 return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
6886 GetNegatedExpression(N0, DAG, LegalOperations),
6887 GetNegatedExpression(N1, DAG, LegalOperations));
6888 }
6889 }
6891 return SDValue();
6892 }
6894 SDValue DAGCombiner::visitFMA(SDNode *N) {
6895 SDValue N0 = N->getOperand(0);
6896 SDValue N1 = N->getOperand(1);
6897 SDValue N2 = N->getOperand(2);
6898 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
6899 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
6900 EVT VT = N->getValueType(0);
6901 SDLoc dl(N);
6902 const TargetOptions &Options = DAG.getTarget().Options;
6904 // Constant fold FMA.
6905 if (isa<ConstantFPSDNode>(N0) &&
6906 isa<ConstantFPSDNode>(N1) &&
6907 isa<ConstantFPSDNode>(N2)) {
6908 return DAG.getNode(ISD::FMA, dl, VT, N0, N1, N2);
6909 }
6911 if (Options.UnsafeFPMath) {
6912 if (N0CFP && N0CFP->isZero())
6913 return N2;
6914 if (N1CFP && N1CFP->isZero())
6915 return N2;
6916 }
6917 if (N0CFP && N0CFP->isExactlyValue(1.0))
6918 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
6919 if (N1CFP && N1CFP->isExactlyValue(1.0))
6920 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);
6922 // Canonicalize (fma c, x, y) -> (fma x, c, y)
6923 if (N0CFP && !N1CFP)
6924 return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);
6926 // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
6927 if (Options.UnsafeFPMath && N1CFP &&
6928 N2.getOpcode() == ISD::FMUL &&
6929 N0 == N2.getOperand(0) &&
6930 N2.getOperand(1).getOpcode() == ISD::ConstantFP) {
6931 return DAG.getNode(ISD::FMUL, dl, VT, N0,
6932 DAG.getNode(ISD::FADD, dl, VT, N1, N2.getOperand(1)));
6933 }
6936 // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
6937 if (Options.UnsafeFPMath &&
6938 N0.getOpcode() == ISD::FMUL && N1CFP &&
6939 N0.getOperand(1).getOpcode() == ISD::ConstantFP) {
6940 return DAG.getNode(ISD::FMA, dl, VT,
6941 N0.getOperand(0),
6942 DAG.getNode(ISD::FMUL, dl, VT, N1, N0.getOperand(1)),
6943 N2);
6944 }
6946 // (fma x, 1, y) -> (fadd x, y)
6947 // (fma x, -1, y) -> (fadd (fneg x), y)
6948 if (N1CFP) {
6949 if (N1CFP->isExactlyValue(1.0))
6950 return DAG.getNode(ISD::FADD, dl, VT, N0, N2);
6952 if (N1CFP->isExactlyValue(-1.0) &&
6953 (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
6954 SDValue RHSNeg = DAG.getNode(ISD::FNEG, dl, VT, N0);
6955 AddToWorklist(RHSNeg.getNode());
6956 return DAG.getNode(ISD::FADD, dl, VT, N2, RHSNeg);
6957 }
6958 }
6960 // (fma x, c, x) -> (fmul x, (c+1))
6961 if (Options.UnsafeFPMath && N1CFP && N0 == N2)
6962 return DAG.getNode(ISD::FMUL, dl, VT, N0,
6963 DAG.getNode(ISD::FADD, dl, VT,
6964 N1, DAG.getConstantFP(1.0, VT)));
6966 // (fma x, c, (fneg x)) -> (fmul x, (c-1))
6967 if (Options.UnsafeFPMath && N1CFP &&
6968 N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0)
6969 return DAG.getNode(ISD::FMUL, dl, VT, N0,
6970 DAG.getNode(ISD::FADD, dl, VT,
6971 N1, DAG.getConstantFP(-1.0, VT)));
6974 return SDValue();
6975 }
6977 SDValue DAGCombiner::visitFDIV(SDNode *N) {
6978 SDValue N0 = N->getOperand(0);
6979 SDValue N1 = N->getOperand(1);
6980 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
6981 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
6982 EVT VT = N->getValueType(0);
6983 SDLoc DL(N);
6984 const TargetOptions &Options = DAG.getTarget().Options;
6986 // fold vector ops
6987 if (VT.isVector()) {
6988 SDValue FoldedVOp = SimplifyVBinOp(N);
6989 if (FoldedVOp.getNode()) return FoldedVOp;
6990 }
6992 // fold (fdiv c1, c2) -> c1/c2
6993 if (N0CFP && N1CFP)
6994 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1);
6996 if (Options.UnsafeFPMath) {
6997 // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
6998 if (N1CFP) {
6999 // Compute the reciprocal 1.0 / c2.
7000 APFloat N1APF = N1CFP->getValueAPF();
7001 APFloat Recip(N1APF.getSemantics(), 1); // 1.0
7002 APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
7003 // Only do the transform if the reciprocal is a legal fp immediate that
7004 // isn't too nasty (eg NaN, denormal, ...).
7005 if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty
7006 (!LegalOperations ||
7007 // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
7008 // backend)... we should handle this gracefully after Legalize.
7009 // TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) ||
7010 TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) ||
7011 TLI.isFPImmLegal(Recip, VT)))
7012 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0,
7013 DAG.getConstantFP(Recip, VT));
7014 }
7016 // If this FDIV is part of a reciprocal square root, it may be folded
7017 // into a target-specific square root estimate instruction.
7018 if (N1.getOpcode() == ISD::FSQRT) {
7019 if (SDValue RV = BuildRsqrtEstimate(N1.getOperand(0))) {
7020 AddToWorklist(RV.getNode());
7021 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
7022 }
7023 } else if (N1.getOpcode() == ISD::FP_EXTEND &&
7024 N1.getOperand(0).getOpcode() == ISD::FSQRT) {
7025 if (SDValue RV = BuildRsqrtEstimate(N1.getOperand(0).getOperand(0))) {
7026 AddToWorklist(RV.getNode());
7027 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV);
7028 AddToWorklist(RV.getNode());
7029 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
7030 }
7031 } else if (N1.getOpcode() == ISD::FP_ROUND &&
7032 N1.getOperand(0).getOpcode() == ISD::FSQRT) {
7033 if (SDValue RV = BuildRsqrtEstimate(N1.getOperand(0).getOperand(0))) {
7034 AddToWorklist(RV.getNode());
7035 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1));
7036 AddToWorklist(RV.getNode());
7037 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
7038 }
7039 }
7041 // Fold into a reciprocal estimate and multiply instead of a real divide.
7042 if (SDValue RV = BuildReciprocalEstimate(N1)) {
7043 AddToWorklist(RV.getNode());
7044 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
7045 }
7046 }
7048 // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
7049 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) {
7050 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) {
7051 // Both can be negated for free, check to see if at least one is cheaper
7052 // negated.
7053 if (LHSNeg == 2 || RHSNeg == 2)
7054 return DAG.getNode(ISD::FDIV, SDLoc(N), VT,
7055 GetNegatedExpression(N0, DAG, LegalOperations),
7056 GetNegatedExpression(N1, DAG, LegalOperations));
7057 }
7058 }
7060 return SDValue();
7061 }
7063 SDValue DAGCombiner::visitFREM(SDNode *N) {
7064 SDValue N0 = N->getOperand(0);
7065 SDValue N1 = N->getOperand(1);
7066 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7067 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
7068 EVT VT = N->getValueType(0);
7070 // fold (frem c1, c2) -> fmod(c1,c2)
7071 if (N0CFP && N1CFP)
7072 return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1);
7074 return SDValue();
7075 }
7077 SDValue DAGCombiner::visitFSQRT(SDNode *N) {
7078 if (DAG.getTarget().Options.UnsafeFPMath) {
7079 // Compute this as 1/(1/sqrt(X)): the reciprocal of the reciprocal sqrt.
7080 if (SDValue RV = BuildRsqrtEstimate(N->getOperand(0))) {
7081 AddToWorklist(RV.getNode());
7082 RV = BuildReciprocalEstimate(RV);
7083 if (RV.getNode()) {
7084 // Unfortunately, RV is now NaN if the input was exactly 0.
7085 // Select out this case and force the answer to 0.
7086 EVT VT = RV.getValueType();
7088 SDValue Zero = DAG.getConstantFP(0.0, VT);
7089 SDValue ZeroCmp =
7090 DAG.getSetCC(SDLoc(N), TLI.getSetCCResultType(*DAG.getContext(), VT),
7091 N->getOperand(0), Zero, ISD::SETEQ);
7092 AddToWorklist(ZeroCmp.getNode());
7093 AddToWorklist(RV.getNode());
7095 RV = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT,
7096 SDLoc(N), VT, ZeroCmp, Zero, RV);
7097 return RV;
7098 }
7099 }
7100 }
7101 return SDValue();
7102 }
7104 SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
7105 SDValue N0 = N->getOperand(0);
7106 SDValue N1 = N->getOperand(1);
7107 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7108 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
7109 EVT VT = N->getValueType(0);
7111 if (N0CFP && N1CFP) // Constant fold
7112 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1);
7114 if (N1CFP) {
7115 const APFloat& V = N1CFP->getValueAPF();
7116 // copysign(x, c1) -> fabs(x) iff ispos(c1)
7117 // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
7118 if (!V.isNegative()) {
7119 if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
7120 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
7121 } else {
7122 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
7123 return DAG.getNode(ISD::FNEG, SDLoc(N), VT,
7124 DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0));
7125 }
7126 }
7128 // copysign(fabs(x), y) -> copysign(x, y)
7129 // copysign(fneg(x), y) -> copysign(x, y)
7130 // copysign(copysign(x,z), y) -> copysign(x, y)
7131 if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
7132 N0.getOpcode() == ISD::FCOPYSIGN)
7133 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
7134 N0.getOperand(0), N1);
7136 // copysign(x, abs(y)) -> abs(x)
7137 if (N1.getOpcode() == ISD::FABS)
7138 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
7140 // copysign(x, copysign(y,z)) -> copysign(x, z)
7141 if (N1.getOpcode() == ISD::FCOPYSIGN)
7142 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
7143 N0, N1.getOperand(1));
7145 // copysign(x, fp_extend(y)) -> copysign(x, y)
7146 // copysign(x, fp_round(y)) -> copysign(x, y)
7147 if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND)
7148 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
7149 N0, N1.getOperand(0));
7151 return SDValue();
7152 }
7154 SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
7155 SDValue N0 = N->getOperand(0);
7156 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
7157 EVT VT = N->getValueType(0);
7158 EVT OpVT = N0.getValueType();
7160 // fold (sint_to_fp c1) -> c1fp
7161 if (N0C &&
7162 // ...but only if the target supports immediate floating-point values
7163 (!LegalOperations ||
7164 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
7165 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
7167 // If the input is a legal type, and SINT_TO_FP is not legal on this target,
7168 // but UINT_TO_FP is legal on this target, try to convert.
7169 if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
7170 TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
7171 // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
7172 if (DAG.SignBitIsZero(N0))
7173 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
7174 }
7176 // The next optimizations are desirable only if SELECT_CC can be lowered.
7177 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
7178 // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0,, cc)
7179 if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
7180 !VT.isVector() &&
7181 (!LegalOperations ||
7182 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
7183 SDValue Ops[] =
7184 { N0.getOperand(0), N0.getOperand(1),
7185 DAG.getConstantFP(-1.0, VT) , DAG.getConstantFP(0.0, VT),
7186 N0.getOperand(2) };
7187 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
7188 }
7190 // fold (sint_to_fp (zext (setcc x, y, cc))) ->
7191 // (select_cc x, y, 1.0, 0.0,, cc)
7192 if (N0.getOpcode() == ISD::ZERO_EXTEND &&
7193 N0.getOperand(0).getOpcode() == ISD::SETCC &&!VT.isVector() &&
7194 (!LegalOperations ||
7195 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
7196 SDValue Ops[] =
7197 { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
7198 DAG.getConstantFP(1.0, VT) , DAG.getConstantFP(0.0, VT),
7199 N0.getOperand(0).getOperand(2) };
7200 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
7201 }
7202 }
7204 return SDValue();
7205 }
7207 SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
7208 SDValue N0 = N->getOperand(0);
7209 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
7210 EVT VT = N->getValueType(0);
7211 EVT OpVT = N0.getValueType();
7213 // fold (uint_to_fp c1) -> c1fp
7214 if (N0C &&
7215 // ...but only if the target supports immediate floating-point values
7216 (!LegalOperations ||
7217 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
7218 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
7220 // If the input is a legal type, and UINT_TO_FP is not legal on this target,
7221 // but SINT_TO_FP is legal on this target, try to convert.
7222 if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
7223 TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
7224 // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
7225 if (DAG.SignBitIsZero(N0))
7226 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
7227 }
7229 // The next optimizations are desirable only if SELECT_CC can be lowered.
7230 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
7231 // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0,, cc)
7233 if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
7234 (!LegalOperations ||
7235 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
7236 SDValue Ops[] =
7237 { N0.getOperand(0), N0.getOperand(1),
7238 DAG.getConstantFP(1.0, VT), DAG.getConstantFP(0.0, VT),
7239 N0.getOperand(2) };
7240 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
7241 }
7242 }
7244 return SDValue();
7245 }
7247 SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
7248 SDValue N0 = N->getOperand(0);
7249 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7250 EVT VT = N->getValueType(0);
7252 // fold (fp_to_sint c1fp) -> c1
7253 if (N0CFP)
7254 return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);
7256 return SDValue();
7257 }
7259 SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
7260 SDValue N0 = N->getOperand(0);
7261 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7262 EVT VT = N->getValueType(0);
7264 // fold (fp_to_uint c1fp) -> c1
7265 if (N0CFP)
7266 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);
7268 return SDValue();
7269 }
7271 SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
7272 SDValue N0 = N->getOperand(0);
7273 SDValue N1 = N->getOperand(1);
7274 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7275 EVT VT = N->getValueType(0);
7277 // fold (fp_round c1fp) -> c1fp
7278 if (N0CFP)
7279 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1);
7281 // fold (fp_round (fp_extend x)) -> x
7282 if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
7283 return N0.getOperand(0);
7285 // fold (fp_round (fp_round x)) -> (fp_round x)
7286 if (N0.getOpcode() == ISD::FP_ROUND) {
7287 // This is a value preserving truncation if both round's are.
7288 bool IsTrunc = N->getConstantOperandVal(1) == 1 &&
7289 N0.getNode()->getConstantOperandVal(1) == 1;
7290 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0.getOperand(0),
7291 DAG.getIntPtrConstant(IsTrunc));
7292 }
7294 // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
7295 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
7296 SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
7297 N0.getOperand(0), N1);
7298 AddToWorklist(Tmp.getNode());
7299 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
7300 Tmp, N0.getOperand(1));
7301 }
7303 return SDValue();
7304 }
7306 SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
7307 SDValue N0 = N->getOperand(0);
7308 EVT VT = N->getValueType(0);
7309 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7310 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7312 // fold (fp_round_inreg c1fp) -> c1fp
7313 if (N0CFP && isTypeLegal(EVT)) {
7314 SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT);
7315 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, Round);
7316 }
7318 return SDValue();
7319 }
7321 SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
7322 SDValue N0 = N->getOperand(0);
7323 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7324 EVT VT = N->getValueType(0);
7326 // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
7327 if (N->hasOneUse() &&
7328 N->use_begin()->getOpcode() == ISD::FP_ROUND)
7329 return SDValue();
7331 // fold (fp_extend c1fp) -> c1fp
7332 if (N0CFP)
7333 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);
7335 // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the
7336 // value of X.
7337 if (N0.getOpcode() == ISD::FP_ROUND
7338 && N0.getNode()->getConstantOperandVal(1) == 1) {
7339 SDValue In = N0.getOperand(0);
7340 if (In.getValueType() == VT) return In;
7341 if (VT.bitsLT(In.getValueType()))
7342 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT,
7343 In, N0.getOperand(1));
7344 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In);
7345 }
7347 // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
7348 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
7349 TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType())) {
7350 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
7351 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
7352 LN0->getChain(),
7353 LN0->getBasePtr(), N0.getValueType(),
7354 LN0->getMemOperand());
7355 CombineTo(N, ExtLoad);
7356 CombineTo(N0.getNode(),
7357 DAG.getNode(ISD::FP_ROUND, SDLoc(N0),
7358 N0.getValueType(), ExtLoad, DAG.getIntPtrConstant(1)),
7359 ExtLoad.getValue(1));
7360 return SDValue(N, 0); // Return N so it doesn't get rechecked!
7361 }
7363 return SDValue();
7364 }
7366 SDValue DAGCombiner::visitFCEIL(SDNode *N) {
7367 SDValue N0 = N->getOperand(0);
7368 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7369 EVT VT = N->getValueType(0);
7371 // fold (fceil c1) -> fceil(c1)
7372 if (N0CFP)
7373 return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);
7375 return SDValue();
7376 }
7378 SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
7379 SDValue N0 = N->getOperand(0);
7380 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7381 EVT VT = N->getValueType(0);
7383 // fold (ftrunc c1) -> ftrunc(c1)
7384 if (N0CFP)
7385 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);
7387 return SDValue();
7388 }
7390 SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
7391 SDValue N0 = N->getOperand(0);
7392 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
7393 EVT VT = N->getValueType(0);
7395 // fold (ffloor c1) -> ffloor(c1)
7396 if (N0CFP)
7397 return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);
7399 return SDValue();
7400 }
7402 // FIXME: FNEG and FABS have a lot in common; refactor.
7403 SDValue DAGCombiner::visitFNEG(SDNode *N) {
7404 SDValue N0 = N->getOperand(0);
7405 EVT VT = N->getValueType(0);
7407 if (VT.isVector()) {
7408 SDValue FoldedVOp = SimplifyVUnaryOp(N);
7409 if (FoldedVOp.getNode()) return FoldedVOp;
7410 }
7412 // Constant fold FNEG.
7413 if (isa<ConstantFPSDNode>(N0))
7414 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N->getOperand(0));
7416 if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(),
7417 &DAG.getTarget().Options))
7418 return GetNegatedExpression(N0, DAG, LegalOperations);
7420 // Transform fneg(bitconvert(x)) -> bitconvert(x ^ sign) to avoid loading
7421 // constant pool values.
7422 if (!TLI.isFNegFree(VT) &&
7423 N0.getOpcode() == ISD::BITCAST &&
7424 N0.getNode()->hasOneUse()) {
7425 SDValue Int = N0.getOperand(0);
7426 EVT IntVT = Int.getValueType();
7427 if (IntVT.isInteger() && !IntVT.isVector()) {
7428 APInt SignMask;
7429 if (N0.getValueType().isVector()) {
7430 // For a vector, get a mask such as 0x80... per scalar element
7431 // and splat it.
7432 SignMask = APInt::getSignBit(N0.getValueType().getScalarSizeInBits());
7433 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
7434 } else {
7435 // For a scalar, just generate 0x80...
7436 SignMask = APInt::getSignBit(IntVT.getSizeInBits());
7437 }
7438 Int = DAG.getNode(ISD::XOR, SDLoc(N0), IntVT, Int,
7439 DAG.getConstant(SignMask, IntVT));
7440 AddToWorklist(Int.getNode());
7441 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Int);
7442 }
7443 }
7445 // (fneg (fmul c, x)) -> (fmul -c, x)
7446 if (N0.getOpcode() == ISD::FMUL) {
7447 ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
7448 if (CFP1) {
7449 APFloat CVal = CFP1->getValueAPF();
7450 CVal.changeSign();
7451 if (Level >= AfterLegalizeDAG &&
7452 (TLI.isFPImmLegal(CVal, N->getValueType(0)) ||
7453 TLI.isOperationLegal(ISD::ConstantFP, N->getValueType(0))))
7454 return DAG.getNode(
7455 ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
7456 DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0.getOperand(1)));
7457 }
7458 }
7460 return SDValue();
7461 }
7463 SDValue DAGCombiner::visitFABS(SDNode *N) {
7464 SDValue N0 = N->getOperand(0);
7465 EVT VT = N->getValueType(0);
7467 if (VT.isVector()) {
7468 SDValue FoldedVOp = SimplifyVUnaryOp(N);
7469 if (FoldedVOp.getNode()) return FoldedVOp;
7470 }
7472 // fold (fabs c1) -> fabs(c1)
7473 if (isa<ConstantFPSDNode>(N0))
7474 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
7476 // fold (fabs (fabs x)) -> (fabs x)
7477 if (N0.getOpcode() == ISD::FABS)
7478 return N->getOperand(0);
7480 // fold (fabs (fneg x)) -> (fabs x)
7481 // fold (fabs (fcopysign x, y)) -> (fabs x)
7482 if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
7483 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));
7485 // Transform fabs(bitconvert(x)) -> bitconvert(x & ~sign) to avoid loading
7486 // constant pool values.
7487 if (!TLI.isFAbsFree(VT) &&
7488 N0.getOpcode() == ISD::BITCAST &&
7489 N0.getNode()->hasOneUse()) {
7490 SDValue Int = N0.getOperand(0);
7491 EVT IntVT = Int.getValueType();
7492 if (IntVT.isInteger() && !IntVT.isVector()) {
7493 APInt SignMask;
7494 if (N0.getValueType().isVector()) {
7495 // For a vector, get a mask such as 0x7f... per scalar element
7496 // and splat it.
7497 SignMask = ~APInt::getSignBit(N0.getValueType().getScalarSizeInBits());
7498 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
7499 } else {
7500 // For a scalar, just generate 0x7f...
7501 SignMask = ~APInt::getSignBit(IntVT.getSizeInBits());
7502 }
7503 Int = DAG.getNode(ISD::AND, SDLoc(N0), IntVT, Int,
7504 DAG.getConstant(SignMask, IntVT));
7505 AddToWorklist(Int.getNode());
7506 return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Int);
7507 }
7508 }
7510 return SDValue();
7511 }
7513 SDValue DAGCombiner::visitBRCOND(SDNode *N) {
7514 SDValue Chain = N->getOperand(0);
7515 SDValue N1 = N->getOperand(1);
7516 SDValue N2 = N->getOperand(2);
7518 // If N is a constant we could fold this into a fallthrough or unconditional
7519 // branch. However that doesn't happen very often in normal code, because
7520 // Instcombine/SimplifyCFG should have handled the available opportunities.
7521 // If we did this folding here, it would be necessary to update the
7522 // MachineBasicBlock CFG, which is awkward.
7524 // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
7525 // on the target.
7526 if (N1.getOpcode() == ISD::SETCC &&
7527 TLI.isOperationLegalOrCustom(ISD::BR_CC,
7528 N1.getOperand(0).getValueType())) {
7529 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
7530 Chain, N1.getOperand(2),
7531 N1.getOperand(0), N1.getOperand(1), N2);
7532 }
7534 if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
7535 ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
7536 (N1.getOperand(0).hasOneUse() &&
7537 N1.getOperand(0).getOpcode() == ISD::SRL))) {
7538 SDNode *Trunc = nullptr;
7539 if (N1.getOpcode() == ISD::TRUNCATE) {
7540 // Look pass the truncate.
7541 Trunc = N1.getNode();
7542 N1 = N1.getOperand(0);
7543 }
7545 // Match this pattern so that we can generate simpler code:
7546 //
7547 // %a = ...
7548 // %b = and i32 %a, 2
7549 // %c = srl i32 %b, 1
7550 // brcond i32 %c ...
7551 //
7552 // into
7553 //
7554 // %a = ...
7555 // %b = and i32 %a, 2
7556 // %c = setcc eq %b, 0
7557 // brcond %c ...
7558 //
7559 // This applies only when the AND constant value has one bit set and the
7560 // SRL constant is equal to the log2 of the AND constant. The back-end is
7561 // smart enough to convert the result into a TEST/JMP sequence.
7562 SDValue Op0 = N1.getOperand(0);
7563 SDValue Op1 = N1.getOperand(1);
7565 if (Op0.getOpcode() == ISD::AND &&
7566 Op1.getOpcode() == ISD::Constant) {
7567 SDValue AndOp1 = Op0.getOperand(1);
7569 if (AndOp1.getOpcode() == ISD::Constant) {
7570 const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
7572 if (AndConst.isPowerOf2() &&
7573 cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) {
7574 SDValue SetCC =
7575 DAG.getSetCC(SDLoc(N),
7576 getSetCCResultType(Op0.getValueType()),
7577 Op0, DAG.getConstant(0, Op0.getValueType()),
7578 ISD::SETNE);
7580 SDValue NewBRCond = DAG.getNode(ISD::BRCOND, SDLoc(N),
7581 MVT::Other, Chain, SetCC, N2);
7582 // Don't add the new BRCond into the worklist or else SimplifySelectCC
7583 // will convert it back to (X & C1) >> C2.
7584 CombineTo(N, NewBRCond, false);
7585 // Truncate is dead.
7586 if (Trunc)
7587 deleteAndRecombine(Trunc);
7588 // Replace the uses of SRL with SETCC
7589 WorklistRemover DeadNodes(*this);
7590 DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
7591 deleteAndRecombine(N1.getNode());
7592 return SDValue(N, 0); // Return N so it doesn't get rechecked!
7593 }
7594 }
7595 }
7597 if (Trunc)
7598 // Restore N1 if the above transformation doesn't match.
7599 N1 = N->getOperand(1);
7600 }
7602 // Transform br(xor(x, y)) -> br(x != y)
7603 // Transform br(xor(xor(x,y), 1)) -> br (x == y)
7604 if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
7605 SDNode *TheXor = N1.getNode();
7606 SDValue Op0 = TheXor->getOperand(0);
7607 SDValue Op1 = TheXor->getOperand(1);
7608 if (Op0.getOpcode() == Op1.getOpcode()) {
7609 // Avoid missing important xor optimizations.
7610 SDValue Tmp = visitXOR(TheXor);
7611 if (Tmp.getNode()) {
7612 if (Tmp.getNode() != TheXor) {
7613 DEBUG(dbgs() << "\nReplacing.8 ";
7614 TheXor->dump(&DAG);
7615 dbgs() << "\nWith: ";
7616 Tmp.getNode()->dump(&DAG);
7617 dbgs() << '\n');
7618 WorklistRemover DeadNodes(*this);
7619 DAG.ReplaceAllUsesOfValueWith(N1, Tmp);
7620 deleteAndRecombine(TheXor);
7621 return DAG.getNode(ISD::BRCOND, SDLoc(N),
7622 MVT::Other, Chain, Tmp, N2);
7623 }
7625 // visitXOR has changed XOR's operands or replaced the XOR completely,
7626 // bail out.
7627 return SDValue(N, 0);
7628 }
7629 }
7631 if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
7632 bool Equal = false;
7633 if (ConstantSDNode *RHSCI = dyn_cast<ConstantSDNode>(Op0))
7634 if (RHSCI->getAPIntValue() == 1 && Op0.hasOneUse() &&
7635 Op0.getOpcode() == ISD::XOR) {
7636 TheXor = Op0.getNode();
7637 Equal = true;
7638 }
7640 EVT SetCCVT = N1.getValueType();
7641 if (LegalTypes)
7642 SetCCVT = getSetCCResultType(SetCCVT);
7643 SDValue SetCC = DAG.getSetCC(SDLoc(TheXor),
7644 SetCCVT,
7645 Op0, Op1,
7646 Equal ? ISD::SETEQ : ISD::SETNE);
7647 // Replace the uses of XOR with SETCC
7648 WorklistRemover DeadNodes(*this);
7649 DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
7650 deleteAndRecombine(N1.getNode());
7651 return DAG.getNode(ISD::BRCOND, SDLoc(N),
7652 MVT::Other, Chain, SetCC, N2);
7653 }
7654 }
7656 return SDValue();
7657 }
7659 // Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
7660 //
7661 SDValue DAGCombiner::visitBR_CC(SDNode *N) {
7662 CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
7663 SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
7665 // If N is a constant we could fold this into a fallthrough or unconditional
7666 // branch. However that doesn't happen very often in normal code, because
7667 // Instcombine/SimplifyCFG should have handled the available opportunities.
7668 // If we did this folding here, it would be necessary to update the
7669 // MachineBasicBlock CFG, which is awkward.
7671 // Use SimplifySetCC to simplify SETCC's.
7672 SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()),
7673 CondLHS, CondRHS, CC->get(), SDLoc(N),
7674 false);
7675 if (Simp.getNode()) AddToWorklist(Simp.getNode());
7677 // fold to a simpler setcc
7678 if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
7679 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
7680 N->getOperand(0), Simp.getOperand(2),
7681 Simp.getOperand(0), Simp.getOperand(1),
7682 N->getOperand(4));
7684 return SDValue();
7685 }
7687 /// Return true if 'Use' is a load or a store that uses N as its base pointer
7688 /// and that N may be folded in the load / store addressing mode.
7689 static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
7690 SelectionDAG &DAG,
7691 const TargetLowering &TLI) {
7692 EVT VT;
7693 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
7694 if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
7695 return false;
7696 VT = Use->getValueType(0);
7697 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
7698 if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
7699 return false;
7700 VT = ST->getValue().getValueType();
7701 } else
7702 return false;
7704 TargetLowering::AddrMode AM;
7705 if (N->getOpcode() == ISD::ADD) {
7706 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
7707 if (Offset)
7708 // [reg +/- imm]
7709 AM.BaseOffs = Offset->getSExtValue();
7710 else
7711 // [reg +/- reg]
7712 AM.Scale = 1;
7713 } else if (N->getOpcode() == ISD::SUB) {
7714 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
7715 if (Offset)
7716 // [reg +/- imm]
7717 AM.BaseOffs = -Offset->getSExtValue();
7718 else
7719 // [reg +/- reg]
7720 AM.Scale = 1;
7721 } else
7722 return false;
7724 return TLI.isLegalAddressingMode(AM, VT.getTypeForEVT(*DAG.getContext()));
7725 }
7727 /// Try turning a load/store into a pre-indexed load/store when the base
7728 /// pointer is an add or subtract and it has other uses besides the load/store.
7729 /// After the transformation, the new indexed load/store has effectively folded
7730 /// the add/subtract in and all of its other uses are redirected to the
7731 /// new load/store.
7732 bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
7733 if (Level < AfterLegalizeDAG)
7734 return false;
7736 bool isLoad = true;
7737 SDValue Ptr;
7738 EVT VT;
7739 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
7740 if (LD->isIndexed())
7741 return false;
7742 VT = LD->getMemoryVT();
7743 if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
7744 !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
7745 return false;
7746 Ptr = LD->getBasePtr();
7747 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
7748 if (ST->isIndexed())
7749 return false;
7750 VT = ST->getMemoryVT();
7751 if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
7752 !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
7753 return false;
7754 Ptr = ST->getBasePtr();
7755 isLoad = false;
7756 } else {
7757 return false;
7758 }
7760 // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
7761 // out. There is no reason to make this a preinc/predec.
7762 if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
7763 Ptr.getNode()->hasOneUse())
7764 return false;
7766 // Ask the target to do addressing mode selection.
7767 SDValue BasePtr;
7768 SDValue Offset;
7769 ISD::MemIndexedMode AM = ISD::UNINDEXED;
7770 if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
7771 return false;
7773 // Backends without true r+i pre-indexed forms may need to pass a
7774 // constant base with a variable offset so that constant coercion
7775 // will work with the patterns in canonical form.
7776 bool Swapped = false;
7777 if (isa<ConstantSDNode>(BasePtr)) {
7778 std::swap(BasePtr, Offset);
7779 Swapped = true;
7780 }
7782 // Don't create a indexed load / store with zero offset.
7783 if (isa<ConstantSDNode>(Offset) &&
7784 cast<ConstantSDNode>(Offset)->isNullValue())
7785 return false;
7787 // Try turning it into a pre-indexed load / store except when:
7788 // 1) The new base ptr is a frame index.
7789 // 2) If N is a store and the new base ptr is either the same as or is a
7790 // predecessor of the value being stored.
7791 // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
7792 // that would create a cycle.
7793 // 4) All uses are load / store ops that use it as old base ptr.
7795 // Check #1. Preinc'ing a frame index would require copying the stack pointer
7796 // (plus the implicit offset) to a register to preinc anyway.
7797 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
7798 return false;
7800 // Check #2.
7801 if (!isLoad) {
7802 SDValue Val = cast<StoreSDNode>(N)->getValue();
7803 if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
7804 return false;
7805 }
7807 // If the offset is a constant, there may be other adds of constants that
7808 // can be folded with this one. We should do this to avoid having to keep
7809 // a copy of the original base pointer.
7810 SmallVector<SDNode *, 16> OtherUses;
7811 if (isa<ConstantSDNode>(Offset))
7812 for (SDNode *Use : BasePtr.getNode()->uses()) {
7813 if (Use == Ptr.getNode())
7814 continue;
7816 if (Use->isPredecessorOf(N))
7817 continue;
7819 if (Use->getOpcode() != ISD::ADD && Use->getOpcode() != ISD::SUB) {
7820 OtherUses.clear();
7821 break;
7822 }
7824 SDValue Op0 = Use->getOperand(0), Op1 = Use->getOperand(1);
7825 if (Op1.getNode() == BasePtr.getNode())
7826 std::swap(Op0, Op1);
7827 assert(Op0.getNode() == BasePtr.getNode() &&
7828 "Use of ADD/SUB but not an operand");
7830 if (!isa<ConstantSDNode>(Op1)) {
7831 OtherUses.clear();
7832 break;
7833 }
7835 // FIXME: In some cases, we can be smarter about this.
7836 if (Op1.getValueType() != Offset.getValueType()) {
7837 OtherUses.clear();
7838 break;
7839 }
7841 OtherUses.push_back(Use);
7842 }
7844 if (Swapped)
7845 std::swap(BasePtr, Offset);
7847 // Now check for #3 and #4.
7848 bool RealUse = false;
7850 // Caches for hasPredecessorHelper
7851 SmallPtrSet<const SDNode *, 32> Visited;
7852 SmallVector<const SDNode *, 16> Worklist;
7854 for (SDNode *Use : Ptr.getNode()->uses()) {
7855 if (Use == N)
7856 continue;
7857 if (N->hasPredecessorHelper(Use, Visited, Worklist))
7858 return false;
7860 // If Ptr may be folded in addressing mode of other use, then it's
7861 // not profitable to do this transformation.
7862 if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
7863 RealUse = true;
7864 }
7866 if (!RealUse)
7867 return false;
7869 SDValue Result;
7870 if (isLoad)
7871 Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
7872 BasePtr, Offset, AM);
7873 else
7874 Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
7875 BasePtr, Offset, AM);
7876 ++PreIndexedNodes;
7877 ++NodesCombined;
7878 DEBUG(dbgs() << "\nReplacing.4 ";
7879 N->dump(&DAG);
7880 dbgs() << "\nWith: ";
7881 Result.getNode()->dump(&DAG);
7882 dbgs() << '\n');
7883 WorklistRemover DeadNodes(*this);
7884 if (isLoad) {
7885 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
7886 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
7887 } else {
7888 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
7889 }
7891 // Finally, since the node is now dead, remove it from the graph.
7892 deleteAndRecombine(N);
7894 if (Swapped)
7895 std::swap(BasePtr, Offset);
7897 // Replace other uses of BasePtr that can be updated to use Ptr
7898 for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
7899 unsigned OffsetIdx = 1;
7900 if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
7901 OffsetIdx = 0;
7902 assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
7903 BasePtr.getNode() && "Expected BasePtr operand");
7905 // We need to replace ptr0 in the following expression:
7906 // x0 * offset0 + y0 * ptr0 = t0
7907 // knowing that
7908 // x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
7909 //
7910 // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
7911 // indexed load/store and the expresion that needs to be re-written.
7912 //
7913 // Therefore, we have:
7914 // t0 = (x0 * offset0 - x1 * y0 * y1 *offset1) + (y0 * y1) * t1
7916 ConstantSDNode *CN =
7917 cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
7918 int X0, X1, Y0, Y1;
7919 APInt Offset0 = CN->getAPIntValue();
7920 APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
7922 X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
7923 Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
7924 X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
7925 Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
7927 unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
7929 APInt CNV = Offset0;
7930 if (X0 < 0) CNV = -CNV;
7931 if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
7932 else CNV = CNV - Offset1;
7934 // We can now generate the new expression.
7935 SDValue NewOp1 = DAG.getConstant(CNV, CN->getValueType(0));
7936 SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);
7938 SDValue NewUse = DAG.getNode(Opcode,
7939 SDLoc(OtherUses[i]),
7940 OtherUses[i]->getValueType(0), NewOp1, NewOp2);
7941 DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
7942 deleteAndRecombine(OtherUses[i]);
7943 }
7945 // Replace the uses of Ptr with uses of the updated base value.
7946 DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
7947 deleteAndRecombine(Ptr.getNode());
7949 return true;
7950 }
7952 /// Try to combine a load/store with a add/sub of the base pointer node into a
7953 /// post-indexed load/store. The transformation folded the add/subtract into the
7954 /// new indexed load/store effectively and all of its uses are redirected to the
7955 /// new load/store.
7956 bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
7957 if (Level < AfterLegalizeDAG)
7958 return false;
7960 bool isLoad = true;
7961 SDValue Ptr;
7962 EVT VT;
7963 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
7964 if (LD->isIndexed())
7965 return false;
7966 VT = LD->getMemoryVT();
7967 if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
7968 !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
7969 return false;
7970 Ptr = LD->getBasePtr();
7971 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
7972 if (ST->isIndexed())
7973 return false;
7974 VT = ST->getMemoryVT();
7975 if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
7976 !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
7977 return false;
7978 Ptr = ST->getBasePtr();
7979 isLoad = false;
7980 } else {
7981 return false;
7982 }
7984 if (Ptr.getNode()->hasOneUse())
7985 return false;
7987 for (SDNode *Op : Ptr.getNode()->uses()) {
7988 if (Op == N ||
7989 (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
7990 continue;
7992 SDValue BasePtr;
7993 SDValue Offset;
7994 ISD::MemIndexedMode AM = ISD::UNINDEXED;
7995 if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
7996 // Don't create a indexed load / store with zero offset.
7997 if (isa<ConstantSDNode>(Offset) &&
7998 cast<ConstantSDNode>(Offset)->isNullValue())
7999 continue;
8001 // Try turning it into a post-indexed load / store except when
8002 // 1) All uses are load / store ops that use it as base ptr (and
8003 // it may be folded as addressing mmode).
8004 // 2) Op must be independent of N, i.e. Op is neither a predecessor
8005 // nor a successor of N. Otherwise, if Op is folded that would
8006 // create a cycle.
8008 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
8009 continue;
8011 // Check for #1.
8012 bool TryNext = false;
8013 for (SDNode *Use : BasePtr.getNode()->uses()) {
8014 if (Use == Ptr.getNode())
8015 continue;
8017 // If all the uses are load / store addresses, then don't do the
8018 // transformation.
8019 if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
8020 bool RealUse = false;
8021 for (SDNode *UseUse : Use->uses()) {
8022 if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
8023 RealUse = true;
8024 }
8026 if (!RealUse) {
8027 TryNext = true;
8028 break;
8029 }
8030 }
8031 }
8033 if (TryNext)
8034 continue;
8036 // Check for #2
8037 if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
8038 SDValue Result = isLoad
8039 ? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
8040 BasePtr, Offset, AM)
8041 : DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
8042 BasePtr, Offset, AM);
8043 ++PostIndexedNodes;
8044 ++NodesCombined;
8045 DEBUG(dbgs() << "\nReplacing.5 ";
8046 N->dump(&DAG);
8047 dbgs() << "\nWith: ";
8048 Result.getNode()->dump(&DAG);
8049 dbgs() << '\n');
8050 WorklistRemover DeadNodes(*this);
8051 if (isLoad) {
8052 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
8053 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
8054 } else {
8055 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
8056 }
8058 // Finally, since the node is now dead, remove it from the graph.
8059 deleteAndRecombine(N);
8061 // Replace the uses of Use with uses of the updated base value.
8062 DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
8063 Result.getValue(isLoad ? 1 : 0));
8064 deleteAndRecombine(Op);
8065 return true;
8066 }
8067 }
8068 }
8070 return false;
8071 }
8073 /// \brief Return the base-pointer arithmetic from an indexed \p LD.
8074 SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {
8075 ISD::MemIndexedMode AM = LD->getAddressingMode();
8076 assert(AM != ISD::UNINDEXED);
8077 SDValue BP = LD->getOperand(1);
8078 SDValue Inc = LD->getOperand(2);
8080 // Some backends use TargetConstants for load offsets, but don't expect
8081 // TargetConstants in general ADD nodes. We can convert these constants into
8082 // regular Constants (if the constant is not opaque).
8083 assert((Inc.getOpcode() != ISD::TargetConstant ||
8084 !cast<ConstantSDNode>(Inc)->isOpaque()) &&
8085 "Cannot split out indexing using opaque target constants");
8086 if (Inc.getOpcode() == ISD::TargetConstant) {
8087 ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc);
8088 Inc = DAG.getConstant(*ConstInc->getConstantIntValue(),
8089 ConstInc->getValueType(0));
8090 }
8092 unsigned Opc =
8093 (AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB);
8094 return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc);
8095 }
8097 SDValue DAGCombiner::visitLOAD(SDNode *N) {
8098 LoadSDNode *LD = cast<LoadSDNode>(N);
8099 SDValue Chain = LD->getChain();
8100 SDValue Ptr = LD->getBasePtr();
8102 // If load is not volatile and there are no uses of the loaded value (and
8103 // the updated indexed value in case of indexed loads), change uses of the
8104 // chain value into uses of the chain input (i.e. delete the dead load).
8105 if (!LD->isVolatile()) {
8106 if (N->getValueType(1) == MVT::Other) {
8107 // Unindexed loads.
8108 if (!N->hasAnyUseOfValue(0)) {
8109 // It's not safe to use the two value CombineTo variant here. e.g.
8110 // v1, chain2 = load chain1, loc
8111 // v2, chain3 = load chain2, loc
8112 // v3 = add v2, c
8113 // Now we replace use of chain2 with chain1. This makes the second load
8114 // isomorphic to the one we are deleting, and thus makes this load live.
8115 DEBUG(dbgs() << "\nReplacing.6 ";
8116 N->dump(&DAG);
8117 dbgs() << "\nWith chain: ";
8118 Chain.getNode()->dump(&DAG);
8119 dbgs() << "\n");
8120 WorklistRemover DeadNodes(*this);
8121 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
8123 if (N->use_empty())
8124 deleteAndRecombine(N);
8126 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8127 }
8128 } else {
8129 // Indexed loads.
8130 assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
8132 // If this load has an opaque TargetConstant offset, then we cannot split
8133 // the indexing into an add/sub directly (that TargetConstant may not be
8134 // valid for a different type of node, and we cannot convert an opaque
8135 // target constant into a regular constant).
8136 bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant &&
8137 cast<ConstantSDNode>(LD->getOperand(2))->isOpaque();
8139 if (!N->hasAnyUseOfValue(0) &&
8140 ((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) {
8141 SDValue Undef = DAG.getUNDEF(N->getValueType(0));
8142 SDValue Index;
8143 if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) {
8144 Index = SplitIndexingFromLoad(LD);
8145 // Try to fold the base pointer arithmetic into subsequent loads and
8146 // stores.
8147 AddUsersToWorklist(N);
8148 } else
8149 Index = DAG.getUNDEF(N->getValueType(1));
8150 DEBUG(dbgs() << "\nReplacing.7 ";
8151 N->dump(&DAG);
8152 dbgs() << "\nWith: ";
8153 Undef.getNode()->dump(&DAG);
8154 dbgs() << " and 2 other values\n");
8155 WorklistRemover DeadNodes(*this);
8156 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
8157 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
8158 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
8159 deleteAndRecombine(N);
8160 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8161 }
8162 }
8163 }
8165 // If this load is directly stored, replace the load value with the stored
8166 // value.
8167 // TODO: Handle store large -> read small portion.
8168 // TODO: Handle TRUNCSTORE/LOADEXT
8169 if (ISD::isNormalLoad(N) && !LD->isVolatile()) {
8170 if (ISD::isNON_TRUNCStore(Chain.getNode())) {
8171 StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
8172 if (PrevST->getBasePtr() == Ptr &&
8173 PrevST->getValue().getValueType() == N->getValueType(0))
8174 return CombineTo(N, Chain.getOperand(1), Chain);
8175 }
8176 }
8178 // Try to infer better alignment information than the load already has.
8179 if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
8180 if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
8181 if (Align > LD->getMemOperand()->getBaseAlignment()) {
8182 SDValue NewLoad =
8183 DAG.getExtLoad(LD->getExtensionType(), SDLoc(N),
8184 LD->getValueType(0),
8185 Chain, Ptr, LD->getPointerInfo(),
8186 LD->getMemoryVT(),
8187 LD->isVolatile(), LD->isNonTemporal(),
8188 LD->isInvariant(), Align, LD->getAAInfo());
8189 return CombineTo(N, NewLoad, SDValue(NewLoad.getNode(), 1), true);
8190 }
8191 }
8192 }
8194 bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA :
8195 TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
8196 #ifndef NDEBUG
8197 if (CombinerAAOnlyFunc.getNumOccurrences() &&
8198 CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
8199 UseAA = false;
8200 #endif
8201 if (UseAA && LD->isUnindexed()) {
8202 // Walk up chain skipping non-aliasing memory nodes.
8203 SDValue BetterChain = FindBetterChain(N, Chain);
8205 // If there is a better chain.
8206 if (Chain != BetterChain) {
8207 SDValue ReplLoad;
8209 // Replace the chain to void dependency.
8210 if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
8211 ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
8212 BetterChain, Ptr, LD->getMemOperand());
8213 } else {
8214 ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
8215 LD->getValueType(0),
8216 BetterChain, Ptr, LD->getMemoryVT(),
8217 LD->getMemOperand());
8218 }
8220 // Create token factor to keep old chain connected.
8221 SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
8222 MVT::Other, Chain, ReplLoad.getValue(1));
8224 // Make sure the new and old chains are cleaned up.
8225 AddToWorklist(Token.getNode());
8227 // Replace uses with load result and token factor. Don't add users
8228 // to work list.
8229 return CombineTo(N, ReplLoad.getValue(0), Token, false);
8230 }
8231 }
8233 // Try transforming N to an indexed load.
8234 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
8235 return SDValue(N, 0);
8237 // Try to slice up N to more direct loads if the slices are mapped to
8238 // different register banks or pairing can take place.
8239 if (SliceUpLoad(N))
8240 return SDValue(N, 0);
8242 return SDValue();
8243 }
8245 namespace {
8246 /// \brief Helper structure used to slice a load in smaller loads.
8247 /// Basically a slice is obtained from the following sequence:
8248 /// Origin = load Ty1, Base
8249 /// Shift = srl Ty1 Origin, CstTy Amount
8250 /// Inst = trunc Shift to Ty2
8251 ///
8252 /// Then, it will be rewriten into:
8253 /// Slice = load SliceTy, Base + SliceOffset
8254 /// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
8255 ///
8256 /// SliceTy is deduced from the number of bits that are actually used to
8257 /// build Inst.
8258 struct LoadedSlice {
8259 /// \brief Helper structure used to compute the cost of a slice.
8260 struct Cost {
8261 /// Are we optimizing for code size.
8262 bool ForCodeSize;
8263 /// Various cost.
8264 unsigned Loads;
8265 unsigned Truncates;
8266 unsigned CrossRegisterBanksCopies;
8267 unsigned ZExts;
8268 unsigned Shift;
8270 Cost(bool ForCodeSize = false)
8271 : ForCodeSize(ForCodeSize), Loads(0), Truncates(0),
8272 CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {}
8274 /// \brief Get the cost of one isolated slice.
8275 Cost(const LoadedSlice &LS, bool ForCodeSize = false)
8276 : ForCodeSize(ForCodeSize), Loads(1), Truncates(0),
8277 CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {
8278 EVT TruncType = LS.Inst->getValueType(0);
8279 EVT LoadedType = LS.getLoadedType();
8280 if (TruncType != LoadedType &&
8281 !LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
8282 ZExts = 1;
8283 }
8285 /// \brief Account for slicing gain in the current cost.
8286 /// Slicing provide a few gains like removing a shift or a
8287 /// truncate. This method allows to grow the cost of the original
8288 /// load with the gain from this slice.
8289 void addSliceGain(const LoadedSlice &LS) {
8290 // Each slice saves a truncate.
8291 const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
8292 if (!TLI.isTruncateFree(LS.Inst->getValueType(0),
8293 LS.Inst->getOperand(0).getValueType()))
8294 ++Truncates;
8295 // If there is a shift amount, this slice gets rid of it.
8296 if (LS.Shift)
8297 ++Shift;
8298 // If this slice can merge a cross register bank copy, account for it.
8299 if (LS.canMergeExpensiveCrossRegisterBankCopy())
8300 ++CrossRegisterBanksCopies;
8301 }
8303 Cost &operator+=(const Cost &RHS) {
8304 Loads += RHS.Loads;
8305 Truncates += RHS.Truncates;
8306 CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
8307 ZExts += RHS.ZExts;
8308 Shift += RHS.Shift;
8309 return *this;
8310 }
8312 bool operator==(const Cost &RHS) const {
8313 return Loads == RHS.Loads && Truncates == RHS.Truncates &&
8314 CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
8315 ZExts == RHS.ZExts && Shift == RHS.Shift;
8316 }
8318 bool operator!=(const Cost &RHS) const { return !(*this == RHS); }
8320 bool operator<(const Cost &RHS) const {
8321 // Assume cross register banks copies are as expensive as loads.
8322 // FIXME: Do we want some more target hooks?
8323 unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
8324 unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
8325 // Unless we are optimizing for code size, consider the
8326 // expensive operation first.
8327 if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
8328 return ExpensiveOpsLHS < ExpensiveOpsRHS;
8329 return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
8330 (RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
8331 }
8333 bool operator>(const Cost &RHS) const { return RHS < *this; }
8335 bool operator<=(const Cost &RHS) const { return !(RHS < *this); }
8337 bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
8338 };
8339 // The last instruction that represent the slice. This should be a
8340 // truncate instruction.
8341 SDNode *Inst;
8342 // The original load instruction.
8343 LoadSDNode *Origin;
8344 // The right shift amount in bits from the original load.
8345 unsigned Shift;
8346 // The DAG from which Origin came from.
8347 // This is used to get some contextual information about legal types, etc.
8348 SelectionDAG *DAG;
8350 LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
8351 unsigned Shift = 0, SelectionDAG *DAG = nullptr)
8352 : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
8354 LoadedSlice(const LoadedSlice &LS)
8355 : Inst(LS.Inst), Origin(LS.Origin), Shift(LS.Shift), DAG(LS.DAG) {}
8357 /// \brief Get the bits used in a chunk of bits \p BitWidth large.
8358 /// \return Result is \p BitWidth and has used bits set to 1 and
8359 /// not used bits set to 0.
8360 APInt getUsedBits() const {
8361 // Reproduce the trunc(lshr) sequence:
8362 // - Start from the truncated value.
8363 // - Zero extend to the desired bit width.
8364 // - Shift left.
8365 assert(Origin && "No original load to compare against.");
8366 unsigned BitWidth = Origin->getValueSizeInBits(0);
8367 assert(Inst && "This slice is not bound to an instruction");
8368 assert(Inst->getValueSizeInBits(0) <= BitWidth &&
8369 "Extracted slice is bigger than the whole type!");
8370 APInt UsedBits(Inst->getValueSizeInBits(0), 0);
8371 UsedBits.setAllBits();
8372 UsedBits = UsedBits.zext(BitWidth);
8373 UsedBits <<= Shift;
8374 return UsedBits;
8375 }
8377 /// \brief Get the size of the slice to be loaded in bytes.
8378 unsigned getLoadedSize() const {
8379 unsigned SliceSize = getUsedBits().countPopulation();
8380 assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
8381 return SliceSize / 8;
8382 }
8384 /// \brief Get the type that will be loaded for this slice.
8385 /// Note: This may not be the final type for the slice.
8386 EVT getLoadedType() const {
8387 assert(DAG && "Missing context");
8388 LLVMContext &Ctxt = *DAG->getContext();
8389 return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8);
8390 }
8392 /// \brief Get the alignment of the load used for this slice.
8393 unsigned getAlignment() const {
8394 unsigned Alignment = Origin->getAlignment();
8395 unsigned Offset = getOffsetFromBase();
8396 if (Offset != 0)
8397 Alignment = MinAlign(Alignment, Alignment + Offset);
8398 return Alignment;
8399 }
8401 /// \brief Check if this slice can be rewritten with legal operations.
8402 bool isLegal() const {
8403 // An invalid slice is not legal.
8404 if (!Origin || !Inst || !DAG)
8405 return false;
8407 // Offsets are for indexed load only, we do not handle that.
8408 if (Origin->getOffset().getOpcode() != ISD::UNDEF)
8409 return false;
8411 const TargetLowering &TLI = DAG->getTargetLoweringInfo();
8413 // Check that the type is legal.
8414 EVT SliceType = getLoadedType();
8415 if (!TLI.isTypeLegal(SliceType))
8416 return false;
8418 // Check that the load is legal for this type.
8419 if (!TLI.isOperationLegal(ISD::LOAD, SliceType))
8420 return false;
8422 // Check that the offset can be computed.
8423 // 1. Check its type.
8424 EVT PtrType = Origin->getBasePtr().getValueType();
8425 if (PtrType == MVT::Untyped || PtrType.isExtended())
8426 return false;
8428 // 2. Check that it fits in the immediate.
8429 if (!TLI.isLegalAddImmediate(getOffsetFromBase()))
8430 return false;
8432 // 3. Check that the computation is legal.
8433 if (!TLI.isOperationLegal(ISD::ADD, PtrType))
8434 return false;
8436 // Check that the zext is legal if it needs one.
8437 EVT TruncateType = Inst->getValueType(0);
8438 if (TruncateType != SliceType &&
8439 !TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType))
8440 return false;
8442 return true;
8443 }
8445 /// \brief Get the offset in bytes of this slice in the original chunk of
8446 /// bits.
8447 /// \pre DAG != nullptr.
8448 uint64_t getOffsetFromBase() const {
8449 assert(DAG && "Missing context.");
8450 bool IsBigEndian =
8451 DAG->getTargetLoweringInfo().getDataLayout()->isBigEndian();
8452 assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
8453 uint64_t Offset = Shift / 8;
8454 unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;
8455 assert(!(Origin->getValueSizeInBits(0) & 0x7) &&
8456 "The size of the original loaded type is not a multiple of a"
8457 " byte.");
8458 // If Offset is bigger than TySizeInBytes, it means we are loading all
8459 // zeros. This should have been optimized before in the process.
8460 assert(TySizeInBytes > Offset &&
8461 "Invalid shift amount for given loaded size");
8462 if (IsBigEndian)
8463 Offset = TySizeInBytes - Offset - getLoadedSize();
8464 return Offset;
8465 }
8467 /// \brief Generate the sequence of instructions to load the slice
8468 /// represented by this object and redirect the uses of this slice to
8469 /// this new sequence of instructions.
8470 /// \pre this->Inst && this->Origin are valid Instructions and this
8471 /// object passed the legal check: LoadedSlice::isLegal returned true.
8472 /// \return The last instruction of the sequence used to load the slice.
8473 SDValue loadSlice() const {
8474 assert(Inst && Origin && "Unable to replace a non-existing slice.");
8475 const SDValue &OldBaseAddr = Origin->getBasePtr();
8476 SDValue BaseAddr = OldBaseAddr;
8477 // Get the offset in that chunk of bytes w.r.t. the endianess.
8478 int64_t Offset = static_cast<int64_t>(getOffsetFromBase());
8479 assert(Offset >= 0 && "Offset too big to fit in int64_t!");
8480 if (Offset) {
8481 // BaseAddr = BaseAddr + Offset.
8482 EVT ArithType = BaseAddr.getValueType();
8483 BaseAddr = DAG->getNode(ISD::ADD, SDLoc(Origin), ArithType, BaseAddr,
8484 DAG->getConstant(Offset, ArithType));
8485 }
8487 // Create the type of the loaded slice according to its size.
8488 EVT SliceType = getLoadedType();
8490 // Create the load for the slice.
8491 SDValue LastInst = DAG->getLoad(
8492 SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr,
8493 Origin->getPointerInfo().getWithOffset(Offset), Origin->isVolatile(),
8494 Origin->isNonTemporal(), Origin->isInvariant(), getAlignment());
8495 // If the final type is not the same as the loaded type, this means that
8496 // we have to pad with zero. Create a zero extend for that.
8497 EVT FinalType = Inst->getValueType(0);
8498 if (SliceType != FinalType)
8499 LastInst =
8500 DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst);
8501 return LastInst;
8502 }
8504 /// \brief Check if this slice can be merged with an expensive cross register
8505 /// bank copy. E.g.,
8506 /// i = load i32
8507 /// f = bitcast i32 i to float
8508 bool canMergeExpensiveCrossRegisterBankCopy() const {
8509 if (!Inst || !Inst->hasOneUse())
8510 return false;
8511 SDNode *Use = *Inst->use_begin();
8512 if (Use->getOpcode() != ISD::BITCAST)
8513 return false;
8514 assert(DAG && "Missing context");
8515 const TargetLowering &TLI = DAG->getTargetLoweringInfo();
8516 EVT ResVT = Use->getValueType(0);
8517 const TargetRegisterClass *ResRC = TLI.getRegClassFor(ResVT.getSimpleVT());
8518 const TargetRegisterClass *ArgRC =
8519 TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT());
8520 if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT))
8521 return false;
8523 // At this point, we know that we perform a cross-register-bank copy.
8524 // Check if it is expensive.
8525 const TargetRegisterInfo *TRI =
8526 TLI.getTargetMachine().getSubtargetImpl()->getRegisterInfo();
8527 // Assume bitcasts are cheap, unless both register classes do not
8528 // explicitly share a common sub class.
8529 if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC))
8530 return false;
8532 // Check if it will be merged with the load.
8533 // 1. Check the alignment constraint.
8534 unsigned RequiredAlignment = TLI.getDataLayout()->getABITypeAlignment(
8535 ResVT.getTypeForEVT(*DAG->getContext()));
8537 if (RequiredAlignment > getAlignment())
8538 return false;
8540 // 2. Check that the load is a legal operation for that type.
8541 if (!TLI.isOperationLegal(ISD::LOAD, ResVT))
8542 return false;
8544 // 3. Check that we do not have a zext in the way.
8545 if (Inst->getValueType(0) != getLoadedType())
8546 return false;
8548 return true;
8549 }
8550 };
8551 }
8553 /// \brief Check that all bits set in \p UsedBits form a dense region, i.e.,
8554 /// \p UsedBits looks like 0..0 1..1 0..0.
8555 static bool areUsedBitsDense(const APInt &UsedBits) {
8556 // If all the bits are one, this is dense!
8557 if (UsedBits.isAllOnesValue())
8558 return true;
8560 // Get rid of the unused bits on the right.
8561 APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
8562 // Get rid of the unused bits on the left.
8563 if (NarrowedUsedBits.countLeadingZeros())
8564 NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
8565 // Check that the chunk of bits is completely used.
8566 return NarrowedUsedBits.isAllOnesValue();
8567 }
8569 /// \brief Check whether or not \p First and \p Second are next to each other
8570 /// in memory. This means that there is no hole between the bits loaded
8571 /// by \p First and the bits loaded by \p Second.
8572 static bool areSlicesNextToEachOther(const LoadedSlice &First,
8573 const LoadedSlice &Second) {
8574 assert(First.Origin == Second.Origin && First.Origin &&
8575 "Unable to match different memory origins.");
8576 APInt UsedBits = First.getUsedBits();
8577 assert((UsedBits & Second.getUsedBits()) == 0 &&
8578 "Slices are not supposed to overlap.");
8579 UsedBits |= Second.getUsedBits();
8580 return areUsedBitsDense(UsedBits);
8581 }
8583 /// \brief Adjust the \p GlobalLSCost according to the target
8584 /// paring capabilities and the layout of the slices.
8585 /// \pre \p GlobalLSCost should account for at least as many loads as
8586 /// there is in the slices in \p LoadedSlices.
8587 static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
8588 LoadedSlice::Cost &GlobalLSCost) {
8589 unsigned NumberOfSlices = LoadedSlices.size();
8590 // If there is less than 2 elements, no pairing is possible.
8591 if (NumberOfSlices < 2)
8592 return;
8594 // Sort the slices so that elements that are likely to be next to each
8595 // other in memory are next to each other in the list.
8596 std::sort(LoadedSlices.begin(), LoadedSlices.end(),
8597 [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
8598 assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
8599 return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
8600 });
8601 const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
8602 // First (resp. Second) is the first (resp. Second) potentially candidate
8603 // to be placed in a paired load.
8604 const LoadedSlice *First = nullptr;
8605 const LoadedSlice *Second = nullptr;
8606 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
8607 // Set the beginning of the pair.
8608 First = Second) {
8610 Second = &LoadedSlices[CurrSlice];
8612 // If First is NULL, it means we start a new pair.
8613 // Get to the next slice.
8614 if (!First)
8615 continue;
8617 EVT LoadedType = First->getLoadedType();
8619 // If the types of the slices are different, we cannot pair them.
8620 if (LoadedType != Second->getLoadedType())
8621 continue;
8623 // Check if the target supplies paired loads for this type.
8624 unsigned RequiredAlignment = 0;
8625 if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
8626 // move to the next pair, this type is hopeless.
8627 Second = nullptr;
8628 continue;
8629 }
8630 // Check if we meet the alignment requirement.
8631 if (RequiredAlignment > First->getAlignment())
8632 continue;
8634 // Check that both loads are next to each other in memory.
8635 if (!areSlicesNextToEachOther(*First, *Second))
8636 continue;
8638 assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
8639 --GlobalLSCost.Loads;
8640 // Move to the next pair.
8641 Second = nullptr;
8642 }
8643 }
8645 /// \brief Check the profitability of all involved LoadedSlice.
8646 /// Currently, it is considered profitable if there is exactly two
8647 /// involved slices (1) which are (2) next to each other in memory, and
8648 /// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
8649 ///
8650 /// Note: The order of the elements in \p LoadedSlices may be modified, but not
8651 /// the elements themselves.
8652 ///
8653 /// FIXME: When the cost model will be mature enough, we can relax
8654 /// constraints (1) and (2).
8655 static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
8656 const APInt &UsedBits, bool ForCodeSize) {
8657 unsigned NumberOfSlices = LoadedSlices.size();
8658 if (StressLoadSlicing)
8659 return NumberOfSlices > 1;
8661 // Check (1).
8662 if (NumberOfSlices != 2)
8663 return false;
8665 // Check (2).
8666 if (!areUsedBitsDense(UsedBits))
8667 return false;
8669 // Check (3).
8670 LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
8671 // The original code has one big load.
8672 OrigCost.Loads = 1;
8673 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
8674 const LoadedSlice &LS = LoadedSlices[CurrSlice];
8675 // Accumulate the cost of all the slices.
8676 LoadedSlice::Cost SliceCost(LS, ForCodeSize);
8677 GlobalSlicingCost += SliceCost;
8679 // Account as cost in the original configuration the gain obtained
8680 // with the current slices.
8681 OrigCost.addSliceGain(LS);
8682 }
8684 // If the target supports paired load, adjust the cost accordingly.
8685 adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
8686 return OrigCost > GlobalSlicingCost;
8687 }
8689 /// \brief If the given load, \p LI, is used only by trunc or trunc(lshr)
8690 /// operations, split it in the various pieces being extracted.
8691 ///
8692 /// This sort of thing is introduced by SROA.
8693 /// This slicing takes care not to insert overlapping loads.
8694 /// \pre LI is a simple load (i.e., not an atomic or volatile load).
8695 bool DAGCombiner::SliceUpLoad(SDNode *N) {
8696 if (Level < AfterLegalizeDAG)
8697 return false;
8699 LoadSDNode *LD = cast<LoadSDNode>(N);
8700 if (LD->isVolatile() || !ISD::isNormalLoad(LD) ||
8701 !LD->getValueType(0).isInteger())
8702 return false;
8704 // Keep track of already used bits to detect overlapping values.
8705 // In that case, we will just abort the transformation.
8706 APInt UsedBits(LD->getValueSizeInBits(0), 0);
8708 SmallVector<LoadedSlice, 4> LoadedSlices;
8710 // Check if this load is used as several smaller chunks of bits.
8711 // Basically, look for uses in trunc or trunc(lshr) and record a new chain
8712 // of computation for each trunc.
8713 for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
8714 UI != UIEnd; ++UI) {
8715 // Skip the uses of the chain.
8716 if (UI.getUse().getResNo() != 0)
8717 continue;
8719 SDNode *User = *UI;
8720 unsigned Shift = 0;
8722 // Check if this is a trunc(lshr).
8723 if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
8724 isa<ConstantSDNode>(User->getOperand(1))) {
8725 Shift = cast<ConstantSDNode>(User->getOperand(1))->getZExtValue();
8726 User = *User->use_begin();
8727 }
8729 // At this point, User is a Truncate, iff we encountered, trunc or
8730 // trunc(lshr).
8731 if (User->getOpcode() != ISD::TRUNCATE)
8732 return false;
8734 // The width of the type must be a power of 2 and greater than 8-bits.
8735 // Otherwise the load cannot be represented in LLVM IR.
8736 // Moreover, if we shifted with a non-8-bits multiple, the slice
8737 // will be across several bytes. We do not support that.
8738 unsigned Width = User->getValueSizeInBits(0);
8739 if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
8740 return 0;
8742 // Build the slice for this chain of computations.
8743 LoadedSlice LS(User, LD, Shift, &DAG);
8744 APInt CurrentUsedBits = LS.getUsedBits();
8746 // Check if this slice overlaps with another.
8747 if ((CurrentUsedBits & UsedBits) != 0)
8748 return false;
8749 // Update the bits used globally.
8750 UsedBits |= CurrentUsedBits;
8752 // Check if the new slice would be legal.
8753 if (!LS.isLegal())
8754 return false;
8756 // Record the slice.
8757 LoadedSlices.push_back(LS);
8758 }
8760 // Abort slicing if it does not seem to be profitable.
8761 if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
8762 return false;
8764 ++SlicedLoads;
8766 // Rewrite each chain to use an independent load.
8767 // By construction, each chain can be represented by a unique load.
8769 // Prepare the argument for the new token factor for all the slices.
8770 SmallVector<SDValue, 8> ArgChains;
8771 for (SmallVectorImpl<LoadedSlice>::const_iterator
8772 LSIt = LoadedSlices.begin(),
8773 LSItEnd = LoadedSlices.end();
8774 LSIt != LSItEnd; ++LSIt) {
8775 SDValue SliceInst = LSIt->loadSlice();
8776 CombineTo(LSIt->Inst, SliceInst, true);
8777 if (SliceInst.getNode()->getOpcode() != ISD::LOAD)
8778 SliceInst = SliceInst.getOperand(0);
8779 assert(SliceInst->getOpcode() == ISD::LOAD &&
8780 "It takes more than a zext to get to the loaded slice!!");
8781 ArgChains.push_back(SliceInst.getValue(1));
8782 }
8784 SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
8785 ArgChains);
8786 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
8787 return true;
8788 }
8790 /// Check to see if V is (and load (ptr), imm), where the load is having
8791 /// specific bytes cleared out. If so, return the byte size being masked out
8792 /// and the shift amount.
8793 static std::pair<unsigned, unsigned>
8794 CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
8795 std::pair<unsigned, unsigned> Result(0, 0);
8797 // Check for the structure we're looking for.
8798 if (V->getOpcode() != ISD::AND ||
8799 !isa<ConstantSDNode>(V->getOperand(1)) ||
8800 !ISD::isNormalLoad(V->getOperand(0).getNode()))
8801 return Result;
8803 // Check the chain and pointer.
8804 LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
8805 if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
8807 // The store should be chained directly to the load or be an operand of a
8808 // tokenfactor.
8809 if (LD == Chain.getNode())
8810 ; // ok.
8811 else if (Chain->getOpcode() != ISD::TokenFactor)
8812 return Result; // Fail.
8813 else {
8814 bool isOk = false;
8815 for (unsigned i = 0, e = Chain->getNumOperands(); i != e; ++i)
8816 if (Chain->getOperand(i).getNode() == LD) {
8817 isOk = true;
8818 break;
8819 }
8820 if (!isOk) return Result;
8821 }
8823 // This only handles simple types.
8824 if (V.getValueType() != MVT::i16 &&
8825 V.getValueType() != MVT::i32 &&
8826 V.getValueType() != MVT::i64)
8827 return Result;
8829 // Check the constant mask. Invert it so that the bits being masked out are
8830 // 0 and the bits being kept are 1. Use getSExtValue so that leading bits
8831 // follow the sign bit for uniformity.
8832 uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
8833 unsigned NotMaskLZ = countLeadingZeros(NotMask);
8834 if (NotMaskLZ & 7) return Result; // Must be multiple of a byte.
8835 unsigned NotMaskTZ = countTrailingZeros(NotMask);
8836 if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
8837 if (NotMaskLZ == 64) return Result; // All zero mask.
8839 // See if we have a continuous run of bits. If so, we have 0*1+0*
8840 if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
8841 return Result;
8843 // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
8844 if (V.getValueType() != MVT::i64 && NotMaskLZ)
8845 NotMaskLZ -= 64-V.getValueSizeInBits();
8847 unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
8848 switch (MaskedBytes) {
8849 case 1:
8850 case 2:
8851 case 4: break;
8852 default: return Result; // All one mask, or 5-byte mask.
8853 }
8855 // Verify that the first bit starts at a multiple of mask so that the access
8856 // is aligned the same as the access width.
8857 if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
8859 Result.first = MaskedBytes;
8860 Result.second = NotMaskTZ/8;
8861 return Result;
8862 }
8865 /// Check to see if IVal is something that provides a value as specified by
8866 /// MaskInfo. If so, replace the specified store with a narrower store of
8867 /// truncated IVal.
8868 static SDNode *
8869 ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
8870 SDValue IVal, StoreSDNode *St,
8871 DAGCombiner *DC) {
8872 unsigned NumBytes = MaskInfo.first;
8873 unsigned ByteShift = MaskInfo.second;
8874 SelectionDAG &DAG = DC->getDAG();
8876 // Check to see if IVal is all zeros in the part being masked in by the 'or'
8877 // that uses this. If not, this is not a replacement.
8878 APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
8879 ByteShift*8, (ByteShift+NumBytes)*8);
8880 if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr;
8882 // Check that it is legal on the target to do this. It is legal if the new
8883 // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
8884 // legalization.
8885 MVT VT = MVT::getIntegerVT(NumBytes*8);
8886 if (!DC->isTypeLegal(VT))
8887 return nullptr;
8889 // Okay, we can do this! Replace the 'St' store with a store of IVal that is
8890 // shifted by ByteShift and truncated down to NumBytes.
8891 if (ByteShift)
8892 IVal = DAG.getNode(ISD::SRL, SDLoc(IVal), IVal.getValueType(), IVal,
8893 DAG.getConstant(ByteShift*8,
8894 DC->getShiftAmountTy(IVal.getValueType())));
8896 // Figure out the offset for the store and the alignment of the access.
8897 unsigned StOffset;
8898 unsigned NewAlign = St->getAlignment();
8900 if (DAG.getTargetLoweringInfo().isLittleEndian())
8901 StOffset = ByteShift;
8902 else
8903 StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
8905 SDValue Ptr = St->getBasePtr();
8906 if (StOffset) {
8907 Ptr = DAG.getNode(ISD::ADD, SDLoc(IVal), Ptr.getValueType(),
8908 Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
8909 NewAlign = MinAlign(NewAlign, StOffset);
8910 }
8912 // Truncate down to the new size.
8913 IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal);
8915 ++OpsNarrowed;
8916 return DAG.getStore(St->getChain(), SDLoc(St), IVal, Ptr,
8917 St->getPointerInfo().getWithOffset(StOffset),
8918 false, false, NewAlign).getNode();
8919 }
8922 /// Look for sequence of load / op / store where op is one of 'or', 'xor', and
8923 /// 'and' of immediates. If 'op' is only touching some of the loaded bits, try
8924 /// narrowing the load and store if it would end up being a win for performance
8925 /// or code size.
8926 SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
8927 StoreSDNode *ST = cast<StoreSDNode>(N);
8928 if (ST->isVolatile())
8929 return SDValue();
8931 SDValue Chain = ST->getChain();
8932 SDValue Value = ST->getValue();
8933 SDValue Ptr = ST->getBasePtr();
8934 EVT VT = Value.getValueType();
8936 if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
8937 return SDValue();
8939 unsigned Opc = Value.getOpcode();
8941 // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
8942 // is a byte mask indicating a consecutive number of bytes, check to see if
8943 // Y is known to provide just those bytes. If so, we try to replace the
8944 // load + replace + store sequence with a single (narrower) store, which makes
8945 // the load dead.
8946 if (Opc == ISD::OR) {
8947 std::pair<unsigned, unsigned> MaskedLoad;
8948 MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
8949 if (MaskedLoad.first)
8950 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
8951 Value.getOperand(1), ST,this))
8952 return SDValue(NewST, 0);
8954 // Or is commutative, so try swapping X and Y.
8955 MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
8956 if (MaskedLoad.first)
8957 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
8958 Value.getOperand(0), ST,this))
8959 return SDValue(NewST, 0);
8960 }
8962 if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
8963 Value.getOperand(1).getOpcode() != ISD::Constant)
8964 return SDValue();
8966 SDValue N0 = Value.getOperand(0);
8967 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
8968 Chain == SDValue(N0.getNode(), 1)) {
8969 LoadSDNode *LD = cast<LoadSDNode>(N0);
8970 if (LD->getBasePtr() != Ptr ||
8971 LD->getPointerInfo().getAddrSpace() !=
8972 ST->getPointerInfo().getAddrSpace())
8973 return SDValue();
8975 // Find the type to narrow it the load / op / store to.
8976 SDValue N1 = Value.getOperand(1);
8977 unsigned BitWidth = N1.getValueSizeInBits();
8978 APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
8979 if (Opc == ISD::AND)
8980 Imm ^= APInt::getAllOnesValue(BitWidth);
8981 if (Imm == 0 || Imm.isAllOnesValue())
8982 return SDValue();
8983 unsigned ShAmt = Imm.countTrailingZeros();
8984 unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
8985 unsigned NewBW = NextPowerOf2(MSB - ShAmt);
8986 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
8987 while (NewBW < BitWidth &&
8988 !(TLI.isOperationLegalOrCustom(Opc, NewVT) &&
8989 TLI.isNarrowingProfitable(VT, NewVT))) {
8990 NewBW = NextPowerOf2(NewBW);
8991 NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
8992 }
8993 if (NewBW >= BitWidth)
8994 return SDValue();
8996 // If the lsb changed does not start at the type bitwidth boundary,
8997 // start at the previous one.
8998 if (ShAmt % NewBW)
8999 ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
9000 APInt Mask = APInt::getBitsSet(BitWidth, ShAmt,
9001 std::min(BitWidth, ShAmt + NewBW));
9002 if ((Imm & Mask) == Imm) {
9003 APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
9004 if (Opc == ISD::AND)
9005 NewImm ^= APInt::getAllOnesValue(NewBW);
9006 uint64_t PtrOff = ShAmt / 8;
9007 // For big endian targets, we need to adjust the offset to the pointer to
9008 // load the correct bytes.
9009 if (TLI.isBigEndian())
9010 PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
9012 unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
9013 Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
9014 if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy))
9015 return SDValue();
9017 SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),
9018 Ptr.getValueType(), Ptr,
9019 DAG.getConstant(PtrOff, Ptr.getValueType()));
9020 SDValue NewLD = DAG.getLoad(NewVT, SDLoc(N0),
9021 LD->getChain(), NewPtr,
9022 LD->getPointerInfo().getWithOffset(PtrOff),
9023 LD->isVolatile(), LD->isNonTemporal(),
9024 LD->isInvariant(), NewAlign,
9025 LD->getAAInfo());
9026 SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD,
9027 DAG.getConstant(NewImm, NewVT));
9028 SDValue NewST = DAG.getStore(Chain, SDLoc(N),
9029 NewVal, NewPtr,
9030 ST->getPointerInfo().getWithOffset(PtrOff),
9031 false, false, NewAlign);
9033 AddToWorklist(NewPtr.getNode());
9034 AddToWorklist(NewLD.getNode());
9035 AddToWorklist(NewVal.getNode());
9036 WorklistRemover DeadNodes(*this);
9037 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
9038 ++OpsNarrowed;
9039 return NewST;
9040 }
9041 }
9043 return SDValue();
9044 }
9046 /// For a given floating point load / store pair, if the load value isn't used
9047 /// by any other operations, then consider transforming the pair to integer
9048 /// load / store operations if the target deems the transformation profitable.
9049 SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
9050 StoreSDNode *ST = cast<StoreSDNode>(N);
9051 SDValue Chain = ST->getChain();
9052 SDValue Value = ST->getValue();
9053 if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
9054 Value.hasOneUse() &&
9055 Chain == SDValue(Value.getNode(), 1)) {
9056 LoadSDNode *LD = cast<LoadSDNode>(Value);
9057 EVT VT = LD->getMemoryVT();
9058 if (!VT.isFloatingPoint() ||
9059 VT != ST->getMemoryVT() ||
9060 LD->isNonTemporal() ||
9061 ST->isNonTemporal() ||
9062 LD->getPointerInfo().getAddrSpace() != 0 ||
9063 ST->getPointerInfo().getAddrSpace() != 0)
9064 return SDValue();
9066 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
9067 if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
9068 !TLI.isOperationLegal(ISD::STORE, IntVT) ||
9069 !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
9070 !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
9071 return SDValue();
9073 unsigned LDAlign = LD->getAlignment();
9074 unsigned STAlign = ST->getAlignment();
9075 Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
9076 unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy);
9077 if (LDAlign < ABIAlign || STAlign < ABIAlign)
9078 return SDValue();
9080 SDValue NewLD = DAG.getLoad(IntVT, SDLoc(Value),
9081 LD->getChain(), LD->getBasePtr(),
9082 LD->getPointerInfo(),
9083 false, false, false, LDAlign);
9085 SDValue NewST = DAG.getStore(NewLD.getValue(1), SDLoc(N),
9086 NewLD, ST->getBasePtr(),
9087 ST->getPointerInfo(),
9088 false, false, STAlign);
9090 AddToWorklist(NewLD.getNode());
9091 AddToWorklist(NewST.getNode());
9092 WorklistRemover DeadNodes(*this);
9093 DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
9094 ++LdStFP2Int;
9095 return NewST;
9096 }
9098 return SDValue();
9099 }
9101 /// Helper struct to parse and store a memory address as base + index + offset.
9102 /// We ignore sign extensions when it is safe to do so.
9103 /// The following two expressions are not equivalent. To differentiate we need
9104 /// to store whether there was a sign extension involved in the index
9105 /// computation.
9106 /// (load (i64 add (i64 copyfromreg %c)
9107 /// (i64 signextend (add (i8 load %index)
9108 /// (i8 1))))
9109 /// vs
9110 ///
9111 /// (load (i64 add (i64 copyfromreg %c)
9112 /// (i64 signextend (i32 add (i32 signextend (i8 load %index))
9113 /// (i32 1)))))
9114 struct BaseIndexOffset {
9115 SDValue Base;
9116 SDValue Index;
9117 int64_t Offset;
9118 bool IsIndexSignExt;
9120 BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {}
9122 BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
9123 bool IsIndexSignExt) :
9124 Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {}
9126 bool equalBaseIndex(const BaseIndexOffset &Other) {
9127 return Other.Base == Base && Other.Index == Index &&
9128 Other.IsIndexSignExt == IsIndexSignExt;
9129 }
9131 /// Parses tree in Ptr for base, index, offset addresses.
9132 static BaseIndexOffset match(SDValue Ptr) {
9133 bool IsIndexSignExt = false;
9135 // We only can pattern match BASE + INDEX + OFFSET. If Ptr is not an ADD
9136 // instruction, then it could be just the BASE or everything else we don't
9137 // know how to handle. Just use Ptr as BASE and give up.
9138 if (Ptr->getOpcode() != ISD::ADD)
9139 return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
9141 // We know that we have at least an ADD instruction. Try to pattern match
9142 // the simple case of BASE + OFFSET.
9143 if (isa<ConstantSDNode>(Ptr->getOperand(1))) {
9144 int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue();
9145 return BaseIndexOffset(Ptr->getOperand(0), SDValue(), Offset,
9146 IsIndexSignExt);
9147 }
9149 // Inside a loop the current BASE pointer is calculated using an ADD and a
9150 // MUL instruction. In this case Ptr is the actual BASE pointer.
9151 // (i64 add (i64 %array_ptr)
9152 // (i64 mul (i64 %induction_var)
9153 // (i64 %element_size)))
9154 if (Ptr->getOperand(1)->getOpcode() == ISD::MUL)
9155 return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
9157 // Look at Base + Index + Offset cases.
9158 SDValue Base = Ptr->getOperand(0);
9159 SDValue IndexOffset = Ptr->getOperand(1);
9161 // Skip signextends.
9162 if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) {
9163 IndexOffset = IndexOffset->getOperand(0);
9164 IsIndexSignExt = true;
9165 }
9167 // Either the case of Base + Index (no offset) or something else.
9168 if (IndexOffset->getOpcode() != ISD::ADD)
9169 return BaseIndexOffset(Base, IndexOffset, 0, IsIndexSignExt);
9171 // Now we have the case of Base + Index + offset.
9172 SDValue Index = IndexOffset->getOperand(0);
9173 SDValue Offset = IndexOffset->getOperand(1);
9175 if (!isa<ConstantSDNode>(Offset))
9176 return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
9178 // Ignore signextends.
9179 if (Index->getOpcode() == ISD::SIGN_EXTEND) {
9180 Index = Index->getOperand(0);
9181 IsIndexSignExt = true;
9182 } else IsIndexSignExt = false;
9184 int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue();
9185 return BaseIndexOffset(Base, Index, Off, IsIndexSignExt);
9186 }
9187 };
9189 /// Holds a pointer to an LSBaseSDNode as well as information on where it
9190 /// is located in a sequence of memory operations connected by a chain.
9191 struct MemOpLink {
9192 MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq):
9193 MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { }
9194 // Ptr to the mem node.
9195 LSBaseSDNode *MemNode;
9196 // Offset from the base ptr.
9197 int64_t OffsetFromBase;
9198 // What is the sequence number of this mem node.
9199 // Lowest mem operand in the DAG starts at zero.
9200 unsigned SequenceNum;
9201 };
9203 bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
9204 EVT MemVT = St->getMemoryVT();
9205 int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;
9206 bool NoVectors = DAG.getMachineFunction().getFunction()->getAttributes().
9207 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
9209 // Don't merge vectors into wider inputs.
9210 if (MemVT.isVector() || !MemVT.isSimple())
9211 return false;
9213 // Perform an early exit check. Do not bother looking at stored values that
9214 // are not constants or loads.
9215 SDValue StoredVal = St->getValue();
9216 bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
9217 if (!isa<ConstantSDNode>(StoredVal) && !isa<ConstantFPSDNode>(StoredVal) &&
9218 !IsLoadSrc)
9219 return false;
9221 // Only look at ends of store sequences.
9222 SDValue Chain = SDValue(St, 0);
9223 if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
9224 return false;
9226 // This holds the base pointer, index, and the offset in bytes from the base
9227 // pointer.
9228 BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr());
9230 // We must have a base and an offset.
9231 if (!BasePtr.Base.getNode())
9232 return false;
9234 // Do not handle stores to undef base pointers.
9235 if (BasePtr.Base.getOpcode() == ISD::UNDEF)
9236 return false;
9238 // Save the LoadSDNodes that we find in the chain.
9239 // We need to make sure that these nodes do not interfere with
9240 // any of the store nodes.
9241 SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
9243 // Save the StoreSDNodes that we find in the chain.
9244 SmallVector<MemOpLink, 8> StoreNodes;
9246 // Walk up the chain and look for nodes with offsets from the same
9247 // base pointer. Stop when reaching an instruction with a different kind
9248 // or instruction which has a different base pointer.
9249 unsigned Seq = 0;
9250 StoreSDNode *Index = St;
9251 while (Index) {
9252 // If the chain has more than one use, then we can't reorder the mem ops.
9253 if (Index != St && !SDValue(Index, 0)->hasOneUse())
9254 break;
9256 // Find the base pointer and offset for this memory node.
9257 BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr());
9259 // Check that the base pointer is the same as the original one.
9260 if (!Ptr.equalBaseIndex(BasePtr))
9261 break;
9263 // Check that the alignment is the same.
9264 if (Index->getAlignment() != St->getAlignment())
9265 break;
9267 // The memory operands must not be volatile.
9268 if (Index->isVolatile() || Index->isIndexed())
9269 break;
9271 // No truncation.
9272 if (StoreSDNode *St = dyn_cast<StoreSDNode>(Index))
9273 if (St->isTruncatingStore())
9274 break;
9276 // The stored memory type must be the same.
9277 if (Index->getMemoryVT() != MemVT)
9278 break;
9280 // We do not allow unaligned stores because we want to prevent overriding
9281 // stores.
9282 if (Index->getAlignment()*8 != MemVT.getSizeInBits())
9283 break;
9285 // We found a potential memory operand to merge.
9286 StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++));
9288 // Find the next memory operand in the chain. If the next operand in the
9289 // chain is a store then move up and continue the scan with the next
9290 // memory operand. If the next operand is a load save it and use alias
9291 // information to check if it interferes with anything.
9292 SDNode *NextInChain = Index->getChain().getNode();
9293 while (1) {
9294 if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
9295 // We found a store node. Use it for the next iteration.
9296 Index = STn;
9297 break;
9298 } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
9299 if (Ldn->isVolatile()) {
9300 Index = nullptr;
9301 break;
9302 }
9304 // Save the load node for later. Continue the scan.
9305 AliasLoadNodes.push_back(Ldn);
9306 NextInChain = Ldn->getChain().getNode();
9307 continue;
9308 } else {
9309 Index = nullptr;
9310 break;
9311 }
9312 }
9313 }
9315 // Check if there is anything to merge.
9316 if (StoreNodes.size() < 2)
9317 return false;
9319 // Sort the memory operands according to their distance from the base pointer.
9320 std::sort(StoreNodes.begin(), StoreNodes.end(),
9321 [](MemOpLink LHS, MemOpLink RHS) {
9322 return LHS.OffsetFromBase < RHS.OffsetFromBase ||
9323 (LHS.OffsetFromBase == RHS.OffsetFromBase &&
9324 LHS.SequenceNum > RHS.SequenceNum);
9325 });
9327 // Scan the memory operations on the chain and find the first non-consecutive
9328 // store memory address.
9329 unsigned LastConsecutiveStore = 0;
9330 int64_t StartAddress = StoreNodes[0].OffsetFromBase;
9331 for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) {
9333 // Check that the addresses are consecutive starting from the second
9334 // element in the list of stores.
9335 if (i > 0) {
9336 int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
9337 if (CurrAddress - StartAddress != (ElementSizeBytes * i))
9338 break;
9339 }
9341 bool Alias = false;
9342 // Check if this store interferes with any of the loads that we found.
9343 for (unsigned ld = 0, lde = AliasLoadNodes.size(); ld < lde; ++ld)
9344 if (isAlias(AliasLoadNodes[ld], StoreNodes[i].MemNode)) {
9345 Alias = true;
9346 break;
9347 }
9348 // We found a load that alias with this store. Stop the sequence.
9349 if (Alias)
9350 break;
9352 // Mark this node as useful.
9353 LastConsecutiveStore = i;
9354 }
9356 // The node with the lowest store address.
9357 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
9359 // Store the constants into memory as one consecutive store.
9360 if (!IsLoadSrc) {
9361 unsigned LastLegalType = 0;
9362 unsigned LastLegalVectorType = 0;
9363 bool NonZero = false;
9364 for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
9365 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
9366 SDValue StoredVal = St->getValue();
9368 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) {
9369 NonZero |= !C->isNullValue();
9370 } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) {
9371 NonZero |= !C->getConstantFPValue()->isNullValue();
9372 } else {
9373 // Non-constant.
9374 break;
9375 }
9377 // Find a legal type for the constant store.
9378 unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
9379 EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
9380 if (TLI.isTypeLegal(StoreTy))
9381 LastLegalType = i+1;
9382 // Or check whether a truncstore is legal.
9383 else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
9384 TargetLowering::TypePromoteInteger) {
9385 EVT LegalizedStoredValueTy =
9386 TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
9387 if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy))
9388 LastLegalType = i+1;
9389 }
9391 // Find a legal type for the vector store.
9392 EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
9393 if (TLI.isTypeLegal(Ty))
9394 LastLegalVectorType = i + 1;
9395 }
9397 // We only use vectors if the constant is known to be zero and the
9398 // function is not marked with the noimplicitfloat attribute.
9399 if (NonZero || NoVectors)
9400 LastLegalVectorType = 0;
9402 // Check if we found a legal integer type to store.
9403 if (LastLegalType == 0 && LastLegalVectorType == 0)
9404 return false;
9406 bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors;
9407 unsigned NumElem = UseVector ? LastLegalVectorType : LastLegalType;
9409 // Make sure we have something to merge.
9410 if (NumElem < 2)
9411 return false;
9413 unsigned EarliestNodeUsed = 0;
9414 for (unsigned i=0; i < NumElem; ++i) {
9415 // Find a chain for the new wide-store operand. Notice that some
9416 // of the store nodes that we found may not be selected for inclusion
9417 // in the wide store. The chain we use needs to be the chain of the
9418 // earliest store node which is *used* and replaced by the wide store.
9419 if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
9420 EarliestNodeUsed = i;
9421 }
9423 // The earliest Node in the DAG.
9424 LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
9425 SDLoc DL(StoreNodes[0].MemNode);
9427 SDValue StoredVal;
9428 if (UseVector) {
9429 // Find a legal type for the vector store.
9430 EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
9431 assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
9432 StoredVal = DAG.getConstant(0, Ty);
9433 } else {
9434 unsigned StoreBW = NumElem * ElementSizeBytes * 8;
9435 APInt StoreInt(StoreBW, 0);
9437 // Construct a single integer constant which is made of the smaller
9438 // constant inputs.
9439 bool IsLE = TLI.isLittleEndian();
9440 for (unsigned i = 0; i < NumElem ; ++i) {
9441 unsigned Idx = IsLE ?(NumElem - 1 - i) : i;
9442 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
9443 SDValue Val = St->getValue();
9444 StoreInt<<=ElementSizeBytes*8;
9445 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
9446 StoreInt|=C->getAPIntValue().zext(StoreBW);
9447 } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
9448 StoreInt|= C->getValueAPF().bitcastToAPInt().zext(StoreBW);
9449 } else {
9450 assert(false && "Invalid constant element type");
9451 }
9452 }
9454 // Create the new Load and Store operations.
9455 EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
9456 StoredVal = DAG.getConstant(StoreInt, StoreTy);
9457 }
9459 SDValue NewStore = DAG.getStore(EarliestOp->getChain(), DL, StoredVal,
9460 FirstInChain->getBasePtr(),
9461 FirstInChain->getPointerInfo(),
9462 false, false,
9463 FirstInChain->getAlignment());
9465 // Replace the first store with the new store
9466 CombineTo(EarliestOp, NewStore);
9467 // Erase all other stores.
9468 for (unsigned i = 0; i < NumElem ; ++i) {
9469 if (StoreNodes[i].MemNode == EarliestOp)
9470 continue;
9471 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
9472 // ReplaceAllUsesWith will replace all uses that existed when it was
9473 // called, but graph optimizations may cause new ones to appear. For
9474 // example, the case in pr14333 looks like
9475 //
9476 // St's chain -> St -> another store -> X
9477 //
9478 // And the only difference from St to the other store is the chain.
9479 // When we change it's chain to be St's chain they become identical,
9480 // get CSEed and the net result is that X is now a use of St.
9481 // Since we know that St is redundant, just iterate.
9482 while (!St->use_empty())
9483 DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
9484 deleteAndRecombine(St);
9485 }
9487 return true;
9488 }
9490 // Below we handle the case of multiple consecutive stores that
9491 // come from multiple consecutive loads. We merge them into a single
9492 // wide load and a single wide store.
9494 // Look for load nodes which are used by the stored values.
9495 SmallVector<MemOpLink, 8> LoadNodes;
9497 // Find acceptable loads. Loads need to have the same chain (token factor),
9498 // must not be zext, volatile, indexed, and they must be consecutive.
9499 BaseIndexOffset LdBasePtr;
9500 for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
9501 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
9502 LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue());
9503 if (!Ld) break;
9505 // Loads must only have one use.
9506 if (!Ld->hasNUsesOfValue(1, 0))
9507 break;
9509 // Check that the alignment is the same as the stores.
9510 if (Ld->getAlignment() != St->getAlignment())
9511 break;
9513 // The memory operands must not be volatile.
9514 if (Ld->isVolatile() || Ld->isIndexed())
9515 break;
9517 // We do not accept ext loads.
9518 if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
9519 break;
9521 // The stored memory type must be the same.
9522 if (Ld->getMemoryVT() != MemVT)
9523 break;
9525 BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr());
9526 // If this is not the first ptr that we check.
9527 if (LdBasePtr.Base.getNode()) {
9528 // The base ptr must be the same.
9529 if (!LdPtr.equalBaseIndex(LdBasePtr))
9530 break;
9531 } else {
9532 // Check that all other base pointers are the same as this one.
9533 LdBasePtr = LdPtr;
9534 }
9536 // We found a potential memory operand to merge.
9537 LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0));
9538 }
9540 if (LoadNodes.size() < 2)
9541 return false;
9543 // If we have load/store pair instructions and we only have two values,
9544 // don't bother.
9545 unsigned RequiredAlignment;
9546 if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
9547 St->getAlignment() >= RequiredAlignment)
9548 return false;
9550 // Scan the memory operations on the chain and find the first non-consecutive
9551 // load memory address. These variables hold the index in the store node
9552 // array.
9553 unsigned LastConsecutiveLoad = 0;
9554 // This variable refers to the size and not index in the array.
9555 unsigned LastLegalVectorType = 0;
9556 unsigned LastLegalIntegerType = 0;
9557 StartAddress = LoadNodes[0].OffsetFromBase;
9558 SDValue FirstChain = LoadNodes[0].MemNode->getChain();
9559 for (unsigned i = 1; i < LoadNodes.size(); ++i) {
9560 // All loads much share the same chain.
9561 if (LoadNodes[i].MemNode->getChain() != FirstChain)
9562 break;
9564 int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
9565 if (CurrAddress - StartAddress != (ElementSizeBytes * i))
9566 break;
9567 LastConsecutiveLoad = i;
9569 // Find a legal type for the vector store.
9570 EVT StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
9571 if (TLI.isTypeLegal(StoreTy))
9572 LastLegalVectorType = i + 1;
9574 // Find a legal type for the integer store.
9575 unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
9576 StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
9577 if (TLI.isTypeLegal(StoreTy))
9578 LastLegalIntegerType = i + 1;
9579 // Or check whether a truncstore and extload is legal.
9580 else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
9581 TargetLowering::TypePromoteInteger) {
9582 EVT LegalizedStoredValueTy =
9583 TLI.getTypeToTransformTo(*DAG.getContext(), StoreTy);
9584 if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
9585 TLI.isLoadExtLegal(ISD::ZEXTLOAD, StoreTy) &&
9586 TLI.isLoadExtLegal(ISD::SEXTLOAD, StoreTy) &&
9587 TLI.isLoadExtLegal(ISD::EXTLOAD, StoreTy))
9588 LastLegalIntegerType = i+1;
9589 }
9590 }
9592 // Only use vector types if the vector type is larger than the integer type.
9593 // If they are the same, use integers.
9594 bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors;
9595 unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType);
9597 // We add +1 here because the LastXXX variables refer to location while
9598 // the NumElem refers to array/index size.
9599 unsigned NumElem = std::min(LastConsecutiveStore, LastConsecutiveLoad) + 1;
9600 NumElem = std::min(LastLegalType, NumElem);
9602 if (NumElem < 2)
9603 return false;
9605 // The earliest Node in the DAG.
9606 unsigned EarliestNodeUsed = 0;
9607 LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
9608 for (unsigned i=1; i<NumElem; ++i) {
9609 // Find a chain for the new wide-store operand. Notice that some
9610 // of the store nodes that we found may not be selected for inclusion
9611 // in the wide store. The chain we use needs to be the chain of the
9612 // earliest store node which is *used* and replaced by the wide store.
9613 if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
9614 EarliestNodeUsed = i;
9615 }
9617 // Find if it is better to use vectors or integers to load and store
9618 // to memory.
9619 EVT JointMemOpVT;
9620 if (UseVectorTy) {
9621 JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
9622 } else {
9623 unsigned StoreBW = NumElem * ElementSizeBytes * 8;
9624 JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
9625 }
9627 SDLoc LoadDL(LoadNodes[0].MemNode);
9628 SDLoc StoreDL(StoreNodes[0].MemNode);
9630 LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
9631 SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL,
9632 FirstLoad->getChain(),
9633 FirstLoad->getBasePtr(),
9634 FirstLoad->getPointerInfo(),
9635 false, false, false,
9636 FirstLoad->getAlignment());
9638 SDValue NewStore = DAG.getStore(EarliestOp->getChain(), StoreDL, NewLoad,
9639 FirstInChain->getBasePtr(),
9640 FirstInChain->getPointerInfo(), false, false,
9641 FirstInChain->getAlignment());
9643 // Replace one of the loads with the new load.
9644 LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[0].MemNode);
9645 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
9646 SDValue(NewLoad.getNode(), 1));
9648 // Remove the rest of the load chains.
9649 for (unsigned i = 1; i < NumElem ; ++i) {
9650 // Replace all chain users of the old load nodes with the chain of the new
9651 // load node.
9652 LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
9653 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain());
9654 }
9656 // Replace the first store with the new store.
9657 CombineTo(EarliestOp, NewStore);
9658 // Erase all other stores.
9659 for (unsigned i = 0; i < NumElem ; ++i) {
9660 // Remove all Store nodes.
9661 if (StoreNodes[i].MemNode == EarliestOp)
9662 continue;
9663 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
9664 DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
9665 deleteAndRecombine(St);
9666 }
9668 return true;
9669 }
9671 SDValue DAGCombiner::visitSTORE(SDNode *N) {
9672 StoreSDNode *ST = cast<StoreSDNode>(N);
9673 SDValue Chain = ST->getChain();
9674 SDValue Value = ST->getValue();
9675 SDValue Ptr = ST->getBasePtr();
9677 // If this is a store of a bit convert, store the input value if the
9678 // resultant store does not need a higher alignment than the original.
9679 if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
9680 ST->isUnindexed()) {
9681 unsigned OrigAlign = ST->getAlignment();
9682 EVT SVT = Value.getOperand(0).getValueType();
9683 unsigned Align = TLI.getDataLayout()->
9684 getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
9685 if (Align <= OrigAlign &&
9686 ((!LegalOperations && !ST->isVolatile()) ||
9687 TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
9688 return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0),
9689 Ptr, ST->getPointerInfo(), ST->isVolatile(),
9690 ST->isNonTemporal(), OrigAlign,
9691 ST->getAAInfo());
9692 }
9694 // Turn 'store undef, Ptr' -> nothing.
9695 if (Value.getOpcode() == ISD::UNDEF && ST->isUnindexed())
9696 return Chain;
9698 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
9699 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
9700 // NOTE: If the original store is volatile, this transform must not increase
9701 // the number of stores. For example, on x86-32 an f64 can be stored in one
9702 // processor operation but an i64 (which is not legal) requires two. So the
9703 // transform should not be done in this case.
9704 if (Value.getOpcode() != ISD::TargetConstantFP) {
9705 SDValue Tmp;
9706 switch (CFP->getSimpleValueType(0).SimpleTy) {
9707 default: llvm_unreachable("Unknown FP type");
9708 case MVT::f16: // We don't do this for these yet.
9709 case MVT::f80:
9710 case MVT::f128:
9711 case MVT::ppcf128:
9712 break;
9713 case MVT::f32:
9714 if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
9715 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
9716 Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
9717 bitcastToAPInt().getZExtValue(), MVT::i32);
9718 return DAG.getStore(Chain, SDLoc(N), Tmp,
9719 Ptr, ST->getMemOperand());
9720 }
9721 break;
9722 case MVT::f64:
9723 if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
9724 !ST->isVolatile()) ||
9725 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
9726 Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
9727 getZExtValue(), MVT::i64);
9728 return DAG.getStore(Chain, SDLoc(N), Tmp,
9729 Ptr, ST->getMemOperand());
9730 }
9732 if (!ST->isVolatile() &&
9733 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
9734 // Many FP stores are not made apparent until after legalize, e.g. for
9735 // argument passing. Since this is so common, custom legalize the
9736 // 64-bit integer store into two 32-bit stores.
9737 uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
9738 SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
9739 SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
9740 if (TLI.isBigEndian()) std::swap(Lo, Hi);
9742 unsigned Alignment = ST->getAlignment();
9743 bool isVolatile = ST->isVolatile();
9744 bool isNonTemporal = ST->isNonTemporal();
9745 AAMDNodes AAInfo = ST->getAAInfo();
9747 SDValue St0 = DAG.getStore(Chain, SDLoc(ST), Lo,
9748 Ptr, ST->getPointerInfo(),
9749 isVolatile, isNonTemporal,
9750 ST->getAlignment(), AAInfo);
9751 Ptr = DAG.getNode(ISD::ADD, SDLoc(N), Ptr.getValueType(), Ptr,
9752 DAG.getConstant(4, Ptr.getValueType()));
9753 Alignment = MinAlign(Alignment, 4U);
9754 SDValue St1 = DAG.getStore(Chain, SDLoc(ST), Hi,
9755 Ptr, ST->getPointerInfo().getWithOffset(4),
9756 isVolatile, isNonTemporal,
9757 Alignment, AAInfo);
9758 return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other,
9759 St0, St1);
9760 }
9762 break;
9763 }
9764 }
9765 }
9767 // Try to infer better alignment information than the store already has.
9768 if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
9769 if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
9770 if (Align > ST->getAlignment())
9771 return DAG.getTruncStore(Chain, SDLoc(N), Value,
9772 Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
9773 ST->isVolatile(), ST->isNonTemporal(), Align,
9774 ST->getAAInfo());
9775 }
9776 }
9778 // Try transforming a pair floating point load / store ops to integer
9779 // load / store ops.
9780 SDValue NewST = TransformFPLoadStorePair(N);
9781 if (NewST.getNode())
9782 return NewST;
9784 bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA :
9785 TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
9786 #ifndef NDEBUG
9787 if (CombinerAAOnlyFunc.getNumOccurrences() &&
9788 CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
9789 UseAA = false;
9790 #endif
9791 if (UseAA && ST->isUnindexed()) {
9792 // Walk up chain skipping non-aliasing memory nodes.
9793 SDValue BetterChain = FindBetterChain(N, Chain);
9795 // If there is a better chain.
9796 if (Chain != BetterChain) {
9797 SDValue ReplStore;
9799 // Replace the chain to avoid dependency.
9800 if (ST->isTruncatingStore()) {
9801 ReplStore = DAG.getTruncStore(BetterChain, SDLoc(N), Value, Ptr,
9802 ST->getMemoryVT(), ST->getMemOperand());
9803 } else {
9804 ReplStore = DAG.getStore(BetterChain, SDLoc(N), Value, Ptr,
9805 ST->getMemOperand());
9806 }
9808 // Create token to keep both nodes around.
9809 SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
9810 MVT::Other, Chain, ReplStore);
9812 // Make sure the new and old chains are cleaned up.
9813 AddToWorklist(Token.getNode());
9815 // Don't add users to work list.
9816 return CombineTo(N, Token, false);
9817 }
9818 }
9820 // Try transforming N to an indexed store.
9821 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
9822 return SDValue(N, 0);
9824 // FIXME: is there such a thing as a truncating indexed store?
9825 if (ST->isTruncatingStore() && ST->isUnindexed() &&
9826 Value.getValueType().isInteger()) {
9827 // See if we can simplify the input to this truncstore with knowledge that
9828 // only the low bits are being used. For example:
9829 // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
9830 SDValue Shorter =
9831 GetDemandedBits(Value,
9832 APInt::getLowBitsSet(
9833 Value.getValueType().getScalarType().getSizeInBits(),
9834 ST->getMemoryVT().getScalarType().getSizeInBits()));
9835 AddToWorklist(Value.getNode());
9836 if (Shorter.getNode())
9837 return DAG.getTruncStore(Chain, SDLoc(N), Shorter,
9838 Ptr, ST->getMemoryVT(), ST->getMemOperand());
9840 // Otherwise, see if we can simplify the operation with
9841 // SimplifyDemandedBits, which only works if the value has a single use.
9842 if (SimplifyDemandedBits(Value,
9843 APInt::getLowBitsSet(
9844 Value.getValueType().getScalarType().getSizeInBits(),
9845 ST->getMemoryVT().getScalarType().getSizeInBits())))
9846 return SDValue(N, 0);
9847 }
9849 // If this is a load followed by a store to the same location, then the store
9850 // is dead/noop.
9851 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
9852 if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
9853 ST->isUnindexed() && !ST->isVolatile() &&
9854 // There can't be any side effects between the load and store, such as
9855 // a call or store.
9856 Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
9857 // The store is dead, remove it.
9858 return Chain;
9859 }
9860 }
9862 // If this is a store followed by a store with the same value to the same
9863 // location, then the store is dead/noop.
9864 if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
9865 if (ST1->getBasePtr() == Ptr && ST->getMemoryVT() == ST1->getMemoryVT() &&
9866 ST1->getValue() == Value && ST->isUnindexed() && !ST->isVolatile() &&
9867 ST1->isUnindexed() && !ST1->isVolatile()) {
9868 // The store is dead, remove it.
9869 return Chain;
9870 }
9871 }
9873 // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
9874 // truncating store. We can do this even if this is already a truncstore.
9875 if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
9876 && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
9877 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
9878 ST->getMemoryVT())) {
9879 return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
9880 Ptr, ST->getMemoryVT(), ST->getMemOperand());
9881 }
9883 // Only perform this optimization before the types are legal, because we
9884 // don't want to perform this optimization on every DAGCombine invocation.
9885 if (!LegalTypes) {
9886 bool EverChanged = false;
9888 do {
9889 // There can be multiple store sequences on the same chain.
9890 // Keep trying to merge store sequences until we are unable to do so
9891 // or until we merge the last store on the chain.
9892 bool Changed = MergeConsecutiveStores(ST);
9893 EverChanged |= Changed;
9894 if (!Changed) break;
9895 } while (ST->getOpcode() != ISD::DELETED_NODE);
9897 if (EverChanged)
9898 return SDValue(N, 0);
9899 }
9901 return ReduceLoadOpStoreWidth(N);
9902 }
9904 SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
9905 SDValue InVec = N->getOperand(0);
9906 SDValue InVal = N->getOperand(1);
9907 SDValue EltNo = N->getOperand(2);
9908 SDLoc dl(N);
9910 // If the inserted element is an UNDEF, just use the input vector.
9911 if (InVal.getOpcode() == ISD::UNDEF)
9912 return InVec;
9914 EVT VT = InVec.getValueType();
9916 // If we can't generate a legal BUILD_VECTOR, exit
9917 if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
9918 return SDValue();
9920 // Check that we know which element is being inserted
9921 if (!isa<ConstantSDNode>(EltNo))
9922 return SDValue();
9923 unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
9925 // Canonicalize insert_vector_elt dag nodes.
9926 // Example:
9927 // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
9928 // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
9929 //
9930 // Do this only if the child insert_vector node has one use; also
9931 // do this only if indices are both constants and Idx1 < Idx0.
9932 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
9933 && isa<ConstantSDNode>(InVec.getOperand(2))) {
9934 unsigned OtherElt =
9935 cast<ConstantSDNode>(InVec.getOperand(2))->getZExtValue();
9936 if (Elt < OtherElt) {
9937 // Swap nodes.
9938 SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VT,
9939 InVec.getOperand(0), InVal, EltNo);
9940 AddToWorklist(NewOp.getNode());
9941 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
9942 VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
9943 }
9944 }
9946 // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
9947 // be converted to a BUILD_VECTOR). Fill in the Ops vector with the
9948 // vector elements.
9949 SmallVector<SDValue, 8> Ops;
9950 // Do not combine these two vectors if the output vector will not replace
9951 // the input vector.
9952 if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) {
9953 Ops.append(InVec.getNode()->op_begin(),
9954 InVec.getNode()->op_end());
9955 } else if (InVec.getOpcode() == ISD::UNDEF) {
9956 unsigned NElts = VT.getVectorNumElements();
9957 Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
9958 } else {
9959 return SDValue();
9960 }
9962 // Insert the element
9963 if (Elt < Ops.size()) {
9964 // All the operands of BUILD_VECTOR must have the same type;
9965 // we enforce that here.
9966 EVT OpVT = Ops[0].getValueType();
9967 if (InVal.getValueType() != OpVT)
9968 InVal = OpVT.bitsGT(InVal.getValueType()) ?
9969 DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) :
9970 DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal);
9971 Ops[Elt] = InVal;
9972 }
9974 // Return the new vector
9975 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
9976 }
9978 SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
9979 SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad) {
9980 EVT ResultVT = EVE->getValueType(0);
9981 EVT VecEltVT = InVecVT.getVectorElementType();
9982 unsigned Align = OriginalLoad->getAlignment();
9983 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
9984 VecEltVT.getTypeForEVT(*DAG.getContext()));
9986 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
9987 return SDValue();
9989 Align = NewAlign;
9991 SDValue NewPtr = OriginalLoad->getBasePtr();
9992 SDValue Offset;
9993 EVT PtrType = NewPtr.getValueType();
9994 MachinePointerInfo MPI;
9995 if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
9996 int Elt = ConstEltNo->getZExtValue();
9997 unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
9998 if (TLI.isBigEndian())
9999 PtrOff = InVecVT.getSizeInBits() / 8 - PtrOff;
10000 Offset = DAG.getConstant(PtrOff, PtrType);
10001 MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
10002 } else {
10003 Offset = DAG.getNode(
10004 ISD::MUL, SDLoc(EVE), EltNo.getValueType(), EltNo,
10005 DAG.getConstant(VecEltVT.getStoreSize(), EltNo.getValueType()));
10006 if (TLI.isBigEndian())
10007 Offset = DAG.getNode(
10008 ISD::SUB, SDLoc(EVE), EltNo.getValueType(),
10009 DAG.getConstant(InVecVT.getStoreSize(), EltNo.getValueType()), Offset);
10010 MPI = OriginalLoad->getPointerInfo();
10011 }
10012 NewPtr = DAG.getNode(ISD::ADD, SDLoc(EVE), PtrType, NewPtr, Offset);
10014 // The replacement we need to do here is a little tricky: we need to
10015 // replace an extractelement of a load with a load.
10016 // Use ReplaceAllUsesOfValuesWith to do the replacement.
10017 // Note that this replacement assumes that the extractvalue is the only
10018 // use of the load; that's okay because we don't want to perform this
10019 // transformation in other cases anyway.
10020 SDValue Load;
10021 SDValue Chain;
10022 if (ResultVT.bitsGT(VecEltVT)) {
10023 // If the result type of vextract is wider than the load, then issue an
10024 // extending load instead.
10025 ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, VecEltVT)
10026 ? ISD::ZEXTLOAD
10027 : ISD::EXTLOAD;
10028 Load = DAG.getExtLoad(
10029 ExtType, SDLoc(EVE), ResultVT, OriginalLoad->getChain(), NewPtr, MPI,
10030 VecEltVT, OriginalLoad->isVolatile(), OriginalLoad->isNonTemporal(),
10031 OriginalLoad->isInvariant(), Align, OriginalLoad->getAAInfo());
10032 Chain = Load.getValue(1);
10033 } else {
10034 Load = DAG.getLoad(
10035 VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr, MPI,
10036 OriginalLoad->isVolatile(), OriginalLoad->isNonTemporal(),
10037 OriginalLoad->isInvariant(), Align, OriginalLoad->getAAInfo());
10038 Chain = Load.getValue(1);
10039 if (ResultVT.bitsLT(VecEltVT))
10040 Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
10041 else
10042 Load = DAG.getNode(ISD::BITCAST, SDLoc(EVE), ResultVT, Load);
10043 }
10044 WorklistRemover DeadNodes(*this);
10045 SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
10046 SDValue To[] = { Load, Chain };
10047 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10048 // Since we're explicitly calling ReplaceAllUses, add the new node to the
10049 // worklist explicitly as well.
10050 AddToWorklist(Load.getNode());
10051 AddUsersToWorklist(Load.getNode()); // Add users too
10052 // Make sure to revisit this node to clean it up; it will usually be dead.
10053 AddToWorklist(EVE);
10054 ++OpsNarrowed;
10055 return SDValue(EVE, 0);
10056 }
10058 SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
10059 // (vextract (scalar_to_vector val, 0) -> val
10060 SDValue InVec = N->getOperand(0);
10061 EVT VT = InVec.getValueType();
10062 EVT NVT = N->getValueType(0);
10064 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
10065 // Check if the result type doesn't match the inserted element type. A
10066 // SCALAR_TO_VECTOR may truncate the inserted element and the
10067 // EXTRACT_VECTOR_ELT may widen the extracted vector.
10068 SDValue InOp = InVec.getOperand(0);
10069 if (InOp.getValueType() != NVT) {
10070 assert(InOp.getValueType().isInteger() && NVT.isInteger());
10071 return DAG.getSExtOrTrunc(InOp, SDLoc(InVec), NVT);
10072 }
10073 return InOp;
10074 }
10076 SDValue EltNo = N->getOperand(1);
10077 bool ConstEltNo = isa<ConstantSDNode>(EltNo);
10079 // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
10080 // We only perform this optimization before the op legalization phase because
10081 // we may introduce new vector instructions which are not backed by TD
10082 // patterns. For example on AVX, extracting elements from a wide vector
10083 // without using extract_subvector. However, if we can find an underlying
10084 // scalar value, then we can always use that.
10085 if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
10086 && ConstEltNo) {
10087 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
10088 int NumElem = VT.getVectorNumElements();
10089 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
10090 // Find the new index to extract from.
10091 int OrigElt = SVOp->getMaskElt(Elt);
10093 // Extracting an undef index is undef.
10094 if (OrigElt == -1)
10095 return DAG.getUNDEF(NVT);
10097 // Select the right vector half to extract from.
10098 SDValue SVInVec;
10099 if (OrigElt < NumElem) {
10100 SVInVec = InVec->getOperand(0);
10101 } else {
10102 SVInVec = InVec->getOperand(1);
10103 OrigElt -= NumElem;
10104 }
10106 if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
10107 SDValue InOp = SVInVec.getOperand(OrigElt);
10108 if (InOp.getValueType() != NVT) {
10109 assert(InOp.getValueType().isInteger() && NVT.isInteger());
10110 InOp = DAG.getSExtOrTrunc(InOp, SDLoc(SVInVec), NVT);
10111 }
10113 return InOp;
10114 }
10116 // FIXME: We should handle recursing on other vector shuffles and
10117 // scalar_to_vector here as well.
10119 if (!LegalOperations) {
10120 EVT IndexTy = TLI.getVectorIdxTy();
10121 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT,
10122 SVInVec, DAG.getConstant(OrigElt, IndexTy));
10123 }
10124 }
10126 bool BCNumEltsChanged = false;
10127 EVT ExtVT = VT.getVectorElementType();
10128 EVT LVT = ExtVT;
10130 // If the result of load has to be truncated, then it's not necessarily
10131 // profitable.
10132 if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT))
10133 return SDValue();
10135 if (InVec.getOpcode() == ISD::BITCAST) {
10136 // Don't duplicate a load with other uses.
10137 if (!InVec.hasOneUse())
10138 return SDValue();
10140 EVT BCVT = InVec.getOperand(0).getValueType();
10141 if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
10142 return SDValue();
10143 if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
10144 BCNumEltsChanged = true;
10145 InVec = InVec.getOperand(0);
10146 ExtVT = BCVT.getVectorElementType();
10147 }
10149 // (vextract (vN[if]M load $addr), i) -> ([if]M load $addr + i * size)
10150 if (!LegalOperations && !ConstEltNo && InVec.hasOneUse() &&
10151 ISD::isNormalLoad(InVec.getNode()) &&
10152 !N->getOperand(1)->hasPredecessor(InVec.getNode())) {
10153 SDValue Index = N->getOperand(1);
10154 if (LoadSDNode *OrigLoad = dyn_cast<LoadSDNode>(InVec))
10155 return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, Index,
10156 OrigLoad);
10157 }
10159 // Perform only after legalization to ensure build_vector / vector_shuffle
10160 // optimizations have already been done.
10161 if (!LegalOperations) return SDValue();
10163 // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
10164 // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
10165 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
10167 if (ConstEltNo) {
10168 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
10170 LoadSDNode *LN0 = nullptr;
10171 const ShuffleVectorSDNode *SVN = nullptr;
10172 if (ISD::isNormalLoad(InVec.getNode())) {
10173 LN0 = cast<LoadSDNode>(InVec);
10174 } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
10175 InVec.getOperand(0).getValueType() == ExtVT &&
10176 ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
10177 // Don't duplicate a load with other uses.
10178 if (!InVec.hasOneUse())
10179 return SDValue();
10181 LN0 = cast<LoadSDNode>(InVec.getOperand(0));
10182 } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
10183 // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
10184 // =>
10185 // (load $addr+1*size)
10187 // Don't duplicate a load with other uses.
10188 if (!InVec.hasOneUse())
10189 return SDValue();
10191 // If the bit convert changed the number of elements, it is unsafe
10192 // to examine the mask.
10193 if (BCNumEltsChanged)
10194 return SDValue();
10196 // Select the input vector, guarding against out of range extract vector.
10197 unsigned NumElems = VT.getVectorNumElements();
10198 int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
10199 InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
10201 if (InVec.getOpcode() == ISD::BITCAST) {
10202 // Don't duplicate a load with other uses.
10203 if (!InVec.hasOneUse())
10204 return SDValue();
10206 InVec = InVec.getOperand(0);
10207 }
10208 if (ISD::isNormalLoad(InVec.getNode())) {
10209 LN0 = cast<LoadSDNode>(InVec);
10210 Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
10211 EltNo = DAG.getConstant(Elt, EltNo.getValueType());
10212 }
10213 }
10215 // Make sure we found a non-volatile load and the extractelement is
10216 // the only use.
10217 if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
10218 return SDValue();
10220 // If Idx was -1 above, Elt is going to be -1, so just return undef.
10221 if (Elt == -1)
10222 return DAG.getUNDEF(LVT);
10224 return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, EltNo, LN0);
10225 }
10227 return SDValue();
10228 }
10230 // Simplify (build_vec (ext )) to (bitcast (build_vec ))
10231 SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
10232 // We perform this optimization post type-legalization because
10233 // the type-legalizer often scalarizes integer-promoted vectors.
10234 // Performing this optimization before may create bit-casts which
10235 // will be type-legalized to complex code sequences.
10236 // We perform this optimization only before the operation legalizer because we
10237 // may introduce illegal operations.
10238 if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
10239 return SDValue();
10241 unsigned NumInScalars = N->getNumOperands();
10242 SDLoc dl(N);
10243 EVT VT = N->getValueType(0);
10245 // Check to see if this is a BUILD_VECTOR of a bunch of values
10246 // which come from any_extend or zero_extend nodes. If so, we can create
10247 // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
10248 // optimizations. We do not handle sign-extend because we can't fill the sign
10249 // using shuffles.
10250 EVT SourceType = MVT::Other;
10251 bool AllAnyExt = true;
10253 for (unsigned i = 0; i != NumInScalars; ++i) {
10254 SDValue In = N->getOperand(i);
10255 // Ignore undef inputs.
10256 if (In.getOpcode() == ISD::UNDEF) continue;
10258 bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
10259 bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
10261 // Abort if the element is not an extension.
10262 if (!ZeroExt && !AnyExt) {
10263 SourceType = MVT::Other;
10264 break;
10265 }
10267 // The input is a ZeroExt or AnyExt. Check the original type.
10268 EVT InTy = In.getOperand(0).getValueType();
10270 // Check that all of the widened source types are the same.
10271 if (SourceType == MVT::Other)
10272 // First time.
10273 SourceType = InTy;
10274 else if (InTy != SourceType) {
10275 // Multiple income types. Abort.
10276 SourceType = MVT::Other;
10277 break;
10278 }
10280 // Check if all of the extends are ANY_EXTENDs.
10281 AllAnyExt &= AnyExt;
10282 }
10284 // In order to have valid types, all of the inputs must be extended from the
10285 // same source type and all of the inputs must be any or zero extend.
10286 // Scalar sizes must be a power of two.
10287 EVT OutScalarTy = VT.getScalarType();
10288 bool ValidTypes = SourceType != MVT::Other &&
10289 isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
10290 isPowerOf2_32(SourceType.getSizeInBits());
10292 // Create a new simpler BUILD_VECTOR sequence which other optimizations can
10293 // turn into a single shuffle instruction.
10294 if (!ValidTypes)
10295 return SDValue();
10297 bool isLE = TLI.isLittleEndian();
10298 unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
10299 assert(ElemRatio > 1 && "Invalid element size ratio");
10300 SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
10301 DAG.getConstant(0, SourceType);
10303 unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
10304 SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
10306 // Populate the new build_vector
10307 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
10308 SDValue Cast = N->getOperand(i);
10309 assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
10310 Cast.getOpcode() == ISD::ZERO_EXTEND ||
10311 Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode");
10312 SDValue In;
10313 if (Cast.getOpcode() == ISD::UNDEF)
10314 In = DAG.getUNDEF(SourceType);
10315 else
10316 In = Cast->getOperand(0);
10317 unsigned Index = isLE ? (i * ElemRatio) :
10318 (i * ElemRatio + (ElemRatio - 1));
10320 assert(Index < Ops.size() && "Invalid index");
10321 Ops[Index] = In;
10322 }
10324 // The type of the new BUILD_VECTOR node.
10325 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
10326 assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
10327 "Invalid vector size");
10328 // Check if the new vector type is legal.
10329 if (!isTypeLegal(VecVT)) return SDValue();
10331 // Make the new BUILD_VECTOR.
10332 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
10334 // The new BUILD_VECTOR node has the potential to be further optimized.
10335 AddToWorklist(BV.getNode());
10336 // Bitcast to the desired type.
10337 return DAG.getNode(ISD::BITCAST, dl, VT, BV);
10338 }
10340 SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
10341 EVT VT = N->getValueType(0);
10343 unsigned NumInScalars = N->getNumOperands();
10344 SDLoc dl(N);
10346 EVT SrcVT = MVT::Other;
10347 unsigned Opcode = ISD::DELETED_NODE;
10348 unsigned NumDefs = 0;
10350 for (unsigned i = 0; i != NumInScalars; ++i) {
10351 SDValue In = N->getOperand(i);
10352 unsigned Opc = In.getOpcode();
10354 if (Opc == ISD::UNDEF)
10355 continue;
10357 // If all scalar values are floats and converted from integers.
10358 if (Opcode == ISD::DELETED_NODE &&
10359 (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) {
10360 Opcode = Opc;
10361 }
10363 if (Opc != Opcode)
10364 return SDValue();
10366 EVT InVT = In.getOperand(0).getValueType();
10368 // If all scalar values are typed differently, bail out. It's chosen to
10369 // simplify BUILD_VECTOR of integer types.
10370 if (SrcVT == MVT::Other)
10371 SrcVT = InVT;
10372 if (SrcVT != InVT)
10373 return SDValue();
10374 NumDefs++;
10375 }
10377 // If the vector has just one element defined, it's not worth to fold it into
10378 // a vectorized one.
10379 if (NumDefs < 2)
10380 return SDValue();
10382 assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP)
10383 && "Should only handle conversion from integer to float.");
10384 assert(SrcVT != MVT::Other && "Cannot determine source type!");
10386 EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars);
10388 if (!TLI.isOperationLegalOrCustom(Opcode, NVT))
10389 return SDValue();
10391 SmallVector<SDValue, 8> Opnds;
10392 for (unsigned i = 0; i != NumInScalars; ++i) {
10393 SDValue In = N->getOperand(i);
10395 if (In.getOpcode() == ISD::UNDEF)
10396 Opnds.push_back(DAG.getUNDEF(SrcVT));
10397 else
10398 Opnds.push_back(In.getOperand(0));
10399 }
10400 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Opnds);
10401 AddToWorklist(BV.getNode());
10403 return DAG.getNode(Opcode, dl, VT, BV);
10404 }
10406 SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
10407 unsigned NumInScalars = N->getNumOperands();
10408 SDLoc dl(N);
10409 EVT VT = N->getValueType(0);
10411 // A vector built entirely of undefs is undef.
10412 if (ISD::allOperandsUndef(N))
10413 return DAG.getUNDEF(VT);
10415 SDValue V = reduceBuildVecExtToExtBuildVec(N);
10416 if (V.getNode())
10417 return V;
10419 V = reduceBuildVecConvertToConvertBuildVec(N);
10420 if (V.getNode())
10421 return V;
10423 // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
10424 // operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
10425 // at most two distinct vectors, turn this into a shuffle node.
10427 // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
10428 if (!isTypeLegal(VT))
10429 return SDValue();
10431 // May only combine to shuffle after legalize if shuffle is legal.
10432 if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
10433 return SDValue();
10435 SDValue VecIn1, VecIn2;
10436 for (unsigned i = 0; i != NumInScalars; ++i) {
10437 // Ignore undef inputs.
10438 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
10440 // If this input is something other than a EXTRACT_VECTOR_ELT with a
10441 // constant index, bail out.
10442 if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10443 !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) {
10444 VecIn1 = VecIn2 = SDValue(nullptr, 0);
10445 break;
10446 }
10448 // We allow up to two distinct input vectors.
10449 SDValue ExtractedFromVec = N->getOperand(i).getOperand(0);
10450 if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
10451 continue;
10453 if (!VecIn1.getNode()) {
10454 VecIn1 = ExtractedFromVec;
10455 } else if (!VecIn2.getNode()) {
10456 VecIn2 = ExtractedFromVec;
10457 } else {
10458 // Too many inputs.
10459 VecIn1 = VecIn2 = SDValue(nullptr, 0);
10460 break;
10461 }
10462 }
10464 // If everything is good, we can make a shuffle operation.
10465 if (VecIn1.getNode()) {
10466 SmallVector<int, 8> Mask;
10467 for (unsigned i = 0; i != NumInScalars; ++i) {
10468 if (N->getOperand(i).getOpcode() == ISD::UNDEF) {
10469 Mask.push_back(-1);
10470 continue;
10471 }
10473 // If extracting from the first vector, just use the index directly.
10474 SDValue Extract = N->getOperand(i);
10475 SDValue ExtVal = Extract.getOperand(1);
10476 if (Extract.getOperand(0) == VecIn1) {
10477 unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
10478 if (ExtIndex > VT.getVectorNumElements())
10479 return SDValue();
10481 Mask.push_back(ExtIndex);
10482 continue;
10483 }
10485 // Otherwise, use InIdx + VecSize
10486 unsigned Idx = cast<ConstantSDNode>(ExtVal)->getZExtValue();
10487 Mask.push_back(Idx+NumInScalars);
10488 }
10490 // We can't generate a shuffle node with mismatched input and output types.
10491 // Attempt to transform a single input vector to the correct type.
10492 if ((VT != VecIn1.getValueType())) {
10493 // We don't support shuffeling between TWO values of different types.
10494 if (VecIn2.getNode())
10495 return SDValue();
10497 // We only support widening of vectors which are half the size of the
10498 // output registers. For example XMM->YMM widening on X86 with AVX.
10499 if (VecIn1.getValueType().getSizeInBits()*2 != VT.getSizeInBits())
10500 return SDValue();
10502 // If the input vector type has a different base type to the output
10503 // vector type, bail out.
10504 if (VecIn1.getValueType().getVectorElementType() !=
10505 VT.getVectorElementType())
10506 return SDValue();
10508 // Widen the input vector by adding undef values.
10509 VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
10510 VecIn1, DAG.getUNDEF(VecIn1.getValueType()));
10511 }
10513 // If VecIn2 is unused then change it to undef.
10514 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
10516 // Check that we were able to transform all incoming values to the same
10517 // type.
10518 if (VecIn2.getValueType() != VecIn1.getValueType() ||
10519 VecIn1.getValueType() != VT)
10520 return SDValue();
10522 // Return the new VECTOR_SHUFFLE node.
10523 SDValue Ops[2];
10524 Ops[0] = VecIn1;
10525 Ops[1] = VecIn2;
10526 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], &Mask[0]);
10527 }
10529 return SDValue();
10530 }
10532 SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
10533 // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
10534 // EXTRACT_SUBVECTOR operations. If so, and if the EXTRACT_SUBVECTOR vector
10535 // inputs come from at most two distinct vectors, turn this into a shuffle
10536 // node.
10538 // If we only have one input vector, we don't need to do any concatenation.
10539 if (N->getNumOperands() == 1)
10540 return N->getOperand(0);
10542 // Check if all of the operands are undefs.
10543 EVT VT = N->getValueType(0);
10544 if (ISD::allOperandsUndef(N))
10545 return DAG.getUNDEF(VT);
10547 // Optimize concat_vectors where one of the vectors is undef.
10548 if (N->getNumOperands() == 2 &&
10549 N->getOperand(1)->getOpcode() == ISD::UNDEF) {
10550 SDValue In = N->getOperand(0);
10551 assert(In.getValueType().isVector() && "Must concat vectors");
10553 // Transform: concat_vectors(scalar, undef) -> scalar_to_vector(sclr).
10554 if (In->getOpcode() == ISD::BITCAST &&
10555 !In->getOperand(0)->getValueType(0).isVector()) {
10556 SDValue Scalar = In->getOperand(0);
10557 EVT SclTy = Scalar->getValueType(0);
10559 if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
10560 return SDValue();
10562 EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy,
10563 VT.getSizeInBits() / SclTy.getSizeInBits());
10564 if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
10565 return SDValue();
10567 SDLoc dl = SDLoc(N);
10568 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NVT, Scalar);
10569 return DAG.getNode(ISD::BITCAST, dl, VT, Res);
10570 }
10571 }
10573 // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
10574 // -> (BUILD_VECTOR A, B, ..., C, D, ...)
10575 if (N->getNumOperands() == 2 &&
10576 N->getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
10577 N->getOperand(1).getOpcode() == ISD::BUILD_VECTOR) {
10578 EVT VT = N->getValueType(0);
10579 SDValue N0 = N->getOperand(0);
10580 SDValue N1 = N->getOperand(1);
10581 SmallVector<SDValue, 8> Opnds;
10582 unsigned BuildVecNumElts = N0.getNumOperands();
10584 EVT SclTy0 = N0.getOperand(0)->getValueType(0);
10585 EVT SclTy1 = N1.getOperand(0)->getValueType(0);
10586 if (SclTy0.isFloatingPoint()) {
10587 for (unsigned i = 0; i != BuildVecNumElts; ++i)
10588 Opnds.push_back(N0.getOperand(i));
10589 for (unsigned i = 0; i != BuildVecNumElts; ++i)
10590 Opnds.push_back(N1.getOperand(i));
10591 } else {
10592 // If BUILD_VECTOR are from built from integer, they may have different
10593 // operand types. Get the smaller type and truncate all operands to it.
10594 EVT MinTy = SclTy0.bitsLE(SclTy1) ? SclTy0 : SclTy1;
10595 for (unsigned i = 0; i != BuildVecNumElts; ++i)
10596 Opnds.push_back(DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinTy,
10597 N0.getOperand(i)));
10598 for (unsigned i = 0; i != BuildVecNumElts; ++i)
10599 Opnds.push_back(DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinTy,
10600 N1.getOperand(i)));
10601 }
10603 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
10604 }
10606 // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
10607 // nodes often generate nop CONCAT_VECTOR nodes.
10608 // Scan the CONCAT_VECTOR operands and look for a CONCAT operations that
10609 // place the incoming vectors at the exact same location.
10610 SDValue SingleSource = SDValue();
10611 unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();
10613 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
10614 SDValue Op = N->getOperand(i);
10616 if (Op.getOpcode() == ISD::UNDEF)
10617 continue;
10619 // Check if this is the identity extract:
10620 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
10621 return SDValue();
10623 // Find the single incoming vector for the extract_subvector.
10624 if (SingleSource.getNode()) {
10625 if (Op.getOperand(0) != SingleSource)
10626 return SDValue();
10627 } else {
10628 SingleSource = Op.getOperand(0);
10630 // Check the source type is the same as the type of the result.
10631 // If not, this concat may extend the vector, so we can not
10632 // optimize it away.
10633 if (SingleSource.getValueType() != N->getValueType(0))
10634 return SDValue();
10635 }
10637 unsigned IdentityIndex = i * PartNumElem;
10638 ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
10639 // The extract index must be constant.
10640 if (!CS)
10641 return SDValue();
10643 // Check that we are reading from the identity index.
10644 if (CS->getZExtValue() != IdentityIndex)
10645 return SDValue();
10646 }
10648 if (SingleSource.getNode())
10649 return SingleSource;
10651 return SDValue();
10652 }
10654 SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
10655 EVT NVT = N->getValueType(0);
10656 SDValue V = N->getOperand(0);
10658 if (V->getOpcode() == ISD::CONCAT_VECTORS) {
10659 // Combine:
10660 // (extract_subvec (concat V1, V2, ...), i)
10661 // Into:
10662 // Vi if possible
10663 // Only operand 0 is checked as 'concat' assumes all inputs of the same
10664 // type.
10665 if (V->getOperand(0).getValueType() != NVT)
10666 return SDValue();
10667 unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10668 unsigned NumElems = NVT.getVectorNumElements();
10669 assert((Idx % NumElems) == 0 &&
10670 "IDX in concat is not a multiple of the result vector length.");
10671 return V->getOperand(Idx / NumElems);
10672 }
10674 // Skip bitcasting
10675 if (V->getOpcode() == ISD::BITCAST)
10676 V = V.getOperand(0);
10678 if (V->getOpcode() == ISD::INSERT_SUBVECTOR) {
10679 SDLoc dl(N);
10680 // Handle only simple case where vector being inserted and vector
10681 // being extracted are of same type, and are half size of larger vectors.
10682 EVT BigVT = V->getOperand(0).getValueType();
10683 EVT SmallVT = V->getOperand(1).getValueType();
10684 if (!NVT.bitsEq(SmallVT) || NVT.getSizeInBits()*2 != BigVT.getSizeInBits())
10685 return SDValue();
10687 // Only handle cases where both indexes are constants with the same type.
10688 ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
10689 ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));
10691 if (InsIdx && ExtIdx &&
10692 InsIdx->getValueType(0).getSizeInBits() <= 64 &&
10693 ExtIdx->getValueType(0).getSizeInBits() <= 64) {
10694 // Combine:
10695 // (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
10696 // Into:
10697 // indices are equal or bit offsets are equal => V1
10698 // otherwise => (extract_subvec V1, ExtIdx)
10699 if (InsIdx->getZExtValue() * SmallVT.getScalarType().getSizeInBits() ==
10700 ExtIdx->getZExtValue() * NVT.getScalarType().getSizeInBits())
10701 return DAG.getNode(ISD::BITCAST, dl, NVT, V->getOperand(1));
10702 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT,
10703 DAG.getNode(ISD::BITCAST, dl,
10704 N->getOperand(0).getValueType(),
10705 V->getOperand(0)), N->getOperand(1));
10706 }
10707 }
10709 return SDValue();
10710 }
10712 static SDValue simplifyShuffleOperandRecursively(SmallBitVector &UsedElements,
10713 SDValue V, SelectionDAG &DAG) {
10714 SDLoc DL(V);
10715 EVT VT = V.getValueType();
10717 switch (V.getOpcode()) {
10718 default:
10719 return V;
10721 case ISD::CONCAT_VECTORS: {
10722 EVT OpVT = V->getOperand(0).getValueType();
10723 int OpSize = OpVT.getVectorNumElements();
10724 SmallBitVector OpUsedElements(OpSize, false);
10725 bool FoundSimplification = false;
10726 SmallVector<SDValue, 4> NewOps;
10727 NewOps.reserve(V->getNumOperands());
10728 for (int i = 0, NumOps = V->getNumOperands(); i < NumOps; ++i) {
10729 SDValue Op = V->getOperand(i);
10730 bool OpUsed = false;
10731 for (int j = 0; j < OpSize; ++j)
10732 if (UsedElements[i * OpSize + j]) {
10733 OpUsedElements[j] = true;
10734 OpUsed = true;
10735 }
10736 NewOps.push_back(
10737 OpUsed ? simplifyShuffleOperandRecursively(OpUsedElements, Op, DAG)
10738 : DAG.getUNDEF(OpVT));
10739 FoundSimplification |= Op == NewOps.back();
10740 OpUsedElements.reset();
10741 }
10742 if (FoundSimplification)
10743 V = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, NewOps);
10744 return V;
10745 }
10747 case ISD::INSERT_SUBVECTOR: {
10748 SDValue BaseV = V->getOperand(0);
10749 SDValue SubV = V->getOperand(1);
10750 auto *IdxN = dyn_cast<ConstantSDNode>(V->getOperand(2));
10751 if (!IdxN)
10752 return V;
10754 int SubSize = SubV.getValueType().getVectorNumElements();
10755 int Idx = IdxN->getZExtValue();
10756 bool SubVectorUsed = false;
10757 SmallBitVector SubUsedElements(SubSize, false);
10758 for (int i = 0; i < SubSize; ++i)
10759 if (UsedElements[i + Idx]) {
10760 SubVectorUsed = true;
10761 SubUsedElements[i] = true;
10762 UsedElements[i + Idx] = false;
10763 }
10765 // Now recurse on both the base and sub vectors.
10766 SDValue SimplifiedSubV =
10767 SubVectorUsed
10768 ? simplifyShuffleOperandRecursively(SubUsedElements, SubV, DAG)
10769 : DAG.getUNDEF(SubV.getValueType());
10770 SDValue SimplifiedBaseV = simplifyShuffleOperandRecursively(UsedElements, BaseV, DAG);
10771 if (SimplifiedSubV != SubV || SimplifiedBaseV != BaseV)
10772 V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
10773 SimplifiedBaseV, SimplifiedSubV, V->getOperand(2));
10774 return V;
10775 }
10776 }
10777 }
10779 static SDValue simplifyShuffleOperands(ShuffleVectorSDNode *SVN, SDValue N0,
10780 SDValue N1, SelectionDAG &DAG) {
10781 EVT VT = SVN->getValueType(0);
10782 int NumElts = VT.getVectorNumElements();
10783 SmallBitVector N0UsedElements(NumElts, false), N1UsedElements(NumElts, false);
10784 for (int M : SVN->getMask())
10785 if (M >= 0 && M < NumElts)
10786 N0UsedElements[M] = true;
10787 else if (M >= NumElts)
10788 N1UsedElements[M - NumElts] = true;
10790 SDValue S0 = simplifyShuffleOperandRecursively(N0UsedElements, N0, DAG);
10791 SDValue S1 = simplifyShuffleOperandRecursively(N1UsedElements, N1, DAG);
10792 if (S0 == N0 && S1 == N1)
10793 return SDValue();
10795 return DAG.getVectorShuffle(VT, SDLoc(SVN), S0, S1, SVN->getMask());
10796 }
10798 // Tries to turn a shuffle of two CONCAT_VECTORS into a single concat.
10799 static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
10800 EVT VT = N->getValueType(0);
10801 unsigned NumElts = VT.getVectorNumElements();
10803 SDValue N0 = N->getOperand(0);
10804 SDValue N1 = N->getOperand(1);
10805 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
10807 SmallVector<SDValue, 4> Ops;
10808 EVT ConcatVT = N0.getOperand(0).getValueType();
10809 unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
10810 unsigned NumConcats = NumElts / NumElemsPerConcat;
10812 // Look at every vector that's inserted. We're looking for exact
10813 // subvector-sized copies from a concatenated vector
10814 for (unsigned I = 0; I != NumConcats; ++I) {
10815 // Make sure we're dealing with a copy.
10816 unsigned Begin = I * NumElemsPerConcat;
10817 bool AllUndef = true, NoUndef = true;
10818 for (unsigned J = Begin; J != Begin + NumElemsPerConcat; ++J) {
10819 if (SVN->getMaskElt(J) >= 0)
10820 AllUndef = false;
10821 else
10822 NoUndef = false;
10823 }
10825 if (NoUndef) {
10826 if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0)
10827 return SDValue();
10829 for (unsigned J = 1; J != NumElemsPerConcat; ++J)
10830 if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J))
10831 return SDValue();
10833 unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat;
10834 if (FirstElt < N0.getNumOperands())
10835 Ops.push_back(N0.getOperand(FirstElt));
10836 else
10837 Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands()));
10839 } else if (AllUndef) {
10840 Ops.push_back(DAG.getUNDEF(N0.getOperand(0).getValueType()));
10841 } else { // Mixed with general masks and undefs, can't do optimization.
10842 return SDValue();
10843 }
10844 }
10846 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
10847 }
10849 SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
10850 EVT VT = N->getValueType(0);
10851 unsigned NumElts = VT.getVectorNumElements();
10853 SDValue N0 = N->getOperand(0);
10854 SDValue N1 = N->getOperand(1);
10856 assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");
10858 // Canonicalize shuffle undef, undef -> undef
10859 if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
10860 return DAG.getUNDEF(VT);
10862 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
10864 // Canonicalize shuffle v, v -> v, undef
10865 if (N0 == N1) {
10866 SmallVector<int, 8> NewMask;
10867 for (unsigned i = 0; i != NumElts; ++i) {
10868 int Idx = SVN->getMaskElt(i);
10869 if (Idx >= (int)NumElts) Idx -= NumElts;
10870 NewMask.push_back(Idx);
10871 }
10872 return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT),
10873 &NewMask[0]);
10874 }
10876 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
10877 if (N0.getOpcode() == ISD::UNDEF) {
10878 SmallVector<int, 8> NewMask;
10879 for (unsigned i = 0; i != NumElts; ++i) {
10880 int Idx = SVN->getMaskElt(i);
10881 if (Idx >= 0) {
10882 if (Idx >= (int)NumElts)
10883 Idx -= NumElts;
10884 else
10885 Idx = -1; // remove reference to lhs
10886 }
10887 NewMask.push_back(Idx);
10888 }
10889 return DAG.getVectorShuffle(VT, SDLoc(N), N1, DAG.getUNDEF(VT),
10890 &NewMask[0]);
10891 }
10893 // Remove references to rhs if it is undef
10894 if (N1.getOpcode() == ISD::UNDEF) {
10895 bool Changed = false;
10896 SmallVector<int, 8> NewMask;
10897 for (unsigned i = 0; i != NumElts; ++i) {
10898 int Idx = SVN->getMaskElt(i);
10899 if (Idx >= (int)NumElts) {
10900 Idx = -1;
10901 Changed = true;
10902 }
10903 NewMask.push_back(Idx);
10904 }
10905 if (Changed)
10906 return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, &NewMask[0]);
10907 }
10909 // If it is a splat, check if the argument vector is another splat or a
10910 // build_vector with all scalar elements the same.
10911 if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
10912 SDNode *V = N0.getNode();
10914 // If this is a bit convert that changes the element type of the vector but
10915 // not the number of vector elements, look through it. Be careful not to
10916 // look though conversions that change things like v4f32 to v2f64.
10917 if (V->getOpcode() == ISD::BITCAST) {
10918 SDValue ConvInput = V->getOperand(0);
10919 if (ConvInput.getValueType().isVector() &&
10920 ConvInput.getValueType().getVectorNumElements() == NumElts)
10921 V = ConvInput.getNode();
10922 }
10924 if (V->getOpcode() == ISD::BUILD_VECTOR) {
10925 assert(V->getNumOperands() == NumElts &&
10926 "BUILD_VECTOR has wrong number of operands");
10927 SDValue Base;
10928 bool AllSame = true;
10929 for (unsigned i = 0; i != NumElts; ++i) {
10930 if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
10931 Base = V->getOperand(i);
10932 break;
10933 }
10934 }
10935 // Splat of <u, u, u, u>, return <u, u, u, u>
10936 if (!Base.getNode())
10937 return N0;
10938 for (unsigned i = 0; i != NumElts; ++i) {
10939 if (V->getOperand(i) != Base) {
10940 AllSame = false;
10941 break;
10942 }
10943 }
10944 // Splat of <x, x, x, x>, return <x, x, x, x>
10945 if (AllSame)
10946 return N0;
10947 }
10948 }
10950 // There are various patterns used to build up a vector from smaller vectors,
10951 // subvectors, or elements. Scan chains of these and replace unused insertions
10952 // or components with undef.
10953 if (SDValue S = simplifyShuffleOperands(SVN, N0, N1, DAG))
10954 return S;
10956 if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
10957 Level < AfterLegalizeVectorOps &&
10958 (N1.getOpcode() == ISD::UNDEF ||
10959 (N1.getOpcode() == ISD::CONCAT_VECTORS &&
10960 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
10961 SDValue V = partitionShuffleOfConcats(N, DAG);
10963 if (V.getNode())
10964 return V;
10965 }
10967 // If this shuffle node is simply a swizzle of another shuffle node,
10968 // then try to simplify it.
10969 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
10970 N1.getOpcode() == ISD::UNDEF) {
10972 ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
10974 // The incoming shuffle must be of the same type as the result of the
10975 // current shuffle.
10976 assert(OtherSV->getOperand(0).getValueType() == VT &&
10977 "Shuffle types don't match");
10979 SmallVector<int, 4> Mask;
10980 // Compute the combined shuffle mask.
10981 for (unsigned i = 0; i != NumElts; ++i) {
10982 int Idx = SVN->getMaskElt(i);
10983 assert(Idx < (int)NumElts && "Index references undef operand");
10984 // Next, this index comes from the first value, which is the incoming
10985 // shuffle. Adopt the incoming index.
10986 if (Idx >= 0)
10987 Idx = OtherSV->getMaskElt(Idx);
10988 Mask.push_back(Idx);
10989 }
10991 // Check if all indices in Mask are Undef. In case, propagate Undef.
10992 bool isUndefMask = true;
10993 for (unsigned i = 0; i != NumElts && isUndefMask; ++i)
10994 isUndefMask &= Mask[i] < 0;
10996 if (isUndefMask)
10997 return DAG.getUNDEF(VT);
10999 bool CommuteOperands = false;
11000 if (N0.getOperand(1).getOpcode() != ISD::UNDEF) {
11001 // To be valid, the combine shuffle mask should only reference elements
11002 // from one of the two vectors in input to the inner shufflevector.
11003 bool IsValidMask = true;
11004 for (unsigned i = 0; i != NumElts && IsValidMask; ++i)
11005 // See if the combined mask only reference undefs or elements coming
11006 // from the first shufflevector operand.
11007 IsValidMask = Mask[i] < 0 || (unsigned)Mask[i] < NumElts;
11009 if (!IsValidMask) {
11010 IsValidMask = true;
11011 for (unsigned i = 0; i != NumElts && IsValidMask; ++i)
11012 // Check that all the elements come from the second shuffle operand.
11013 IsValidMask = Mask[i] < 0 || (unsigned)Mask[i] >= NumElts;
11014 CommuteOperands = IsValidMask;
11015 }
11017 // Early exit if the combined shuffle mask is not valid.
11018 if (!IsValidMask)
11019 return SDValue();
11020 }
11022 // See if this pair of shuffles can be safely folded according to either
11023 // of the following rules:
11024 // shuffle(shuffle(x, y), undef) -> x
11025 // shuffle(shuffle(x, undef), undef) -> x
11026 // shuffle(shuffle(x, y), undef) -> y
11027 bool IsIdentityMask = true;
11028 unsigned BaseMaskIndex = CommuteOperands ? NumElts : 0;
11029 for (unsigned i = 0; i != NumElts && IsIdentityMask; ++i) {
11030 // Skip Undefs.
11031 if (Mask[i] < 0)
11032 continue;
11034 // The combined shuffle must map each index to itself.
11035 IsIdentityMask = (unsigned)Mask[i] == i + BaseMaskIndex;
11036 }
11038 if (IsIdentityMask) {
11039 if (CommuteOperands)
11040 // optimize shuffle(shuffle(x, y), undef) -> y.
11041 return OtherSV->getOperand(1);
11043 // optimize shuffle(shuffle(x, undef), undef) -> x
11044 // optimize shuffle(shuffle(x, y), undef) -> x
11045 return OtherSV->getOperand(0);
11046 }
11048 // It may still be beneficial to combine the two shuffles if the
11049 // resulting shuffle is legal.
11050 if (TLI.isTypeLegal(VT)) {
11051 if (!CommuteOperands) {
11052 if (TLI.isShuffleMaskLegal(Mask, VT))
11053 // shuffle(shuffle(x, undef, M1), undef, M2) -> shuffle(x, undef, M3).
11054 // shuffle(shuffle(x, y, M1), undef, M2) -> shuffle(x, undef, M3)
11055 return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(0), N1,
11056 &Mask[0]);
11057 } else {
11058 // Compute the commuted shuffle mask.
11059 for (unsigned i = 0; i != NumElts; ++i) {
11060 int idx = Mask[i];
11061 if (idx < 0)
11062 continue;
11063 else if (idx < (int)NumElts)
11064 Mask[i] = idx + NumElts;
11065 else
11066 Mask[i] = idx - NumElts;
11067 }
11069 if (TLI.isShuffleMaskLegal(Mask, VT))
11070 // shuffle(shuffle(x, y, M1), undef, M2) -> shuffle(y, undef, M3)
11071 return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(1), N1,
11072 &Mask[0]);
11073 }
11074 }
11075 }
11077 // Canonicalize shuffles according to rules:
11078 // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
11079 // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
11080 // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
11081 if (N1.getOpcode() == ISD::VECTOR_SHUFFLE && N0.getOpcode() != ISD::UNDEF &&
11082 N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
11083 TLI.isTypeLegal(VT)) {
11084 // The incoming shuffle must be of the same type as the result of the
11085 // current shuffle.
11086 assert(N1->getOperand(0).getValueType() == VT &&
11087 "Shuffle types don't match");
11089 SDValue SV0 = N1->getOperand(0);
11090 SDValue SV1 = N1->getOperand(1);
11091 bool HasSameOp0 = N0 == SV0;
11092 bool IsSV1Undef = SV1.getOpcode() == ISD::UNDEF;
11093 if (HasSameOp0 || IsSV1Undef || N0 == SV1)
11094 // Commute the operands of this shuffle so that next rule
11095 // will trigger.
11096 return DAG.getCommutedVectorShuffle(*SVN);
11097 }
11099 // Try to fold according to rules:
11100 // shuffle(shuffle(A, B, M0), B, M1) -> shuffle(A, B, M2)
11101 // shuffle(shuffle(A, B, M0), A, M1) -> shuffle(A, B, M2)
11102 // shuffle(shuffle(A, Undef, M0), B, M1) -> shuffle(A, B, M2)
11103 // shuffle(shuffle(A, Undef, M0), A, M1) -> shuffle(A, Undef, M2)
11104 // Don't try to fold shuffles with illegal type.
11105 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
11106 N1.getOpcode() != ISD::UNDEF && TLI.isTypeLegal(VT)) {
11107 ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
11109 // The incoming shuffle must be of the same type as the result of the
11110 // current shuffle.
11111 assert(OtherSV->getOperand(0).getValueType() == VT &&
11112 "Shuffle types don't match");
11114 SDValue SV0 = OtherSV->getOperand(0);
11115 SDValue SV1 = OtherSV->getOperand(1);
11116 bool HasSameOp0 = N1 == SV0;
11117 bool IsSV1Undef = SV1.getOpcode() == ISD::UNDEF;
11118 if (!HasSameOp0 && !IsSV1Undef && N1 != SV1)
11119 // Early exit.
11120 return SDValue();
11122 SmallVector<int, 4> Mask;
11123 // Compute the combined shuffle mask for a shuffle with SV0 as the first
11124 // operand, and SV1 as the second operand.
11125 for (unsigned i = 0; i != NumElts; ++i) {
11126 int Idx = SVN->getMaskElt(i);
11127 if (Idx < 0) {
11128 // Propagate Undef.
11129 Mask.push_back(Idx);
11130 continue;
11131 }
11133 if (Idx < (int)NumElts) {
11134 Idx = OtherSV->getMaskElt(Idx);
11135 if (IsSV1Undef && Idx >= (int) NumElts)
11136 Idx = -1; // Propagate Undef.
11137 } else
11138 Idx = HasSameOp0 ? Idx - NumElts : Idx;
11140 Mask.push_back(Idx);
11141 }
11143 // Check if all indices in Mask are Undef. In case, propagate Undef.
11144 bool isUndefMask = true;
11145 for (unsigned i = 0; i != NumElts && isUndefMask; ++i)
11146 isUndefMask &= Mask[i] < 0;
11148 if (isUndefMask)
11149 return DAG.getUNDEF(VT);
11151 // Avoid introducing shuffles with illegal mask.
11152 if (TLI.isShuffleMaskLegal(Mask, VT)) {
11153 if (IsSV1Undef)
11154 // shuffle(shuffle(A, Undef, M0), B, M1) -> shuffle(A, B, M2)
11155 // shuffle(shuffle(A, Undef, M0), A, M1) -> shuffle(A, Undef, M2)
11156 return DAG.getVectorShuffle(VT, SDLoc(N), SV0, N1, &Mask[0]);
11157 return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, &Mask[0]);
11158 }
11159 }
11161 return SDValue();
11162 }
11164 SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
11165 SDValue N0 = N->getOperand(0);
11166 SDValue N2 = N->getOperand(2);
11168 // If the input vector is a concatenation, and the insert replaces
11169 // one of the halves, we can optimize into a single concat_vectors.
11170 if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
11171 N0->getNumOperands() == 2 && N2.getOpcode() == ISD::Constant) {
11172 APInt InsIdx = cast<ConstantSDNode>(N2)->getAPIntValue();
11173 EVT VT = N->getValueType(0);
11175 // Lower half: fold (insert_subvector (concat_vectors X, Y), Z) ->
11176 // (concat_vectors Z, Y)
11177 if (InsIdx == 0)
11178 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
11179 N->getOperand(1), N0.getOperand(1));
11181 // Upper half: fold (insert_subvector (concat_vectors X, Y), Z) ->
11182 // (concat_vectors X, Z)
11183 if (InsIdx == VT.getVectorNumElements()/2)
11184 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
11185 N0.getOperand(0), N->getOperand(1));
11186 }
11188 return SDValue();
11189 }
11191 /// Returns a vector_shuffle if it able to transform an AND to a vector_shuffle
11192 /// with the destination vector and a zero vector.
11193 /// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
11194 /// vector_shuffle V, Zero, <0, 4, 2, 4>
11195 SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
11196 EVT VT = N->getValueType(0);
11197 SDLoc dl(N);
11198 SDValue LHS = N->getOperand(0);
11199 SDValue RHS = N->getOperand(1);
11200 if (N->getOpcode() == ISD::AND) {
11201 if (RHS.getOpcode() == ISD::BITCAST)
11202 RHS = RHS.getOperand(0);
11203 if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
11204 SmallVector<int, 8> Indices;
11205 unsigned NumElts = RHS.getNumOperands();
11206 for (unsigned i = 0; i != NumElts; ++i) {
11207 SDValue Elt = RHS.getOperand(i);
11208 if (!isa<ConstantSDNode>(Elt))
11209 return SDValue();
11211 if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
11212 Indices.push_back(i);
11213 else if (cast<ConstantSDNode>(Elt)->isNullValue())
11214 Indices.push_back(NumElts);
11215 else
11216 return SDValue();
11217 }
11219 // Let's see if the target supports this vector_shuffle.
11220 EVT RVT = RHS.getValueType();
11221 if (!TLI.isVectorClearMaskLegal(Indices, RVT))
11222 return SDValue();
11224 // Return the new VECTOR_SHUFFLE node.
11225 EVT EltVT = RVT.getVectorElementType();
11226 SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(),
11227 DAG.getConstant(0, EltVT));
11228 SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), RVT, ZeroOps);
11229 LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
11230 SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
11231 return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
11232 }
11233 }
11235 return SDValue();
11236 }
11238 /// Visit a binary vector operation, like ADD.
11239 SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
11240 assert(N->getValueType(0).isVector() &&
11241 "SimplifyVBinOp only works on vectors!");
11243 SDValue LHS = N->getOperand(0);
11244 SDValue RHS = N->getOperand(1);
11245 SDValue Shuffle = XformToShuffleWithZero(N);
11246 if (Shuffle.getNode()) return Shuffle;
11248 // If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold
11249 // this operation.
11250 if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
11251 RHS.getOpcode() == ISD::BUILD_VECTOR) {
11252 // Check if both vectors are constants. If not bail out.
11253 if (!(cast<BuildVectorSDNode>(LHS)->isConstant() &&
11254 cast<BuildVectorSDNode>(RHS)->isConstant()))
11255 return SDValue();
11257 SmallVector<SDValue, 8> Ops;
11258 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
11259 SDValue LHSOp = LHS.getOperand(i);
11260 SDValue RHSOp = RHS.getOperand(i);
11262 // Can't fold divide by zero.
11263 if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
11264 N->getOpcode() == ISD::FDIV) {
11265 if ((RHSOp.getOpcode() == ISD::Constant &&
11266 cast<ConstantSDNode>(RHSOp.getNode())->isNullValue()) ||
11267 (RHSOp.getOpcode() == ISD::ConstantFP &&
11268 cast<ConstantFPSDNode>(RHSOp.getNode())->getValueAPF().isZero()))
11269 break;
11270 }
11272 EVT VT = LHSOp.getValueType();
11273 EVT RVT = RHSOp.getValueType();
11274 if (RVT != VT) {
11275 // Integer BUILD_VECTOR operands may have types larger than the element
11276 // size (e.g., when the element type is not legal). Prior to type
11277 // legalization, the types may not match between the two BUILD_VECTORS.
11278 // Truncate one of the operands to make them match.
11279 if (RVT.getSizeInBits() > VT.getSizeInBits()) {
11280 RHSOp = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, RHSOp);
11281 } else {
11282 LHSOp = DAG.getNode(ISD::TRUNCATE, SDLoc(N), RVT, LHSOp);
11283 VT = RVT;
11284 }
11285 }
11286 SDValue FoldOp = DAG.getNode(N->getOpcode(), SDLoc(LHS), VT,
11287 LHSOp, RHSOp);
11288 if (FoldOp.getOpcode() != ISD::UNDEF &&
11289 FoldOp.getOpcode() != ISD::Constant &&
11290 FoldOp.getOpcode() != ISD::ConstantFP)
11291 break;
11292 Ops.push_back(FoldOp);
11293 AddToWorklist(FoldOp.getNode());
11294 }
11296 if (Ops.size() == LHS.getNumOperands())
11297 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), LHS.getValueType(), Ops);
11298 }
11300 // Type legalization might introduce new shuffles in the DAG.
11301 // Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask)))
11302 // -> (shuffle (VBinOp (A, B)), Undef, Mask).
11303 if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) &&
11304 isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() &&
11305 LHS.getOperand(1).getOpcode() == ISD::UNDEF &&
11306 RHS.getOperand(1).getOpcode() == ISD::UNDEF) {
11307 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS);
11308 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS);
11310 if (SVN0->getMask().equals(SVN1->getMask())) {
11311 EVT VT = N->getValueType(0);
11312 SDValue UndefVector = LHS.getOperand(1);
11313 SDValue NewBinOp = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
11314 LHS.getOperand(0), RHS.getOperand(0));
11315 AddUsersToWorklist(N);
11316 return DAG.getVectorShuffle(VT, SDLoc(N), NewBinOp, UndefVector,
11317 &SVN0->getMask()[0]);
11318 }
11319 }
11321 return SDValue();
11322 }
11324 /// Visit a binary vector operation, like FABS/FNEG.
11325 SDValue DAGCombiner::SimplifyVUnaryOp(SDNode *N) {
11326 assert(N->getValueType(0).isVector() &&
11327 "SimplifyVUnaryOp only works on vectors!");
11329 SDValue N0 = N->getOperand(0);
11331 if (N0.getOpcode() != ISD::BUILD_VECTOR)
11332 return SDValue();
11334 // Operand is a BUILD_VECTOR node, see if we can constant fold it.
11335 SmallVector<SDValue, 8> Ops;
11336 for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
11337 SDValue Op = N0.getOperand(i);
11338 if (Op.getOpcode() != ISD::UNDEF &&
11339 Op.getOpcode() != ISD::ConstantFP)
11340 break;
11341 EVT EltVT = Op.getValueType();
11342 SDValue FoldOp = DAG.getNode(N->getOpcode(), SDLoc(N0), EltVT, Op);
11343 if (FoldOp.getOpcode() != ISD::UNDEF &&
11344 FoldOp.getOpcode() != ISD::ConstantFP)
11345 break;
11346 Ops.push_back(FoldOp);
11347 AddToWorklist(FoldOp.getNode());
11348 }
11350 if (Ops.size() != N0.getNumOperands())
11351 return SDValue();
11353 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N0.getValueType(), Ops);
11354 }
11356 SDValue DAGCombiner::SimplifySelect(SDLoc DL, SDValue N0,
11357 SDValue N1, SDValue N2){
11358 assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");
11360 SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
11361 cast<CondCodeSDNode>(N0.getOperand(2))->get());
11363 // If we got a simplified select_cc node back from SimplifySelectCC, then
11364 // break it down into a new SETCC node, and a new SELECT node, and then return
11365 // the SELECT node, since we were called with a SELECT node.
11366 if (SCC.getNode()) {
11367 // Check to see if we got a select_cc back (to turn into setcc/select).
11368 // Otherwise, just return whatever node we got back, like fabs.
11369 if (SCC.getOpcode() == ISD::SELECT_CC) {
11370 SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
11371 N0.getValueType(),
11372 SCC.getOperand(0), SCC.getOperand(1),
11373 SCC.getOperand(4));
11374 AddToWorklist(SETCC.getNode());
11375 return DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
11376 SCC.getOperand(2), SCC.getOperand(3));
11377 }
11379 return SCC;
11380 }
11381 return SDValue();
11382 }
11384 /// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values
11385 /// being selected between, see if we can simplify the select. Callers of this
11386 /// should assume that TheSelect is deleted if this returns true. As such, they
11387 /// should return the appropriate thing (e.g. the node) back to the top-level of
11388 /// the DAG combiner loop to avoid it being looked at.
11389 bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
11390 SDValue RHS) {
11392 // Cannot simplify select with vector condition
11393 if (TheSelect->getOperand(0).getValueType().isVector()) return false;
11395 // If this is a select from two identical things, try to pull the operation
11396 // through the select.
11397 if (LHS.getOpcode() != RHS.getOpcode() ||
11398 !LHS.hasOneUse() || !RHS.hasOneUse())
11399 return false;
11401 // If this is a load and the token chain is identical, replace the select
11402 // of two loads with a load through a select of the address to load from.
11403 // This triggers in things like "select bool X, 10.0, 123.0" after the FP
11404 // constants have been dropped into the constant pool.
11405 if (LHS.getOpcode() == ISD::LOAD) {
11406 LoadSDNode *LLD = cast<LoadSDNode>(LHS);
11407 LoadSDNode *RLD = cast<LoadSDNode>(RHS);
11409 // Token chains must be identical.
11410 if (LHS.getOperand(0) != RHS.getOperand(0) ||
11411 // Do not let this transformation reduce the number of volatile loads.
11412 LLD->isVolatile() || RLD->isVolatile() ||
11413 // If this is an EXTLOAD, the VT's must match.
11414 LLD->getMemoryVT() != RLD->getMemoryVT() ||
11415 // If this is an EXTLOAD, the kind of extension must match.
11416 (LLD->getExtensionType() != RLD->getExtensionType() &&
11417 // The only exception is if one of the extensions is anyext.
11418 LLD->getExtensionType() != ISD::EXTLOAD &&
11419 RLD->getExtensionType() != ISD::EXTLOAD) ||
11420 // FIXME: this discards src value information. This is
11421 // over-conservative. It would be beneficial to be able to remember
11422 // both potential memory locations. Since we are discarding
11423 // src value info, don't do the transformation if the memory
11424 // locations are not in the default address space.
11425 LLD->getPointerInfo().getAddrSpace() != 0 ||
11426 RLD->getPointerInfo().getAddrSpace() != 0 ||
11427 !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
11428 LLD->getBasePtr().getValueType()))
11429 return false;
11431 // Check that the select condition doesn't reach either load. If so,
11432 // folding this will induce a cycle into the DAG. If not, this is safe to
11433 // xform, so create a select of the addresses.
11434 SDValue Addr;
11435 if (TheSelect->getOpcode() == ISD::SELECT) {
11436 SDNode *CondNode = TheSelect->getOperand(0).getNode();
11437 if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
11438 (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
11439 return false;
11440 // The loads must not depend on one another.
11441 if (LLD->isPredecessorOf(RLD) ||
11442 RLD->isPredecessorOf(LLD))
11443 return false;
11444 Addr = DAG.getSelect(SDLoc(TheSelect),
11445 LLD->getBasePtr().getValueType(),
11446 TheSelect->getOperand(0), LLD->getBasePtr(),
11447 RLD->getBasePtr());
11448 } else { // Otherwise SELECT_CC
11449 SDNode *CondLHS = TheSelect->getOperand(0).getNode();
11450 SDNode *CondRHS = TheSelect->getOperand(1).getNode();
11452 if ((LLD->hasAnyUseOfValue(1) &&
11453 (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
11454 (RLD->hasAnyUseOfValue(1) &&
11455 (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
11456 return false;
11458 Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect),
11459 LLD->getBasePtr().getValueType(),
11460 TheSelect->getOperand(0),
11461 TheSelect->getOperand(1),
11462 LLD->getBasePtr(), RLD->getBasePtr(),
11463 TheSelect->getOperand(4));
11464 }
11466 SDValue Load;
11467 // It is safe to replace the two loads if they have different alignments,
11468 // but the new load must be the minimum (most restrictive) alignment of the
11469 // inputs.
11470 bool isInvariant = LLD->getAlignment() & RLD->getAlignment();
11471 unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment());
11472 if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
11473 Load = DAG.getLoad(TheSelect->getValueType(0),
11474 SDLoc(TheSelect),
11475 // FIXME: Discards pointer and AA info.
11476 LLD->getChain(), Addr, MachinePointerInfo(),
11477 LLD->isVolatile(), LLD->isNonTemporal(),
11478 isInvariant, Alignment);
11479 } else {
11480 Load = DAG.getExtLoad(LLD->getExtensionType() == ISD::EXTLOAD ?
11481 RLD->getExtensionType() : LLD->getExtensionType(),
11482 SDLoc(TheSelect),
11483 TheSelect->getValueType(0),
11484 // FIXME: Discards pointer and AA info.
11485 LLD->getChain(), Addr, MachinePointerInfo(),
11486 LLD->getMemoryVT(), LLD->isVolatile(),
11487 LLD->isNonTemporal(), isInvariant, Alignment);
11488 }
11490 // Users of the select now use the result of the load.
11491 CombineTo(TheSelect, Load);
11493 // Users of the old loads now use the new load's chain. We know the
11494 // old-load value is dead now.
11495 CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
11496 CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
11497 return true;
11498 }
11500 return false;
11501 }
11503 /// Simplify an expression of the form (N0 cond N1) ? N2 : N3
11504 /// where 'cond' is the comparison specified by CC.
11505 SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
11506 SDValue N2, SDValue N3,
11507 ISD::CondCode CC, bool NotExtCompare) {
11508 // (x ? y : y) -> y.
11509 if (N2 == N3) return N2;
11511 EVT VT = N2.getValueType();
11512 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
11513 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
11514 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());
11516 // Determine if the condition we're dealing with is constant
11517 SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
11518 N0, N1, CC, DL, false);
11519 if (SCC.getNode()) AddToWorklist(SCC.getNode());
11520 ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());
11522 // fold select_cc true, x, y -> x
11523 if (SCCC && !SCCC->isNullValue())
11524 return N2;
11525 // fold select_cc false, x, y -> y
11526 if (SCCC && SCCC->isNullValue())
11527 return N3;
11529 // Check to see if we can simplify the select into an fabs node
11530 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
11531 // Allow either -0.0 or 0.0
11532 if (CFP->getValueAPF().isZero()) {
11533 // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
11534 if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
11535 N0 == N2 && N3.getOpcode() == ISD::FNEG &&
11536 N2 == N3.getOperand(0))
11537 return DAG.getNode(ISD::FABS, DL, VT, N0);
11539 // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
11540 if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
11541 N0 == N3 && N2.getOpcode() == ISD::FNEG &&
11542 N2.getOperand(0) == N3)
11543 return DAG.getNode(ISD::FABS, DL, VT, N3);
11544 }
11545 }
11547 // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)"
11548 // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
11549 // in it. This is a win when the constant is not otherwise available because
11550 // it replaces two constant pool loads with one. We only do this if the FP
11551 // type is known to be legal, because if it isn't, then we are before legalize
11552 // types an we want the other legalization to happen first (e.g. to avoid
11553 // messing with soft float) and if the ConstantFP is not legal, because if
11554 // it is legal, we may not need to store the FP constant in a constant pool.
11555 if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
11556 if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
11557 if (TLI.isTypeLegal(N2.getValueType()) &&
11558 (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
11559 TargetLowering::Legal &&
11560 !TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0)) &&
11561 !TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0))) &&
11562 // If both constants have multiple uses, then we won't need to do an
11563 // extra load, they are likely around in registers for other users.
11564 (TV->hasOneUse() || FV->hasOneUse())) {
11565 Constant *Elts[] = {
11566 const_cast<ConstantFP*>(FV->getConstantFPValue()),
11567 const_cast<ConstantFP*>(TV->getConstantFPValue())
11568 };
11569 Type *FPTy = Elts[0]->getType();
11570 const DataLayout &TD = *TLI.getDataLayout();
11572 // Create a ConstantArray of the two constants.
11573 Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
11574 SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
11575 TD.getPrefTypeAlignment(FPTy));
11576 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
11578 // Get the offsets to the 0 and 1 element of the array so that we can
11579 // select between them.
11580 SDValue Zero = DAG.getIntPtrConstant(0);
11581 unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
11582 SDValue One = DAG.getIntPtrConstant(EltSize);
11584 SDValue Cond = DAG.getSetCC(DL,
11585 getSetCCResultType(N0.getValueType()),
11586 N0, N1, CC);
11587 AddToWorklist(Cond.getNode());
11588 SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(),
11589 Cond, One, Zero);
11590 AddToWorklist(CstOffset.getNode());
11591 CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx,
11592 CstOffset);
11593 AddToWorklist(CPIdx.getNode());
11594 return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
11595 MachinePointerInfo::getConstantPool(), false,
11596 false, false, Alignment);
11598 }
11599 }
11601 // Check to see if we can perform the "gzip trick", transforming
11602 // (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A)
11603 if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
11604 (N1C->isNullValue() || // (a < 0) ? b : 0
11605 (N1C->getAPIntValue() == 1 && N0 == N2))) { // (a < 1) ? a : 0
11606 EVT XType = N0.getValueType();
11607 EVT AType = N2.getValueType();
11608 if (XType.bitsGE(AType)) {
11609 // and (sra X, size(X)-1, A) -> "and (srl X, C2), A" iff A is a
11610 // single-bit constant.
11611 if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
11612 unsigned ShCtV = N2C->getAPIntValue().logBase2();
11613 ShCtV = XType.getSizeInBits()-ShCtV-1;
11614 SDValue ShCt = DAG.getConstant(ShCtV,
11615 getShiftAmountTy(N0.getValueType()));
11616 SDValue Shift = DAG.getNode(ISD::SRL, SDLoc(N0),
11617 XType, N0, ShCt);
11618 AddToWorklist(Shift.getNode());
11620 if (XType.bitsGT(AType)) {
11621 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
11622 AddToWorklist(Shift.getNode());
11623 }
11625 return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
11626 }
11628 SDValue Shift = DAG.getNode(ISD::SRA, SDLoc(N0),
11629 XType, N0,
11630 DAG.getConstant(XType.getSizeInBits()-1,
11631 getShiftAmountTy(N0.getValueType())));
11632 AddToWorklist(Shift.getNode());
11634 if (XType.bitsGT(AType)) {
11635 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
11636 AddToWorklist(Shift.getNode());
11637 }
11639 return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
11640 }
11641 }
11643 // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
11644 // where y is has a single bit set.
11645 // A plaintext description would be, we can turn the SELECT_CC into an AND
11646 // when the condition can be materialized as an all-ones register. Any
11647 // single bit-test can be materialized as an all-ones register with
11648 // shift-left and shift-right-arith.
11649 if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
11650 N0->getValueType(0) == VT &&
11651 N1C && N1C->isNullValue() &&
11652 N2C && N2C->isNullValue()) {
11653 SDValue AndLHS = N0->getOperand(0);
11654 ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
11655 if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
11656 // Shift the tested bit over the sign bit.
11657 APInt AndMask = ConstAndRHS->getAPIntValue();
11658 SDValue ShlAmt =
11659 DAG.getConstant(AndMask.countLeadingZeros(),
11660 getShiftAmountTy(AndLHS.getValueType()));
11661 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);
11663 // Now arithmetic right shift it all the way over, so the result is either
11664 // all-ones, or zero.
11665 SDValue ShrAmt =
11666 DAG.getConstant(AndMask.getBitWidth()-1,
11667 getShiftAmountTy(Shl.getValueType()));
11668 SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);
11670 return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
11671 }
11672 }
11674 // fold select C, 16, 0 -> shl C, 4
11675 if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
11676 TLI.getBooleanContents(N0.getValueType()) ==
11677 TargetLowering::ZeroOrOneBooleanContent) {
11679 // If the caller doesn't want us to simplify this into a zext of a compare,
11680 // don't do it.
11681 if (NotExtCompare && N2C->getAPIntValue() == 1)
11682 return SDValue();
11684 // Get a SetCC of the condition
11685 // NOTE: Don't create a SETCC if it's not legal on this target.
11686 if (!LegalOperations ||
11687 TLI.isOperationLegal(ISD::SETCC,
11688 LegalTypes ? getSetCCResultType(N0.getValueType()) : MVT::i1)) {
11689 SDValue Temp, SCC;
11690 // cast from setcc result type to select result type
11691 if (LegalTypes) {
11692 SCC = DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()),
11693 N0, N1, CC);
11694 if (N2.getValueType().bitsLT(SCC.getValueType()))
11695 Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2),
11696 N2.getValueType());
11697 else
11698 Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
11699 N2.getValueType(), SCC);
11700 } else {
11701 SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
11702 Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
11703 N2.getValueType(), SCC);
11704 }
11706 AddToWorklist(SCC.getNode());
11707 AddToWorklist(Temp.getNode());
11709 if (N2C->getAPIntValue() == 1)
11710 return Temp;
11712 // shl setcc result by log2 n2c
11713 return DAG.getNode(
11714 ISD::SHL, DL, N2.getValueType(), Temp,
11715 DAG.getConstant(N2C->getAPIntValue().logBase2(),
11716 getShiftAmountTy(Temp.getValueType())));
11717 }
11718 }
11720 // Check to see if this is the equivalent of setcc
11721 // FIXME: Turn all of these into setcc if setcc if setcc is legal
11722 // otherwise, go ahead with the folds.
11723 if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
11724 EVT XType = N0.getValueType();
11725 if (!LegalOperations ||
11726 TLI.isOperationLegal(ISD::SETCC, getSetCCResultType(XType))) {
11727 SDValue Res = DAG.getSetCC(DL, getSetCCResultType(XType), N0, N1, CC);
11728 if (Res.getValueType() != VT)
11729 Res = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Res);
11730 return Res;
11731 }
11733 // fold (seteq X, 0) -> (srl (ctlz X, log2(size(X))))
11734 if (N1C && N1C->isNullValue() && CC == ISD::SETEQ &&
11735 (!LegalOperations ||
11736 TLI.isOperationLegal(ISD::CTLZ, XType))) {
11737 SDValue Ctlz = DAG.getNode(ISD::CTLZ, SDLoc(N0), XType, N0);
11738 return DAG.getNode(ISD::SRL, DL, XType, Ctlz,
11739 DAG.getConstant(Log2_32(XType.getSizeInBits()),
11740 getShiftAmountTy(Ctlz.getValueType())));
11741 }
11742 // fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1))
11743 if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
11744 SDValue NegN0 = DAG.getNode(ISD::SUB, SDLoc(N0),
11745 XType, DAG.getConstant(0, XType), N0);
11746 SDValue NotN0 = DAG.getNOT(SDLoc(N0), N0, XType);
11747 return DAG.getNode(ISD::SRL, DL, XType,
11748 DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0),
11749 DAG.getConstant(XType.getSizeInBits()-1,
11750 getShiftAmountTy(XType)));
11751 }
11752 // fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1))
11753 if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
11754 SDValue Sign = DAG.getNode(ISD::SRL, SDLoc(N0), XType, N0,
11755 DAG.getConstant(XType.getSizeInBits()-1,
11756 getShiftAmountTy(N0.getValueType())));
11757 return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, XType));
11758 }
11759 }
11761 // Check to see if this is an integer abs.
11762 // select_cc setg[te] X, 0, X, -X ->
11763 // select_cc setgt X, -1, X, -X ->
11764 // select_cc setl[te] X, 0, -X, X ->
11765 // select_cc setlt X, 1, -X, X ->
11766 // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
11767 if (N1C) {
11768 ConstantSDNode *SubC = nullptr;
11769 if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
11770 (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
11771 N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
11772 SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
11773 else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
11774 (N1C->isOne() && CC == ISD::SETLT)) &&
11775 N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
11776 SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));
11778 EVT XType = N0.getValueType();
11779 if (SubC && SubC->isNullValue() && XType.isInteger()) {
11780 SDValue Shift = DAG.getNode(ISD::SRA, SDLoc(N0), XType,
11781 N0,
11782 DAG.getConstant(XType.getSizeInBits()-1,
11783 getShiftAmountTy(N0.getValueType())));
11784 SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0),
11785 XType, N0, Shift);
11786 AddToWorklist(Shift.getNode());
11787 AddToWorklist(Add.getNode());
11788 return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
11789 }
11790 }
11792 return SDValue();
11793 }
11795 /// This is a stub for TargetLowering::SimplifySetCC.
11796 SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
11797 SDValue N1, ISD::CondCode Cond,
11798 SDLoc DL, bool foldBooleans) {
11799 TargetLowering::DAGCombinerInfo
11800 DagCombineInfo(DAG, Level, false, this);
11801 return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
11802 }
11804 /// Given an ISD::SDIV node expressing a divide by constant, return
11805 /// a DAG expression to select that will generate the same value by multiplying
11806 /// by a magic number.
11807 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
11808 SDValue DAGCombiner::BuildSDIV(SDNode *N) {
11809 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
11810 if (!C)
11811 return SDValue();
11813 // Avoid division by zero.
11814 if (!C->getAPIntValue())
11815 return SDValue();
11817 std::vector<SDNode*> Built;
11818 SDValue S =
11819 TLI.BuildSDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);
11821 for (SDNode *N : Built)
11822 AddToWorklist(N);
11823 return S;
11824 }
11826 /// Given an ISD::SDIV node expressing a divide by constant power of 2, return a
11827 /// DAG expression that will generate the same value by right shifting.
11828 SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {
11829 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
11830 if (!C)
11831 return SDValue();
11833 // Avoid division by zero.
11834 if (!C->getAPIntValue())
11835 return SDValue();
11837 std::vector<SDNode *> Built;
11838 SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, &Built);
11840 for (SDNode *N : Built)
11841 AddToWorklist(N);
11842 return S;
11843 }
11845 /// Given an ISD::UDIV node expressing a divide by constant, return a DAG
11846 /// expression that will generate the same value by multiplying by a magic
11847 /// number.
11848 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
11849 SDValue DAGCombiner::BuildUDIV(SDNode *N) {
11850 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
11851 if (!C)
11852 return SDValue();
11854 // Avoid division by zero.
11855 if (!C->getAPIntValue())
11856 return SDValue();
11858 std::vector<SDNode*> Built;
11859 SDValue S =
11860 TLI.BuildUDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);
11862 for (SDNode *N : Built)
11863 AddToWorklist(N);
11864 return S;
11865 }
11867 SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op) {
11868 if (Level >= AfterLegalizeDAG)
11869 return SDValue();
11871 // Expose the DAG combiner to the target combiner implementations.
11872 TargetLowering::DAGCombinerInfo DCI(DAG, Level, false, this);
11874 unsigned Iterations = 0;
11875 if (SDValue Est = TLI.getRecipEstimate(Op, DCI, Iterations)) {
11876 if (Iterations) {
11877 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
11878 // For the reciprocal, we need to find the zero of the function:
11879 // F(X) = A X - 1 [which has a zero at X = 1/A]
11880 // =>
11881 // X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
11882 // does not require additional intermediate precision]
11883 EVT VT = Op.getValueType();
11884 SDLoc DL(Op);
11885 SDValue FPOne = DAG.getConstantFP(1.0, VT);
11887 AddToWorklist(Est.getNode());
11889 // Newton iterations: Est = Est + Est (1 - Arg * Est)
11890 for (unsigned i = 0; i < Iterations; ++i) {
11891 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, Est);
11892 AddToWorklist(NewEst.getNode());
11894 NewEst = DAG.getNode(ISD::FSUB, DL, VT, FPOne, NewEst);
11895 AddToWorklist(NewEst.getNode());
11897 NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst);
11898 AddToWorklist(NewEst.getNode());
11900 Est = DAG.getNode(ISD::FADD, DL, VT, Est, NewEst);
11901 AddToWorklist(Est.getNode());
11902 }
11903 }
11904 return Est;
11905 }
11907 return SDValue();
11908 }
11910 SDValue DAGCombiner::BuildRsqrtEstimate(SDValue Op) {
11911 if (Level >= AfterLegalizeDAG)
11912 return SDValue();
11914 // Expose the DAG combiner to the target combiner implementations.
11915 TargetLowering::DAGCombinerInfo DCI(DAG, Level, false, this);
11916 unsigned Iterations = 0;
11917 if (SDValue Est = TLI.getRsqrtEstimate(Op, DCI, Iterations)) {
11918 if (Iterations) {
11919 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
11920 // For the reciprocal sqrt, we need to find the zero of the function:
11921 // F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
11922 // =>
11923 // X_{i+1} = X_i (1.5 - A X_i^2 / 2)
11924 // As a result, we precompute A/2 prior to the iteration loop.
11925 EVT VT = Op.getValueType();
11926 SDLoc DL(Op);
11927 SDValue FPThreeHalves = DAG.getConstantFP(1.5, VT);
11929 AddToWorklist(Est.getNode());
11931 // We now need 0.5 * Arg which we can write as (1.5 * Arg - Arg) so that
11932 // this entire sequence requires only one FP constant.
11933 SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, FPThreeHalves, Op);
11934 AddToWorklist(HalfArg.getNode());
11936 HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Op);
11937 AddToWorklist(HalfArg.getNode());
11939 // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
11940 for (unsigned i = 0; i < Iterations; ++i) {
11941 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est);
11942 AddToWorklist(NewEst.getNode());
11944 NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst);
11945 AddToWorklist(NewEst.getNode());
11947 NewEst = DAG.getNode(ISD::FSUB, DL, VT, FPThreeHalves, NewEst);
11948 AddToWorklist(NewEst.getNode());
11950 Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst);
11951 AddToWorklist(Est.getNode());
11952 }
11953 }
11954 return Est;
11955 }
11957 return SDValue();
11958 }
11960 /// Return true if base is a frame index, which is known not to alias with
11961 /// anything but itself. Provides base object and offset as results.
11962 static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
11963 const GlobalValue *&GV, const void *&CV) {
11964 // Assume it is a primitive operation.
11965 Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr;
11967 // If it's an adding a simple constant then integrate the offset.
11968 if (Base.getOpcode() == ISD::ADD) {
11969 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
11970 Base = Base.getOperand(0);
11971 Offset += C->getZExtValue();
11972 }
11973 }
11975 // Return the underlying GlobalValue, and update the Offset. Return false
11976 // for GlobalAddressSDNode since the same GlobalAddress may be represented
11977 // by multiple nodes with different offsets.
11978 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
11979 GV = G->getGlobal();
11980 Offset += G->getOffset();
11981 return false;
11982 }
11984 // Return the underlying Constant value, and update the Offset. Return false
11985 // for ConstantSDNodes since the same constant pool entry may be represented
11986 // by multiple nodes with different offsets.
11987 if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
11988 CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal()
11989 : (const void *)C->getConstVal();
11990 Offset += C->getOffset();
11991 return false;
11992 }
11993 // If it's any of the following then it can't alias with anything but itself.
11994 return isa<FrameIndexSDNode>(Base);
11995 }
11997 /// Return true if there is any possibility that the two addresses overlap.
11998 bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
11999 // If they are the same then they must be aliases.
12000 if (Op0->getBasePtr() == Op1->getBasePtr()) return true;
12002 // If they are both volatile then they cannot be reordered.
12003 if (Op0->isVolatile() && Op1->isVolatile()) return true;
12005 // Gather base node and offset information.
12006 SDValue Base1, Base2;
12007 int64_t Offset1, Offset2;
12008 const GlobalValue *GV1, *GV2;
12009 const void *CV1, *CV2;
12010 bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(),
12011 Base1, Offset1, GV1, CV1);
12012 bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(),
12013 Base2, Offset2, GV2, CV2);
12015 // If they have a same base address then check to see if they overlap.
12016 if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
12017 return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
12018 (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
12020 // It is possible for different frame indices to alias each other, mostly
12021 // when tail call optimization reuses return address slots for arguments.
12022 // To catch this case, look up the actual index of frame indices to compute
12023 // the real alias relationship.
12024 if (isFrameIndex1 && isFrameIndex2) {
12025 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
12026 Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
12027 Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
12028 return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
12029 (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
12030 }
12032 // Otherwise, if we know what the bases are, and they aren't identical, then
12033 // we know they cannot alias.
12034 if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
12035 return false;
12037 // If we know required SrcValue1 and SrcValue2 have relatively large alignment
12038 // compared to the size and offset of the access, we may be able to prove they
12039 // do not alias. This check is conservative for now to catch cases created by
12040 // splitting vector types.
12041 if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) &&
12042 (Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) &&
12043 (Op0->getMemoryVT().getSizeInBits() >> 3 ==
12044 Op1->getMemoryVT().getSizeInBits() >> 3) &&
12045 (Op0->getOriginalAlignment() > Op0->getMemoryVT().getSizeInBits()) >> 3) {
12046 int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment();
12047 int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment();
12049 // There is no overlap between these relatively aligned accesses of similar
12050 // size, return no alias.
12051 if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 ||
12052 (OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1)
12053 return false;
12054 }
12056 bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0 ? CombinerGlobalAA :
12057 TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
12058 #ifndef NDEBUG
12059 if (CombinerAAOnlyFunc.getNumOccurrences() &&
12060 CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
12061 UseAA = false;
12062 #endif
12063 if (UseAA &&
12064 Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) {
12065 // Use alias analysis information.
12066 int64_t MinOffset = std::min(Op0->getSrcValueOffset(),
12067 Op1->getSrcValueOffset());
12068 int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) +
12069 Op0->getSrcValueOffset() - MinOffset;
12070 int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
12071 Op1->getSrcValueOffset() - MinOffset;
12072 AliasAnalysis::AliasResult AAResult =
12073 AA.alias(AliasAnalysis::Location(Op0->getMemOperand()->getValue(),
12074 Overlap1,
12075 UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
12076 AliasAnalysis::Location(Op1->getMemOperand()->getValue(),
12077 Overlap2,
12078 UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
12079 if (AAResult == AliasAnalysis::NoAlias)
12080 return false;
12081 }
12083 // Otherwise we have to assume they alias.
12084 return true;
12085 }
12087 /// Walk up chain skipping non-aliasing memory nodes,
12088 /// looking for aliasing nodes and adding them to the Aliases vector.
12089 void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
12090 SmallVectorImpl<SDValue> &Aliases) {
12091 SmallVector<SDValue, 8> Chains; // List of chains to visit.
12092 SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
12094 // Get alias information for node.
12095 bool IsLoad = isa<LoadSDNode>(N) && !cast<LSBaseSDNode>(N)->isVolatile();
12097 // Starting off.
12098 Chains.push_back(OriginalChain);
12099 unsigned Depth = 0;
12101 // Look at each chain and determine if it is an alias. If so, add it to the
12102 // aliases list. If not, then continue up the chain looking for the next
12103 // candidate.
12104 while (!Chains.empty()) {
12105 SDValue Chain = Chains.back();
12106 Chains.pop_back();
12108 // For TokenFactor nodes, look at each operand and only continue up the
12109 // chain until we find two aliases. If we've seen two aliases, assume we'll
12110 // find more and revert to original chain since the xform is unlikely to be
12111 // profitable.
12112 //
12113 // FIXME: The depth check could be made to return the last non-aliasing
12114 // chain we found before we hit a tokenfactor rather than the original
12115 // chain.
12116 if (Depth > 6 || Aliases.size() == 2) {
12117 Aliases.clear();
12118 Aliases.push_back(OriginalChain);
12119 return;
12120 }
12122 // Don't bother if we've been before.
12123 if (!Visited.insert(Chain.getNode()))
12124 continue;
12126 switch (Chain.getOpcode()) {
12127 case ISD::EntryToken:
12128 // Entry token is ideal chain operand, but handled in FindBetterChain.
12129 break;
12131 case ISD::LOAD:
12132 case ISD::STORE: {
12133 // Get alias information for Chain.
12134 bool IsOpLoad = isa<LoadSDNode>(Chain.getNode()) &&
12135 !cast<LSBaseSDNode>(Chain.getNode())->isVolatile();
12137 // If chain is alias then stop here.
12138 if (!(IsLoad && IsOpLoad) &&
12139 isAlias(cast<LSBaseSDNode>(N), cast<LSBaseSDNode>(Chain.getNode()))) {
12140 Aliases.push_back(Chain);
12141 } else {
12142 // Look further up the chain.
12143 Chains.push_back(Chain.getOperand(0));
12144 ++Depth;
12145 }
12146 break;
12147 }
12149 case ISD::TokenFactor:
12150 // We have to check each of the operands of the token factor for "small"
12151 // token factors, so we queue them up. Adding the operands to the queue
12152 // (stack) in reverse order maintains the original order and increases the
12153 // likelihood that getNode will find a matching token factor (CSE.)
12154 if (Chain.getNumOperands() > 16) {
12155 Aliases.push_back(Chain);
12156 break;
12157 }
12158 for (unsigned n = Chain.getNumOperands(); n;)
12159 Chains.push_back(Chain.getOperand(--n));
12160 ++Depth;
12161 break;
12163 default:
12164 // For all other instructions we will just have to take what we can get.
12165 Aliases.push_back(Chain);
12166 break;
12167 }
12168 }
12170 // We need to be careful here to also search for aliases through the
12171 // value operand of a store, etc. Consider the following situation:
12172 // Token1 = ...
12173 // L1 = load Token1, %52
12174 // S1 = store Token1, L1, %51
12175 // L2 = load Token1, %52+8
12176 // S2 = store Token1, L2, %51+8
12177 // Token2 = Token(S1, S2)
12178 // L3 = load Token2, %53
12179 // S3 = store Token2, L3, %52
12180 // L4 = load Token2, %53+8
12181 // S4 = store Token2, L4, %52+8
12182 // If we search for aliases of S3 (which loads address %52), and we look
12183 // only through the chain, then we'll miss the trivial dependence on L1
12184 // (which also loads from %52). We then might change all loads and
12185 // stores to use Token1 as their chain operand, which could result in
12186 // copying %53 into %52 before copying %52 into %51 (which should
12187 // happen first).
12188 //
12189 // The problem is, however, that searching for such data dependencies
12190 // can become expensive, and the cost is not directly related to the
12191 // chain depth. Instead, we'll rule out such configurations here by
12192 // insisting that we've visited all chain users (except for users
12193 // of the original chain, which is not necessary). When doing this,
12194 // we need to look through nodes we don't care about (otherwise, things
12195 // like register copies will interfere with trivial cases).
12197 SmallVector<const SDNode *, 16> Worklist;
12198 for (const SDNode *N : Visited)
12199 if (N != OriginalChain.getNode())
12200 Worklist.push_back(N);
12202 while (!Worklist.empty()) {
12203 const SDNode *M = Worklist.pop_back_val();
12205 // We have already visited M, and want to make sure we've visited any uses
12206 // of M that we care about. For uses that we've not visisted, and don't
12207 // care about, queue them to the worklist.
12209 for (SDNode::use_iterator UI = M->use_begin(),
12210 UIE = M->use_end(); UI != UIE; ++UI)
12211 if (UI.getUse().getValueType() == MVT::Other && Visited.insert(*UI)) {
12212 if (isa<MemIntrinsicSDNode>(*UI) || isa<MemSDNode>(*UI)) {
12213 // We've not visited this use, and we care about it (it could have an
12214 // ordering dependency with the original node).
12215 Aliases.clear();
12216 Aliases.push_back(OriginalChain);
12217 return;
12218 }
12220 // We've not visited this use, but we don't care about it. Mark it as
12221 // visited and enqueue it to the worklist.
12222 Worklist.push_back(*UI);
12223 }
12224 }
12225 }
12227 /// Walk up chain skipping non-aliasing memory nodes, looking for a better chain
12228 /// (aliasing node.)
12229 SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
12230 SmallVector<SDValue, 8> Aliases; // Ops for replacing token factor.
12232 // Accumulate all the aliases to this node.
12233 GatherAllAliases(N, OldChain, Aliases);
12235 // If no operands then chain to entry token.
12236 if (Aliases.size() == 0)
12237 return DAG.getEntryNode();
12239 // If a single operand then chain to it. We don't need to revisit it.
12240 if (Aliases.size() == 1)
12241 return Aliases[0];
12243 // Construct a custom tailored token factor.
12244 return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
12245 }
12247 /// This is the entry point for the file.
12248 void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA,
12249 CodeGenOpt::Level OptLevel) {
12250 /// This is the main entry point to this class.
12251 DAGCombiner(*this, AA, OptLevel).Run(Level);
12252 }