//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffsetted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
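///
/// For illustration, the pattern being matched is roughly the following
/// (hypothetical IR; names and types invented for the example):
///
///   %buf = alloca [32 x i8]
///   %p = bitcast [32 x i8]* %buf to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p,
///       i8* getelementptr inbounds ([32 x i8]* @cst, i64 0, i64 0),
///       i64 32, i32 4, i1 false)
///   ; ... only reads of %buf from here on ...
///
/// in which case reads of %buf can be redirected to read @cst directly.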
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer; if it
        // has any non-zero index, it does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (CallSite CS = I) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (DL) {
    Type *IntPtrTy = DL->getIntPtrType(AI.getType());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
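  //
  // For example (illustrative IR, names invented):
  //   %p = alloca i32, i32 4
  // becomes
  //   %1 = alloca [4 x i32]
  //   %p = getelementptr inbounds [4 x i32]* %1, i64 0, i64 0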
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = DL
                  ? DL->getIntPtrType(AI.getType())
                  : Type::getInt64Ty(AI.getContext());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (DL && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
                                                        AI.getAlignment(),
                                                        DL, AT, &AI, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy) {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.
      break;
    }
  }
  // FIXME: These metadata nodes should really have enumerators and be handled
  // above.
  if (MDNode *N = LI.getMetadata("nontemporal"))
    NewLoad->setMetadata("nontemporal", N);
  if (MDNode *N = LI.getMetadata("llvm.mem.parallel_loop_access"))
    NewLoad->setMetadata("llvm.mem.parallel_loop_access", N);
  return NewLoad;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
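///
/// An illustrative example (IR sketch, names invented):
///
///   %v = load i32* %p
///   %f = bitcast i32 %v to float
///
/// is rewritten to load the desired type directly:
///
///   %pf = bitcast i32* %p to float*
///   %f = load float* %pf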
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // Fold away bit casts of the loaded value by loading the desired type.
  if (LI.hasOneUse())
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
      BC->replaceAllUsesWith(NewLoad);
      IC.EraseInstFromFunction(*BC);
      return &LI;
    }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign =
        getOrEnforceKnownAlignment(Op, DL->getPrefTypeAlignment(LI.getType()),
                                   DL, AT, &LI, DT);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      DL->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
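  //
  // For example (illustrative IR):
  //   store i32 %x, i32* %p
  //   %a = add i32 %y, 1                ; a few non-memory instructions
  //   %v = load i32* %p                 ; can be replaced by %x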
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitCast(AvailableVal, LI.getType()));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable. We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return nullptr;
}

/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible. This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
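///
/// For example (illustrative IR, names invented):
///
///   %pi = bitcast float* %p to i32*
///   store i32 %v, i32* %pi
///
/// becomes
///
///   %v.c = bitcast i32 %v to float
///   store float %v.c, float* %p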
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  Type *DestPTy = CI->getType()->getPointerElementType();
  PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (!SrcTy) return nullptr;

  Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return nullptr;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element. This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed. Check to
  // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return nullptr;

  // If the pointers point into different address spaces don't do the
  // transformation.
  if (SrcTy->getAddressSpace() != CI->getType()->getPointerAddressSpace())
    return nullptr;

  // If the pointers point to values of different sizes don't do the
  // transformation.
  if (!IC.getDataLayout() ||
      IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
          IC.getDataLayout()->getTypeSizeInBits(DestPTy))
    return nullptr;

  // If the pointers point to pointers to different address spaces don't do the
  // transformation. It is not safe to introduce an addrspacecast instruction in
  // this case since, depending on the target, addrspacecast may not be a no-op
  // cast.
  if (SrcPTy->isPointerTy() && DestPTy->isPointerTy() &&
      SrcPTy->getPointerAddressSpace() != DestPTy->getPointerAddressSpace())
    return nullptr;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size. Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Instruction::CastOps opcode = Instruction::BitCast;
  Type *CastSrcTy = DestPTy;
  Type *CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (CastSrcTy->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // If the original pointer is to an aggregate, this store targets its first
  // scalar field; emit a GEP to index down to that field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);

  Value *SIOp0 = SI.getOperand(0);
  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign =
        getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
                                   DL, AT, &SI, DT);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      DL->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
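  //
  // For example (illustrative IR):
  //   store i32 %a, i32* %p
  //   %b = or i32 %a, 255               ; a few non-memory instructions
  //   store i32 %b, i32* %p             ; makes the earlier store dead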
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is
    // loaded from the pointer we're storing to and is in fact the value being
    // stored, then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
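/// As an illustrative IR sketch of the first ('if/then/else') case, the two
/// stores are removed and the common successor ends up with (block and value
/// names invented; "storemerge" is the name the PHI is actually given below):
///
///   merge:
///     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///     store i32 %storemerge, i32* %P
///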
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}