//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
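///
/// Illustrative shape of the pattern (old typed-pointer IR syntax; names
/// made up for exposition):
///   %A = alloca %T
///   %A.i8 = bitcast %T* %A to i8*
///   call void @llvm.memcpy(i8* %A.i8, i8* bitcast (%T* @G to i8*), ...)
///   ... only reads of %A ...
/// where @G is a constant global. All uses of %A can then be rewritten to
/// use @G directly.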
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete,
                               bool IsOffset = false) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  for (Use &U : V->uses()) {
    Instruction *I = cast<Instruction>(U.getUser());

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Simple (non-volatile, non-atomic) loads are always ok; reject
      // anything else.
      if (!LI->isSimple()) return false;
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, ToDelete, IsOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // A GEP with all zero indices does not move the pointer; any other GEP
      // does.
      if (!isOnlyCopiedFromConstantGlobal(
              GEP, TheCopy, ToDelete, IsOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    if (CallSite CS = I) {
      // If this is the function being called then we treat it like a load and
      // ignore it.
      if (CS.isCallee(&U))
        continue;

      // Inalloca arguments are clobbered by the call.
      unsigned ArgNo = CS.getArgumentNo(&U);
      if (CS.isInAllocaArgument(ArgNo))
        return false;

      // If this is a readonly/readnone call site, then we know it is just a
      // load (but one that potentially returns the value itself), so we can
      // ignore it if we know that the value isn't captured.
      if (CS.onlyReadsMemory() &&
          (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
        continue;

      // If this is being passed as a byval argument, the caller is making a
      // copy, so it is only a read of the alloca.
      if (CS.isByValArgument(ArgNo))
        continue;
    }

    // Lifetime intrinsics can be handled by the caller.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        assert(II->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(II);
        continue;
      }
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
    if (MI == 0)
      return false;

    // If the transfer is using the alloca as its source, then ignore it
    // since it is a load (unless the transfer is volatile).
    if (U.getOperandNo() == 1) {
      if (MI->isVolatile()) return false;
      continue;
    }

    // If we have already seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (IsOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (U.getOperandNo() != 0) return false;

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!pointsToConstantGlobal(MI->getSource()))
      return false;

    // Otherwise, the transform is safe. Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove if the
/// specified alloca is only modified by a copy from a constant global, and
/// null otherwise. If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = 0;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return 0;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (DL) {
    Type *IntPtrTy = DL->getIntPtrType(AI.getType());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
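  // Illustrative (old typed-pointer IR syntax):
  //   %p = alloca i32, i64 4
  // becomes:
  //   %p1 = alloca [4 x i32]
  //   %p.sub = getelementptr inbounds [4 x i32]* %p1, i64 0, i64 0
  // and all uses of %p are replaced with %p.sub.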
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible... also skip interleaved debug info.
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = DL
                  ? DL->getIntPtrType(AI.getType())
                  : Type::getInt64Ty(AI.getContext());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (DL && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero-sized alloca there is no point in doing an array
      // allocation. This is helpful if the array size is a complicated
      // expression not used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance, as the array size was already forced to a constant
        // earlier.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
            DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation. If this is the case, we can change all users to use the
    // constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
                                                        AI.getAlignment(), DL);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}


/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
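///
/// Illustrative (old typed-pointer IR syntax; names made up), for two
/// same-sized pointee types:
///   %c = bitcast i8** %p to i32**
///   %v = load i32** %c
/// becomes:
///   %w = load i8** %p
///   %v = bitcast i8* %w to i32*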
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const DataLayout *DL) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed. Check to
      // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Type *IdxTy = DL
                        ? DL->getIntPtrType(SrcTy)
                        : Type::getInt64Ty(SrcTy->getContext());
            Value *Idx = Constant::getNullValue(IdxTy);
            Value *Idxs[2] = { Idx, Idx };
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getDataLayout() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is
          // then cast to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPtrOrPtrVectorTy() ==
           LI.getType()->isPtrOrPtrVectorTy()) &&
          IC.getDataLayout()->getTypeSizeInBits(SrcPTy) ==
          IC.getDataLayout()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size. Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        PointerType *OldTy = dyn_cast<PointerType>(NewLoad->getType());
        PointerType *NewTy = dyn_cast<PointerType>(LI.getType());
        if (OldTy && NewTy &&
            OldTy->getAddressSpace() != NewTy->getAddressSpace()) {
          return new AddrSpaceCastInst(NewLoad, LI.getType());
        }

        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, DL->getPrefTypeAlignment(LI.getType()),
                                 DL);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      DL->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))
      return Res;

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
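  // Illustrative: given "store i32 %x, i32* %p" followed shortly by
  // "%y = load i32* %p" with no intervening write, %y is replaced by %x.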
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable. We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}

/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible. This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
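///
/// Illustrative (old typed-pointer IR syntax; names made up), storing through
/// a cast to the first field of an aggregate:
///   %c = bitcast { i32, float }* %P to i32*
///   store i32 %x, i32* %c
/// becomes:
///   %g = getelementptr inbounds { i32, float }* %P, i32 0, i32 0
///   store i32 %x, i32* %g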
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element. This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array or struct, the code below will not succeed.
  // Check to see if a trivial 'gep P, 0, 0' will help matters. Only do this
  // for constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getDataLayout() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
      IC.getDataLayout()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size. Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Instruction::CastOps opcode = Instruction::BitCast;
  Type* CastSrcTy = DestPTy;
  Type* CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (CastSrcTy->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // SIOp0 is a pointer to aggregate and this is a store to the first field;
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);

  Value *SIOp0 = SI.getOperand(0);
  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
                                 DL);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      DL->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return 0;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
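  // Illustrative: given "store i32 %a, i32* %p" followed shortly by
  // "store i32 %b, i32* %p" with nothing in between that reads or writes
  // memory, the first store is dead and is removed.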
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is
    // loaded from the pointer we're storing to and produces the value we're
    // storing, then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;


  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move the store to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
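/// Illustrative result for the if/then/else case (old typed-pointer IR
/// syntax; block and value names made up): both original stores are removed
/// and the successor block gets:
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, i32* %P
///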
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if the relevant blocks aren't all distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of
    // the destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If both stores carry TBAA tags, merge them into the most generic tag
  // that covers both and attach it to the new store.
  if (MDNode *TBAATag = SI.getMetadata(LLVMContext::MD_tbaa))
    if ((TBAATag = MDNode::getMostGenericTBAA(TBAATag,
                               OtherStore->getMetadata(LLVMContext::MD_tbaa))))
      NewSI->setMetadata(LLVMContext::MD_tbaa, TBAATag);


  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}