1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
15 //
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "regalloc"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "llvm/Value.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineDominators.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/Passes.h"
27 #include "llvm/CodeGen/VirtRegMap.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/ADT/DenseSet.h"
36 #include "llvm/ADT/STLExtras.h"
37 #include "LiveRangeCalc.h"
38 #include <algorithm>
39 #include <limits>
40 #include <cmath>
41 using namespace llvm;
43 // Switch to the new experimental algorithm for computing live intervals.
44 static cl::opt<bool>
45 NewLiveIntervals("new-live-intervals", cl::Hidden,
46 cl::desc("Use new algorithm forcomputing live intervals"));
// Static pass identifier; its address (not value) identifies the pass.
char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;

// Register the pass and its analysis dependencies with the PassRegistry.
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
// Declare which analyses this pass requires and which it preserves.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  // This pass never modifies the CFG.
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  // Dominators and SlotIndexes are required transitively: they must stay
  // alive as long as this analysis is alive.
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
// Constructor: DomTree and LRCalc are filled in lazily by
// runOnMachineFunction.
LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
  DomTree(0), LRCalc(0) {
  // Make sure the pass is registered before first use.
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
LiveIntervals::~LiveIntervals() {
  // LRCalc is allocated lazily in runOnMachineFunction; release it here.
  delete LRCalc;
}
82 void LiveIntervals::releaseMemory() {
83 // Free the live intervals themselves.
84 for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
85 delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
86 VirtRegIntervals.clear();
87 RegMaskSlots.clear();
88 RegMaskBits.clear();
89 RegMaskBlocks.clear();
91 for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i)
92 delete RegUnitIntervals[i];
93 RegUnitIntervals.clear();
95 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
96 VNInfoAllocator.Reset();
97 }
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache pointers to the function and every analysis this pass uses.
  MF = &fn;
  MRI = &MF->getRegInfo();
  TM = &fn.getTarget();
  TRI = TM->getRegisterInfo();
  TII = TM->getInstrInfo();
  AA = &getAnalysis<AliasAnalysis>();
  LV = &getAnalysis<LiveVariables>();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  // LRCalc is created once and reused across runs; it is freed in the
  // destructor.
  if (!LRCalc)
    LRCalc = new LiveRangeCalc();

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  if (NewLiveIntervals) {
    // This is the new way of computing live intervals.
    // It is independent of LiveVariables, and it can run at any time.
    computeVirtRegs();
    computeRegMasks();
  } else {
    // This is the old way of computing live intervals.
    // It depends on LiveVariables.
    computeIntervals();
  }
  // Regunit (fixed physreg) liveness is computed the same way in both modes.
  computeLiveInRegUnits();

  DEBUG(dump());
  return true;
}
133 /// print - Implement the dump method.
134 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
135 OS << "********** INTERVALS **********\n";
137 // Dump the regunits.
138 for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i)
139 if (LiveInterval *LI = RegUnitIntervals[i])
140 OS << PrintRegUnit(i, TRI) << " = " << *LI << '\n';
142 // Dump the virtregs.
143 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
144 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
145 if (hasInterval(Reg))
146 OS << PrintReg(Reg) << " = " << getInterval(Reg) << '\n';
147 }
149 OS << "RegMasks:";
150 for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i)
151 OS << ' ' << RegMaskSlots[i];
152 OS << '\n';
154 printInstrs(OS);
155 }
// Print the function's machine instructions annotated with slot indexes.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug helper: print the annotated instruction stream to the debug stream.
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
#endif
168 static
169 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
170 unsigned Reg = MI.getOperand(MOIdx).getReg();
171 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
172 const MachineOperand &MO = MI.getOperand(i);
173 if (!MO.isReg())
174 continue;
175 if (MO.getReg() == Reg && MO.isDef()) {
176 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
177 MI.getOperand(MOIdx).getSubReg() &&
178 (MO.getSubReg() || MO.isImplicit()));
179 return true;
180 }
181 }
182 return false;
183 }
185 /// isPartialRedef - Return true if the specified def at the specific index is
186 /// partially re-defining the specified live interval. A common case of this is
187 /// a definition of the sub-register.
188 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
189 LiveInterval &interval) {
190 if (!MO.getSubReg() || MO.isEarlyClobber())
191 return false;
193 SlotIndex RedefIndex = MIIdx.getRegSlot();
194 const LiveRange *OldLR =
195 interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
196 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
197 if (DefMI != 0) {
198 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
199 }
200 return false;
201 }
/// handleVirtualRegisterDef - Extend `interval` to account for the def of
/// interval.reg by operand MOIdx of instruction mi in block mbb at MIIdx.
/// The first def seen for a vreg builds the entire interval from the
/// LiveVariables kill/alive-block info; later defs split or extend it
/// (two-address redefs, partial sub-register redefs, or phi-joins).
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, TRI));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = LV->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());

    // Make sure the first definition is not a partial redefinition.
    assert(!MO.readsReg() && "First def cannot also read virtual register "
           "missing <undef> flag?");

    VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot();
      else
        // The defining instruction is also the kill: a dead def.
        killIdx = defIndex.getDeadSlot();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = LV->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = MF->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, VNInfoAllocator);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber());

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getRegSlot();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(),
                                    OldValNo));

      DEBUG(dbgs() << " RESULT: " << interval);
    } else if (LV->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getRegSlot();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getRegSlot(true);

      VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
375 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
376 MachineBasicBlock::iterator MI,
377 SlotIndex MIIdx,
378 MachineOperand& MO,
379 unsigned MOIdx) {
380 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
381 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
382 getOrCreateInterval(MO.getReg()));
383 }
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: " << MF->getName() << '\n');

  RegMaskBlocks.resize(MF->getNumBlockIDs());

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Record where this block's slice of RegMaskSlots starts.
    RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size();

    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = Indexes->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUE instructions have no slot index; don't advance MIIndex.
      if (MI->isDebugValue())
        continue;
      assert(Indexes->getInstructionFromIndex(MIIndex) == MI &&
             "Lost SlotIndex synchronization");

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);

        // Collect register masks.
        if (MO.isRegMask()) {
          RegMaskSlots.push_back(MIIndex.getRegSlot());
          RegMaskBits.push_back(MO.getRegMask());
          continue;
        }

        if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = Indexes->getNextNonNullIndex(MIIndex);
    }

    // Compute the number of register mask instructions in this block.
    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
    RMB.second = RegMaskSlots.size() - RMB.first;
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
460 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
461 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
462 return new LiveInterval(reg, Weight);
463 }
/// computeVirtRegInterval - Compute the live interval of a virtual register,
/// based on defs and uses.
void LiveIntervals::computeVirtRegInterval(LiveInterval *LI) {
  assert(LRCalc && "LRCalc not initialized.");
  assert(LI->empty() && "Should only compute empty intervals.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  // Create a value at each def first, then extend each value to cover its
  // uses.
  LRCalc->createDeadDefs(LI);
  LRCalc->extendToUses(LI);
}
476 void LiveIntervals::computeVirtRegs() {
477 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
478 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
479 if (MRI->reg_nodbg_empty(Reg))
480 continue;
481 LiveInterval *LI = createInterval(Reg);
482 VirtRegIntervals[Reg] = LI;
483 computeVirtRegInterval(LI);
484 }
485 }
487 void LiveIntervals::computeRegMasks() {
488 RegMaskBlocks.resize(MF->getNumBlockIDs());
490 // Find all instructions with regmask operands.
491 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
492 MBBI != E; ++MBBI) {
493 MachineBasicBlock *MBB = MBBI;
494 std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
495 RMB.first = RegMaskSlots.size();
496 for (MachineBasicBlock::iterator MI = MBB->begin(), ME = MBB->end();
497 MI != ME; ++MI)
498 for (MIOperands MO(MI); MO.isValid(); ++MO) {
499 if (!MO->isRegMask())
500 continue;
501 RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
502 RegMaskBits.push_back(MO->getRegMask());
503 }
504 // Compute the number of register mask instructions in this block.
505 RMB.second = RegMaskSlots.size() - RMB.first;
506 }
507 }
509 //===----------------------------------------------------------------------===//
510 // Register Unit Liveness
511 //===----------------------------------------------------------------------===//
512 //
513 // Fixed interference typically comes from ABI boundaries: Function arguments
514 // and return values are passed in fixed registers, and so are exception
515 // pointers entering landing pads. Certain instructions require values to be
516 // present in specific registers. That is also represented through fixed
517 // interference.
518 //
520 /// computeRegUnitInterval - Compute the live interval of a register unit, based
521 /// on the uses and defs of aliasing registers. The interval should be empty,
522 /// or contain only dead phi-defs from ABI blocks.
523 void LiveIntervals::computeRegUnitInterval(LiveInterval *LI) {
524 unsigned Unit = LI->reg;
526 assert(LRCalc && "LRCalc not initialized.");
527 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
529 // The physregs aliasing Unit are the roots and their super-registers.
530 // Create all values as dead defs before extending to uses. Note that roots
531 // may share super-registers. That's OK because createDeadDefs() is
532 // idempotent. It is very rare for a register unit to have multiple roots, so
533 // uniquing super-registers is probably not worthwhile.
534 for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
535 unsigned Root = *Roots;
536 if (!MRI->reg_empty(Root))
537 LRCalc->createDeadDefs(LI, Root);
538 for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
539 if (!MRI->reg_empty(*Supers))
540 LRCalc->createDeadDefs(LI, *Supers);
541 }
542 }
544 // Now extend LI to reach all uses.
545 // Ignore uses of reserved registers. We only track defs of those.
546 for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
547 unsigned Root = *Roots;
548 if (!MRI->isReserved(Root) && !MRI->reg_empty(Root))
549 LRCalc->extendToUses(LI, Root);
550 for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
551 unsigned Reg = *Supers;
552 if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
553 LRCalc->extendToUses(LI, Reg);
554 }
555 }
556 }
/// computeLiveInRegUnits - Precompute the live ranges of any register units
/// that are live-in to an ABI block somewhere. Register values can appear
/// without a corresponding def when entering the entry block or a landing pad.
///
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitIntervals.resize(TRI->getNumRegUnits());
  DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the intervals allocated.
  SmallVector<LiveInterval*, 8> NewIntvs;

  // Check all basic blocks for live-ins.
  for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
       MFI != MFE; ++MFI) {
    const MachineBasicBlock *MBB = MFI;

    // We only care about ABI blocks: Entry + landing pads.
    if ((MFI != MF->begin() && !MBB->isLandingPad()) || MBB->livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
    DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
    for (MachineBasicBlock::livein_iterator LII = MBB->livein_begin(),
         LIE = MBB->livein_end(); LII != LIE; ++LII) {
      for (MCRegUnitIterator Units(*LII, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        LiveInterval *Intv = RegUnitIntervals[Unit];
        // Lazily create the regunit interval the first time this unit is
        // seen as a live-in.
        if (!Intv) {
          Intv = RegUnitIntervals[Unit] = new LiveInterval(Unit, HUGE_VALF);
          NewIntvs.push_back(Intv);
        }
        VNInfo *VNI = Intv->createDeadDef(Begin, getVNInfoAllocator());
        (void)VNI;
        DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    DEBUG(dbgs() << '\n');
  }
  DEBUG(dbgs() << "Created " << NewIntvs.size() << " new intervals.\n");

  // Compute the 'normal' part of the intervals.
  for (unsigned i = 0, e = NewIntvs.size(); i != e; ++i)
    computeRegUnitInterval(NewIntvs[i]);
}
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
/// Returns true if a dead PHI value was removed, meaning the interval may
/// have been separated into disconnected pieces.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveRangeQuery LRQ(*li, Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
            << "Warning: Instr claims to read non-existent value in "
            << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  // Each value initially covers only its dead-def slot.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI);
      assert(li->getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values: values whose range never grew past the dead-def
  // slot were not reached by any use.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
737 void LiveIntervals::extendToIndices(LiveInterval *LI,
738 ArrayRef<SlotIndex> Indices) {
739 assert(LRCalc && "LRCalc not initialized.");
740 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
741 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
742 LRCalc->extend(LI, Indices[i]);
743 }
/// pruneValue - Remove the liveness of the value live out at Kill, starting
/// at Kill and following every CFG path that stays inside that value's live
/// range. If EndPoints is non-null, the end point of each removed segment is
/// recorded there so the caller can re-extend the range later.
void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill,
                               SmallVectorImpl<SlotIndex> *EndPoints) {
  LiveRangeQuery LRQ(*LI, Kill);
  VNInfo *VNI = LRQ.valueOut();
  // No value live out at Kill: nothing to prune.
  if (!VNI)
    return;

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
  SlotIndex MBBStart, MBBEnd;
  tie(MBBStart, MBBEnd) = Indexes->getMBBRange(KillMBB);

  // If VNI isn't live out from KillMBB, the value is trivially pruned.
  if (LRQ.endPoint() < MBBEnd) {
    LI->removeRange(Kill, LRQ.endPoint());
    if (EndPoints) EndPoints->push_back(LRQ.endPoint());
    return;
  }

  // VNI is live out of KillMBB.
  LI->removeRange(Kill, MBBEnd);
  if (EndPoints) EndPoints->push_back(MBBEnd);

  // Find all blocks that are reachable from KillMBB without leaving VNI's live
  // range. It is possible that KillMBB itself is reachable, so start a DFS
  // from each successor.
  typedef SmallPtrSet<MachineBasicBlock*, 9> VisitedTy;
  VisitedTy Visited;
  for (MachineBasicBlock::succ_iterator
       SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
       SuccI != SuccE; ++SuccI) {
    for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
         I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
         I != E;) {
      MachineBasicBlock *MBB = *I;

      // Check if VNI is live in to MBB.
      tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
      LiveRangeQuery LRQ(*LI, MBBStart);
      if (LRQ.valueIn() != VNI) {
        // This block isn't part of the VNI live range. Prune the search.
        I.skipChildren();
        continue;
      }

      // Prune the search if VNI is killed in MBB.
      if (LRQ.endPoint() < MBBEnd) {
        LI->removeRange(MBBStart, LRQ.endPoint());
        if (EndPoints) EndPoints->push_back(LRQ.endPoint());
        I.skipChildren();
        continue;
      }

      // VNI is live through MBB; remove all of it and keep searching
      // successors.
      LI->removeRange(MBBStart, MBBEnd);
      if (EndPoints) EndPoints->push_back(MBBEnd);
      ++I;
    }
  }
}
805 //===----------------------------------------------------------------------===//
806 // Register allocator hooks.
807 //
/// addKillFlags - Add a kill flag at every live range end point that lands on
/// an instruction, unless the assigned physical register is still live there
/// through one of its register units (in which case any existing kill flag is
/// cleared instead).
void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
  // Keep track of regunit ranges.
  SmallVector<std::pair<LiveInterval*, LiveInterval::iterator>, 8> RU;

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    LiveInterval *LI = &getInterval(Reg);
    if (LI->empty())
      continue;

    // Find the regunit intervals for the assigned register. They may overlap
    // the virtual register live range, cancelling any kills.
    RU.clear();
    for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
         ++Units) {
      LiveInterval *RUInt = &getRegUnit(*Units);
      if (RUInt->empty())
        continue;
      // Pair each regunit interval with an iterator positioned at LI's
      // first end point; it is advanced monotonically below.
      RU.push_back(std::make_pair(RUInt, RUInt->find(LI->begin()->end)));
    }

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
         ++RI) {
      // A block index indicates an MBB edge.
      if (RI->end.isBlock())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;

      // Check if any of the regunits are live beyond the end of RI. That could
      // happen when a physreg is defined as a copy of a virtreg:
      //
      //   %EAX = COPY %vreg5
      //   FOO %vreg5 <--- MI, cancel kill because %EAX is live.
      //   BAR %EAX<kill>
      //
      // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
      bool CancelKill = false;
      for (unsigned u = 0, e = RU.size(); u != e; ++u) {
        LiveInterval *RInt = RU[u].first;
        LiveInterval::iterator &I = RU[u].second;
        if (I == RInt->end())
          continue;
        I = RInt->advanceTo(I, RI->end);
        if (I == RInt->end() || I->start >= RI->end)
          continue;
        // I is overlapping RI.
        CancelKill = true;
        break;
      }
      if (CancelKill)
        MI->clearRegisterKills(Reg, NULL);
      else
        MI->addRegisterKilled(Reg, NULL);
    }
  }
}
871 MachineBasicBlock*
872 LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
873 // A local live range must be fully contained inside the block, meaning it is
874 // defined and killed at instructions, not at block boundaries. It is not
875 // live in or or out of any block.
876 //
877 // It is technically possible to have a PHI-defined live range identical to a
878 // single block, but we are going to return false in that case.
880 SlotIndex Start = LI.beginIndex();
881 if (Start.isBlock())
882 return NULL;
884 SlotIndex Stop = LI.endIndex();
885 if (Stop.isBlock())
886 return NULL;
888 // getMBBFromIndex doesn't need to search the MBB table when both indexes
889 // belong to proper instructions.
890 MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
891 MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
892 return MBB1 == MBB2 ? MBB1 : NULL;
893 }
895 bool
896 LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
897 for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
898 I != E; ++I) {
899 const VNInfo *PHI = *I;
900 if (PHI->isUnused() || !PHI->isPHIDef())
901 continue;
902 const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
903 // Conservatively return true instead of scanning huge predecessor lists.
904 if (PHIMBB->pred_size() > 100)
905 return true;
906 for (MachineBasicBlock::const_pred_iterator
907 PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
908 if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
909 return true;
910 }
911 return false;
912 }
914 float
915 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
916 // Limit the loop depth ridiculousness.
917 if (loopDepth > 200)
918 loopDepth = 200;
920 // The loop depth is used to roughly estimate the number of times the
921 // instruction is executed. Something like 10^d is simple, but will quickly
922 // overflow a float. This expression behaves like 10^d for small d, but is
923 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
924 // headroom before overflow.
925 // By the way, powf() might be unavailable here. For consistency,
926 // We may take pow(double,double).
927 float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
929 return (isDef + isUse) * lc;
930 }
932 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
933 MachineInstr* startInst) {
934 LiveInterval& Interval = getOrCreateInterval(reg);
935 VNInfo* VN = Interval.getNextValue(
936 SlotIndex(getInstructionIndex(startInst).getRegSlot()),
937 getVNInfoAllocator());
938 LiveRange LR(
939 SlotIndex(getInstructionIndex(startInst).getRegSlot()),
940 getMBBEndIdx(startInst->getParent()), VN);
941 Interval.addRange(LR);
943 return LR;
944 }
947 //===----------------------------------------------------------------------===//
948 // Register mask functions
949 //===----------------------------------------------------------------------===//
bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  // Intersect LI with every register mask slot (e.g. a call's clobber mask)
  // it overlaps. On the first overlap, UsableRegs is initialized to all ones
  // and then narrowed by each overlapping mask. Returns true iff LI crosses
  // at least one regmask slot; UsableRegs is only meaningful in that case.
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI =
    std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  bool Found = false;
  // Advance LiveI and SlotI in lock-step; each iteration of the outer loop
  // positions SlotI at or beyond the current segment's start.
  for (;;) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}
//===----------------------------------------------------------------------===//
//                           HMEditor class.
//===----------------------------------------------------------------------===//
// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  // Slot the instruction moved from / to. Their relative order selects
  // handleMoveDown vs. handleMoveUp in updateRange().
  SlotIndex OldIdx;
  SlotIndex NewIdx;
  // Intervals already processed, so an interval is updated only once even if
  // the instruction mentions the same register in several operands.
  SmallPtrSet<LiveInterval*, 8> Updated;
  bool UpdateFlags;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI,
           SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
    : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
      UpdateFlags(UpdateFlags) {}

  // FIXME: UpdateFlags is a workaround that creates live intervals for all
  // physregs, even those that aren't needed for regalloc, in order to update
  // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
  // flags, and postRA passes will use a live register utility instead.
  //
  // Returns the regunit's live interval; when UpdateFlags is set, the
  // interval is computed on demand, otherwise only a cached one (possibly
  // NULL) is returned.
  LiveInterval *getRegUnitLI(unsigned Unit) {
    if (UpdateFlags)
      return &LIS.getRegUnit(Unit);
    return LIS.getCachedRegUnit(Unit);
  }

  /// Update all live ranges touched by MI, assuming a move from OldIdx to
  /// NewIdx.
  void updateAllRanges(MachineInstr *MI) {
    DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
    bool hasRegMask = false;
    for (MIOperands MO(MI); MO.isValid(); ++MO) {
      if (MO->isRegMask())
        hasRegMask = true;
      if (!MO->isReg())
        continue;
      // Aggressively clear all kill flags.
      // They are reinserted by VirtRegRewriter.
      if (MO->isUse())
        MO->setIsKill(false);

      unsigned Reg = MO->getReg();
      if (!Reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        updateRange(LIS.getInterval(Reg));
        continue;
      }

      // For physregs, only update the regunits that actually have a
      // precomputed live range.
      for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
        if (LiveInterval *LI = getRegUnitLI(*Units))
          updateRange(*LI);
    }
    // Regmask slots are stored by index in LIS; renumber them last.
    if (hasRegMask)
      updateRegMaskSlots();
  }

private:
  /// Update a single live range, assuming an instruction has been moved from
  /// OldIdx to NewIdx.
  void updateRange(LiveInterval &LI) {
    // Visit each interval at most once per moved instruction.
    if (!Updated.insert(&LI))
      return;
    DEBUG({
      dbgs() << " ";
      if (TargetRegisterInfo::isVirtualRegister(LI.reg))
        dbgs() << PrintReg(LI.reg);
      else
        dbgs() << PrintRegUnit(LI.reg, &TRI);
      dbgs() << ":\t" << LI << '\n';
    });
    if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
      handleMoveDown(LI);
    else
      handleMoveUp(LI);
    DEBUG(dbgs() << " -->\t" << LI << '\n');
    LI.verify();
  }

  /// Update LI to reflect an instruction has been moved downwards from OldIdx
  /// to NewIdx.
  ///
  /// 1. Live def at OldIdx:
  ///    Move def to NewIdx, assert endpoint after NewIdx.
  ///
  /// 2. Live def at OldIdx, killed at NewIdx:
  ///    Change to dead def at NewIdx.
  ///    (Happens when bundling def+kill together).
  ///
  /// 3. Dead def at OldIdx:
  ///    Move def to NewIdx, possibly across another live value.
  ///
  /// 4. Def at OldIdx AND at NewIdx:
  ///    Remove live range [OldIdx;NewIdx) and value defined at OldIdx.
  ///    (Happens when bundling multiple defs together).
  ///
  /// 5. Value read at OldIdx, killed before NewIdx:
  ///    Extend kill to NewIdx.
  ///
  void handleMoveDown(LiveInterval &LI) {
    // First look for a kill at OldIdx.
    LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex());
    LiveInterval::iterator E = LI.end();
    // Is LI even live at OldIdx?
    if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
      return;

    // Handle a live-in value.
    if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
      bool isKill = SlotIndex::isSameInstr(OldIdx, I->end);
      // If the live-in value already extends to NewIdx, there is nothing to do.
      if (!SlotIndex::isEarlierInstr(I->end, NewIdx))
        return;
      // Aggressively remove all kill flags from the old kill point.
      // Kill flags shouldn't be used while live intervals exist, they will be
      // reinserted by VirtRegRewriter.
      if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end))
        for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO)
          if (MO->isReg() && MO->isUse())
            MO->setIsKill(false);
      // Adjust I->end to reach NewIdx. This may temporarily make LI invalid by
      // overlapping ranges. Case 5 above.
      I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
      // If this was a kill, there may also be a def. Otherwise we're done.
      if (!isKill)
        return;
      ++I;
    }

    // Check for a def at OldIdx.
    if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start))
      return;
    // We have a def at OldIdx.
    VNInfo *DefVNI = I->valno;
    assert(DefVNI->def == I->start && "Inconsistent def");
    DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
    // If the defined value extends beyond NewIdx, just move the def down.
    // This is case 1 above.
    if (SlotIndex::isEarlierInstr(NewIdx, I->end)) {
      I->start = DefVNI->def;
      return;
    }
    // The remaining possibilities are now:
    // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx).
    // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot().
    // In either case, it is possible that there is an existing def at NewIdx.
    assert((I->end == OldIdx.getDeadSlot() ||
            SlotIndex::isSameInstr(I->end, NewIdx)) &&
            "Cannot move def below kill");
    LiveInterval::iterator NewI = LI.advanceTo(I, NewIdx.getRegSlot());
    if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) {
      // There is an existing def at NewIdx, case 4 above. The def at OldIdx is
      // coalesced into that value.
      assert(NewI->valno != DefVNI && "Multiple defs of value?");
      LI.removeValNo(DefVNI);
      return;
    }
    // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx.
    // If the def at OldIdx was dead, we allow it to be moved across other LI
    // values. The new range should be placed immediately before NewI, move any
    // intermediate ranges up.
    assert(NewI != I && "Inconsistent iterators");
    std::copy(llvm::next(I), NewI, I);
    *llvm::prior(NewI) = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
  }

  /// Update LI to reflect an instruction has been moved upwards from OldIdx
  /// to NewIdx.
  ///
  /// 1. Live def at OldIdx:
  ///    Hoist def to NewIdx.
  ///
  /// 2. Dead def at OldIdx:
  ///    Hoist def+end to NewIdx, possibly move across other values.
  ///
  /// 3. Dead def at OldIdx AND existing def at NewIdx:
  ///    Remove value defined at OldIdx, coalescing it with existing value.
  ///
  /// 4. Live def at OldIdx AND existing def at NewIdx:
  ///    Remove value defined at NewIdx, hoist OldIdx def to NewIdx.
  ///    (Happens when bundling multiple defs together).
  ///
  /// 5. Value killed at OldIdx:
  ///    Hoist kill to NewIdx, then scan for last kill between NewIdx and
  ///    OldIdx.
  ///
  void handleMoveUp(LiveInterval &LI) {
    // First look for a kill at OldIdx.
    LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex());
    LiveInterval::iterator E = LI.end();
    // Is LI even live at OldIdx?
    if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
      return;

    // Handle a live-in value.
    if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
      // If the live-in value isn't killed here, there is nothing to do.
      if (!SlotIndex::isSameInstr(OldIdx, I->end))
        return;
      // Adjust I->end to end at NewIdx. If we are hoisting a kill above
      // another use, we need to search for that use. Case 5 above.
      I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
      ++I;
      // If OldIdx also defines a value, there couldn't have been another use.
      if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
        // No def, search for the new kill.
        // This can never be an early clobber kill since there is no def.
        llvm::prior(I)->end = findLastUseBefore(LI.reg).getRegSlot();
        return;
      }
    }

    // Now deal with the def at OldIdx.
    assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?");
    VNInfo *DefVNI = I->valno;
    assert(DefVNI->def == I->start && "Inconsistent def");
    DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());

    // Check for an existing def at NewIdx. (find cannot return end() here:
    // LI is known live at OldIdx and NewIdx is earlier, so some segment ends
    // after NewIdx.getRegSlot().)
    LiveInterval::iterator NewI = LI.find(NewIdx.getRegSlot());
    if (SlotIndex::isSameInstr(NewI->start, NewIdx)) {
      assert(NewI->valno != DefVNI && "Same value defined more than once?");
      // There is an existing def at NewIdx.
      if (I->end.isDead()) {
        // Case 3: Remove the dead def at OldIdx.
        LI.removeValNo(DefVNI);
        return;
      }
      // Case 4: Replace def at NewIdx with live def at OldIdx.
      I->start = DefVNI->def;
      LI.removeValNo(NewI->valno);
      return;
    }

    // There is no existing def at NewIdx. Hoist DefVNI.
    if (!I->end.isDead()) {
      // Leave the end point of a live def.
      I->start = DefVNI->def;
      return;
    }

    // DefVNI is a dead def. It may have been moved across other values in LI,
    // so move I up to NewI. Slide [NewI;I) down one position.
    std::copy_backward(NewI, I, llvm::next(I));
    *NewI = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
  }

  // Renumber the regmask slot recorded for the moved instruction, keeping
  // RegMaskSlots sorted.
  void updateRegMaskSlots() {
    SmallVectorImpl<SlotIndex>::iterator RI =
      std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
                       OldIdx);
    // NOTE(review): the search key is OldIdx but the assert expects the slot
    // to equal OldIdx.getRegSlot(); this relies on no other recorded slot
    // sorting between the two indexes — confirm against SlotIndex ordering.
    assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
           "No RegMask at OldIdx.");
    *RI = NewIdx.getRegSlot();
    assert((RI == LIS.RegMaskSlots.begin() ||
            SlotIndex::isEarlierInstr(*llvm::prior(RI), *RI)) &&
            "Cannot move regmask instruction above another call");
    assert((llvm::next(RI) == LIS.RegMaskSlots.end() ||
            SlotIndex::isEarlierInstr(*RI, *llvm::next(RI))) &&
            "Cannot move regmask instruction below another call");
  }

  // Return the last use of reg between NewIdx and OldIdx.
  SlotIndex findLastUseBefore(unsigned Reg) {
    SlotIndex LastUse = NewIdx;

    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      // Virtual register: scan all non-debug uses and keep the latest slot
      // strictly inside (NewIdx;OldIdx).
      for (MachineRegisterInfo::use_nodbg_iterator
             UI = MRI.use_nodbg_begin(Reg),
             UE = MRI.use_nodbg_end();
           UI != UE; UI.skipInstruction()) {
        const MachineInstr* MI = &*UI;
        SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
        if (InstSlot > LastUse && InstSlot < OldIdx)
          LastUse = InstSlot;
      }
    } else {
      // Physical register: Reg is treated as a register unit (see the
      // hasRegUnit query below). Walk the instructions between NewIdx and
      // OldIdx looking for physreg operands on that unit.
      MachineInstr* MI = LIS.getSlotIndexes()->getInstructionFromIndex(NewIdx);
      MachineBasicBlock::iterator MII(MI);
      ++MII;
      MachineBasicBlock* MBB = MI->getParent();
      for (; MII != MBB->end() && LIS.getInstructionIndex(MII) < OldIdx; ++MII){
        for (MachineInstr::mop_iterator MOI = MII->operands_begin(),
                                        MOE = MII->operands_end();
             MOI != MOE; ++MOI) {
          const MachineOperand& mop = *MOI;
          if (!mop.isReg() || mop.getReg() == 0 ||
              TargetRegisterInfo::isVirtualRegister(mop.getReg()))
            continue;

          if (TRI.hasRegUnit(mop.getReg(), Reg))
            LastUse = LIS.getInstructionIndex(MII);
        }
      }
    }
    return LastUse;
  }
};
1313 void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) {
1314 assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
1315 SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1316 Indexes->removeMachineInstrFromMaps(MI);
1317 SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
1318 assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
1319 OldIndex < getMBBEndIdx(MI->getParent()) &&
1320 "Cannot handle moves across basic block boundaries.");
1322 HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
1323 HME.updateAllRanges(MI);
1324 }
1326 void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
1327 MachineInstr* BundleStart,
1328 bool UpdateFlags) {
1329 SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1330 SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
1331 HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
1332 HME.updateAllRanges(MI);
1333 }