//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo(const AMDGPUSubtarget &st)
  : AMDGPURegisterInfo(st)
  { }

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::EXEC);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers,
  // but this seems likely to result in bugs, so mark them as reserved.
  Reserved.set(AMDGPU::EXEC_LO);
  Reserved.set(AMDGPU::EXEC_HI);

  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
  Reserved.set(AMDGPU::FLAT_SCR);
  Reserved.set(AMDGPU::FLAT_SCR_LO);
  Reserved.set(AMDGPU::FLAT_SCR_HI);

  // Reserve some VGPRs to use as temp registers in case we have to spill VGPRs.
  Reserved.set(AMDGPU::VGPR255);
  Reserved.set(AMDGPU::VGPR254);

  return Reserved;
}

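// The pressure limit is simply the number of registers in the class; no
// headroom is reserved here.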
unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {
  return RC->getNumRegs();
}

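// Scavenging is required whenever the function has stack objects, since the
// spill expansion below may need to scavenge registers for offsets and copies.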
bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}

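// Map a spill pseudo opcode to the number of 32-bit sub-registers it covers.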
static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

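/// \brief Expand a VGPR spill pseudo into a sequence of 32-bit scratch buffer
/// loads or stores, one per sub-register. If the immediate offset does not
/// fit in the 12-bit encoding, it is folded into a scavenged SGPR before the
/// loop.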
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {

  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  if (!isUInt<12>(Offset + Size)) {
    // The immediate offset field is only 12 bits, so fold the offset into
    // a scavenged SGPR and use that as SOffset instead.
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;
    bool IsKill = (i == e - 1);

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(IsLoad))
      .addReg(ScratchRsrcReg, getKillRegState(IsKill))
      .addImm(Offset)
      .addReg(SOffset, getKillRegState(IsKill))
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad));
  }
}

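// A frame index is eliminated in one of three ways: SGPR spill pseudos become
// V_WRITELANE/V_READLANE sequences into a spill VGPR, VGPR spill pseudos
// become scratch buffer accesses, and any other use is folded to an immediate
// offset (materialized into a VGPR if the immediate form is illegal).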
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      if (Spill.VGPR == AMDGPU::NoRegister) {
        LLVMContext &Ctx = MF->getFunction()->getContext();
        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
      }

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
              .addReg(SubReg)
              .addImm(Spill.Lane);
    }
    MI->eraseFromParent();
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      bool isM0 = SubReg == AMDGPU::M0;
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      if (Spill.VGPR == AMDGPU::NoRegister) {
        LLVMContext &Ctx = MF->getFunction()->getContext();
        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
      }

      if (isM0) {
        // M0 cannot be the destination of V_READLANE_B32, so read into a
        // scavenged SGPR and copy it to M0 afterwards.
        SubReg = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
      }

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg)
              .addReg(Spill.VGPR)
              .addImm(Spill.Lane)
              .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
      if (isM0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
                .addReg(SubReg);
      }
    }
    TII->insertNOPs(MI, 3);
    MI->eraseFromParent();
    break;
  }

  // VGPR register spill
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE:
    buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;

  // VGPR register restore
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE: {
    buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    int64_t Offset = FrameInfo->getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
      unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
              .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}

const TargetRegisterClass *SIRegisterInfo::getCFGStructurizerRegClass(
                                                                 MVT VT) const {
  switch (VT.SimpleTy) {
  default:
  case MVT::i32: return &AMDGPU::VGPR_32RegClass;
  }
}

unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}

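// The base classes below are ordered from narrowest to widest, so the first
// class that contains Reg is the narrowest base class it belongs to.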
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

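// A register class holds VGPRs if it shares a common subclass with any of the
// VGPR base classes.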
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  if (hasVGPRs(SRC)) {
    return SRC;
  } else if (SRC == &AMDGPU::SCCRegRegClass) {
    return &AMDGPU::VCCRegRegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
    return &AMDGPU::VGPR_32RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
    return &AMDGPU::VReg_64RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
    return &AMDGPU::VReg_128RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
    return &AMDGPU::VReg_256RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
    return &AMDGPU::VReg_512RegClass;
  }
  return nullptr;
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // If this register has a sub-register, we can safely assume it is a 32-bit
  // register, because all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  } else {
    return &AMDGPU::VGPR_32RegClass;
  }
}

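// VCC, FLAT_SCR, and EXEC have individually named halves; every other wide
// register derives its sub-register from the hardware encoding index.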
unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {

  switch (Reg) {
  case AMDGPU::VCC:
    switch (Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }

  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0:
      return AMDGPU::FLAT_SCR_LO;
    case 1:
      return AMDGPU::FLAT_SCR_HI;
    default:
      llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }
    break;

  case AMDGPU::EXEC:
    switch (Channel) {
    case 0:
      return AMDGPU::EXEC_LO;
    case 1:
      return AMDGPU::EXEC_HI;
    default:
      llvm_unreachable("Invalid SubIdx for EXEC");
    }
    break;
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return the
  // Reg. We need to have this check here, because the calculation below
  // using getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}

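// Operands of type OPERAND_REG_IMM32 can encode an arbitrary 32-bit literal,
// while OPERAND_REG_INLINE_C operands accept only inline constants. Inline
// constants are legal anywhere a literal is, so the inline-constant check
// subsumes the literal one.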
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}

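// Preloaded values live in fixed registers: the work-group IDs occupy SGPRs
// just past the user SGPRs, and the work-item IDs occupy the first three
// VGPRs.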
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  switch (Value) {
  case SIRegisterInfo::TGID_X:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
  case SIRegisterInfo::TGID_Y:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
  case SIRegisterInfo::TGID_Z:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
  case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
  case SIRegisterInfo::SCRATCH_PTR:
    return AMDGPU::SGPR2_SGPR3;
  case SIRegisterInfo::INPUT_PTR:
    return AMDGPU::SGPR0_SGPR1;
  case SIRegisterInfo::TIDIG_X:
    return AMDGPU::VGPR0;
  case SIRegisterInfo::TIDIG_Y:
    return AMDGPU::VGPR1;
  case SIRegisterInfo::TIDIG_Z:
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                         const TargetRegisterClass *RC) const {

  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    if (!MRI.isPhysRegUsed(*I))
      return *I;
  }
  return AMDGPU::NoRegister;
}