index e443deb7ea95f3cd8a5be5084a64a11b918484e8..166df66a27a14d6c1d19c1f3dab53fed762fa829 100644
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/Debug.h"
using namespace llvm;
SIRegisterInfo::SIRegisterInfo(const AMDGPUSubtarget &st)
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
Reserved.set(AMDGPU::EXEC);
+
+ // EXEC_LO and EXEC_HI could be allocated and used as regular registers,
+ // but this seems likely to result in bugs, so I'm marking them as reserved.
+ Reserved.set(AMDGPU::EXEC_LO);
+ Reserved.set(AMDGPU::EXEC_HI);
+
Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
Reserved.set(AMDGPU::FLAT_SCR);
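+ // Also reserve the 32-bit halves of FLAT_SCR.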
+ Reserved.set(AMDGPU::FLAT_SCR_LO);
+ Reserved.set(AMDGPU::FLAT_SCR_HI);
// Reserve some VGPRs to use as temp registers in case we have to spill VGPRs
Reserved.set(AMDGPU::VGPR255);
}
}
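+/// Emit a series of scratch buffer (MUBUF) loads or stores: one dword access
+/// per 32-bit subregister of \p Value, using \p ScratchRsrcReg as the buffer
+/// resource, \p ScratchOffset as the soffset register, and \p Offset as the
+/// immediate offset into the scratch area.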
+void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
+ unsigned LoadStoreOp,
+ unsigned Value,
+ unsigned ScratchRsrcReg,
+ unsigned ScratchOffset,
+ int64_t Offset,
+ RegScavenger *RS) const {
+
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
+ MachineBasicBlock *MBB = MI->getParent();
+ const MachineFunction *MF = MI->getParent()->getParent();
+ LLVMContext &Ctx = MF->getFunction()->getContext();
+ DebugLoc DL = MI->getDebugLoc();
+ bool IsLoad = TII->get(LoadStoreOp).mayLoad();
+
+ bool RanOutOfSGPRs = false;
+ unsigned SOffset = ScratchOffset;
+
+ unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
+ unsigned Size = NumSubRegs * 4;
+
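+ // The MUBUF immediate offset field is only 12 bits. If Offset + Size does
+ // not fit, add Offset to ScratchOffset in a scavenged SGPR and use a zero
+ // immediate instead.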
+ if (!isUInt<12>(Offset + Size)) {
+ dbgs() << "Offset scavenge\n";
+ SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
+ if (SOffset == AMDGPU::NoRegister) {
+ RanOutOfSGPRs = true;
+ SOffset = AMDGPU::SGPR0;
+ }
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
+ .addReg(ScratchOffset)
+ .addImm(Offset);
+ Offset = 0;
+ }
+
+ if (RanOutOfSGPRs)
+ Ctx.emitError("Ran out of SGPRs for spilling VGPRs");
+
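+ // Emit one dword load or store per 32-bit subregister, stepping the
+ // immediate offset by 4 bytes. The full register is also added as an
+ // implicit operand (def for loads, use for stores) so its liveness is
+ // tracked across the split accesses.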
+ for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
+ unsigned SubReg = NumSubRegs > 1 ?
+ getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
+ Value;
+ bool IsKill = (i == e - 1);
+
+ BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
+ .addReg(SubReg, getDefRegState(IsLoad))
+ .addReg(ScratchRsrcReg, getKillRegState(IsKill))
+ .addImm(Offset)
+ .addReg(SOffset)
+ .addImm(0) // glc
+ .addImm(0) // slc
+ .addImm(0) // tfe
+ .addReg(Value, RegState::Implicit | getDefRegState(IsLoad));
+ }
+}
+
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
&AMDGPU::SGPR_32RegClass, i);
+ bool isM0 = SubReg == AMDGPU::M0;
struct SIMachineFunctionInfo::SpilledReg Spill =
MFI->getSpilledReg(MF, Index, i);
Ctx.emitError("Ran out of VGPRs for spilling SGPR");
}
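+ // V_READLANE_B32 cannot use M0 as its destination, so read the lane into a
+ // scavenged SGPR and copy it into M0 below.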
+ if (isM0) {
+ dbgs() << "Scavenge M0\n";
+ SubReg = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
+ }
+
BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg)
.addReg(Spill.VGPR)
- .addImm(Spill.Lane);
-
+ .addImm(Spill.Lane)
+ .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
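+ // The lane was read into the scavenged SGPR; move it into M0 now.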
+ if (isM0) {
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+ .addReg(SubReg);
+ }
}
TII->insertNOPs(MI, 3);
MI->eraseFromParent();
case AMDGPU::SI_SPILL_V128_SAVE:
case AMDGPU::SI_SPILL_V96_SAVE:
case AMDGPU::SI_SPILL_V64_SAVE:
- case AMDGPU::SI_SPILL_V32_SAVE: {
- unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
- unsigned SrcReg = MI->getOperand(0).getReg();
- int64_t Offset = FrameInfo->getObjectOffset(Index);
- unsigned Size = NumSubRegs * 4;
- unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
-
- for (unsigned i = 0, e = NumSubRegs; i != e; ++i) {
- unsigned SubReg = NumSubRegs > 1 ?
- getPhysRegSubReg(SrcReg, &AMDGPU::VGPR_32RegClass, i) :
- SrcReg;
- Offset += (i * 4);
- MFI->LDSWaveSpillSize = std::max((unsigned)Offset + 4, (unsigned)MFI->LDSWaveSpillSize);
-
- unsigned AddrReg = TII->calculateLDSSpillAddress(*MBB, MI, RS, TmpReg,
- Offset, Size);
-
- if (AddrReg == AMDGPU::NoRegister) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
- Ctx.emitError("Ran out of VGPRs for spilling VGPRS");
- AddrReg = AMDGPU::VGPR0;
- }
-
- // Store the value in LDS
- BuildMI(*MBB, MI, DL, TII->get(AMDGPU::DS_WRITE_B32))
- .addImm(0) // gds
- .addReg(AddrReg, RegState::Kill) // addr
- .addReg(SubReg) // data0
- .addImm(0); // offset
- }
-
+ case AMDGPU::SI_SPILL_V32_SAVE:
+ buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
+ TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
+ FrameInfo->getObjectOffset(Index), RS);
MI->eraseFromParent();
break;
- }
case AMDGPU::SI_SPILL_V32_RESTORE:
case AMDGPU::SI_SPILL_V64_RESTORE:
case AMDGPU::SI_SPILL_V128_RESTORE:
case AMDGPU::SI_SPILL_V256_RESTORE:
case AMDGPU::SI_SPILL_V512_RESTORE: {
- unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
- unsigned DstReg = MI->getOperand(0).getReg();
- int64_t Offset = FrameInfo->getObjectOffset(Index);
- unsigned Size = NumSubRegs * 4;
- unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
-
- // FIXME: We could use DS_READ_B64 here to optimize for larger registers.
- for (unsigned i = 0, e = NumSubRegs; i != e; ++i) {
- unsigned SubReg = NumSubRegs > 1 ?
- getPhysRegSubReg(DstReg, &AMDGPU::VGPR_32RegClass, i) :
- DstReg;
-
- Offset += (i * 4);
- unsigned AddrReg = TII->calculateLDSSpillAddress(*MBB, MI, RS, TmpReg,
- Offset, Size);
- if (AddrReg == AMDGPU::NoRegister) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
- Ctx.emitError("Ran out of VGPRs for spilling VGPRs");
- AddrReg = AMDGPU::VGPR0;
- }
-
- BuildMI(*MBB, MI, DL, TII->get(AMDGPU::DS_READ_B32), SubReg)
- .addImm(0) // gds
- .addReg(AddrReg, RegState::Kill) // addr
- .addImm(0); //offset
- }
+ buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
+ TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
+ FrameInfo->getObjectOffset(Index), RS);
MI->eraseFromParent();
break;
}
int64_t Offset = FrameInfo->getObjectOffset(Index);
FIOp.ChangeToImmediate(Offset);
if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
- unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VReg_32RegClass, MI, SPAdj);
+ unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
BuildMI(*MBB, MI, MI->getDebugLoc(),
TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
.addImm(Offset);
- FIOp.ChangeToRegister(TmpReg, false);
+ FIOp.ChangeToRegister(TmpReg, false /*isDef*/, false /*isImp*/, true /*isKill*/);
}
}
}
MVT VT) const {
switch(VT.SimpleTy) {
default:
- case MVT::i32: return &AMDGPU::VReg_32RegClass;
+ case MVT::i32: return &AMDGPU::VGPR_32RegClass;
}
}
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
assert(!TargetRegisterInfo::isVirtualRegister(Reg));
- const TargetRegisterClass *BaseClasses[] = {
- &AMDGPU::VReg_32RegClass,
+ static const TargetRegisterClass *BaseClasses[] = {
+ &AMDGPU::VGPR_32RegClass,
&AMDGPU::SReg_32RegClass,
&AMDGPU::VReg_64RegClass,
&AMDGPU::SReg_64RegClass,
@@ -282,15 +309,8 @@ const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
return nullptr;
}
-bool SIRegisterInfo::isSGPRClass(const TargetRegisterClass *RC) const {
- if (!RC) {
- return false;
- }
- return !hasVGPRs(RC);
-}
-
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
- return getCommonSubClass(&AMDGPU::VReg_32RegClass, RC) ||
+ return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) ||
getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
} else if (SRC == &AMDGPU::SCCRegRegClass) {
return &AMDGPU::VCCRegRegClass;
} else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
- return &AMDGPU::VReg_32RegClass;
+ return &AMDGPU::VGPR_32RegClass;
} else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
return &AMDGPU::VReg_64RegClass;
} else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
return SubRC->getRegister(Index + Channel);
}
-bool SIRegisterInfo::regClassCanUseLiteralConstant(int RCID) const {
- switch (RCID) {
- default: return false;
- case AMDGPU::SSrc_32RegClassID:
- case AMDGPU::SSrc_64RegClassID:
- case AMDGPU::VSrc_32RegClassID:
- case AMDGPU::VSrc_64RegClassID:
- return true;
- }
-}
-
-bool SIRegisterInfo::regClassCanUseLiteralConstant(
- const TargetRegisterClass *RC) const {
- return regClassCanUseLiteralConstant(RC->getID());
+bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
+ return OpType == AMDGPU::OPERAND_REG_IMM32;
}
-bool SIRegisterInfo::regClassCanUseInlineConstant(int RCID) const {
- if (regClassCanUseLiteralConstant(RCID))
+bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
+ if (opCanUseLiteralConstant(OpType))
return true;
- switch (RCID) {
- default: return false;
- case AMDGPU::VCSrc_32RegClassID:
- case AMDGPU::VCSrc_64RegClassID:
- return true;
- }
-}
-
-bool SIRegisterInfo::regClassCanUseInlineConstant(
- const TargetRegisterClass *RC) const {
- return regClassCanUseInlineConstant(RC->getID());
+ return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}
-
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
enum PreloadedValue Value) const {
/// \brief Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
// AMDGPU::NoRegister.
-unsigned SIRegisterInfo::findUnusedVGPR(const MachineRegisterInfo &MRI) const {
-
- const TargetRegisterClass *RC = &AMDGPU::VGPR_32RegClass;
+unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
+ const TargetRegisterClass *RC) const {
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I) {