R600/SI: Fix extra mov from legalizing 64-bit SALU ops.
author    Matt Arsenault <Matthew.Arsenault@amd.com>
          Mon, 24 Mar 2014 20:08:13 +0000 (20:08 +0000)
committer Matt Arsenault <Matthew.Arsenault@amd.com>
          Mon, 24 Mar 2014 20:08:13 +0000 (20:08 +0000)
Check the register class of each operand individually
to avoid an extra copy to a vgpr.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@204662 91177308-0d34-0410-b5e6-96231b3b80d8
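The change boils down to deriving a register class per operand instead of reusing Src0's class for everything. A minimal sketch of that selection, assuming the headers already included by SIInstrInfo.cpp; the helper name getOperandRegClassOrSGPR32 is hypothetical, and in the commit the same ternary is written inline for Src0 and Src1:

    // Hypothetical helper illustrating the per-operand choice made in
    // splitScalar64BitOp: a register operand keeps its own register class,
    // while an immediate operand (which has no register) is treated as a
    // 32-bit scalar value, so it is not forced through a copy to a VGPR.
    static const TargetRegisterClass *
    getOperandRegClassOrSGPR32(const MachineRegisterInfo &MRI,
                               const MachineOperand &MO) {
      return MO.isReg() ? MRI.getRegClass(MO.getReg())
                        : &AMDGPU::SGPR_32RegClass;
    }

With the source and destination classes derived from their own operands, the sub0/sub1 extraction and the rebuilt 64-bit result no longer inherit Src0's class, which is what the first hunk below spells out explicitly.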

lib/Target/R600/SIInstrInfo.cpp
test/CodeGen/R600/or.ll

diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
index eb5172c896e97d2369ccfd8cda8ca9e58d1dbfd0..336f6aa5667f876a2605649a17ff451db6b005dd 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/R600/SIInstrInfo.cpp
@@ -1028,29 +1028,41 @@ void SIInstrInfo::splitScalar64BitOp(SmallVectorImpl<MachineInstr *> &Worklist,
   MachineBasicBlock::iterator MII = Inst;
 
   const MCInstrDesc &InstDesc = get(Opcode);
-  const TargetRegisterClass *RC = MRI.getRegClass(Src0.getReg());
-  const TargetRegisterClass *SubRC = RI.getSubRegClass(RC, AMDGPU::sub0);
-  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, RC,
-                                                       AMDGPU::sub0, SubRC);
-  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, RC,
-                                                       AMDGPU::sub0, SubRC);
-
-  unsigned DestSub0 = MRI.createVirtualRegister(SubRC);
+  const TargetRegisterClass *Src0RC = Src0.isReg() ?
+    MRI.getRegClass(Src0.getReg()) :
+    &AMDGPU::SGPR_32RegClass;
+
+  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
+  const TargetRegisterClass *Src1RC = Src1.isReg() ?
+    MRI.getRegClass(Src1.getReg()) :
+    &AMDGPU::SGPR_32RegClass;
+
+  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
+
+  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+                                                       AMDGPU::sub0, Src0SubRC);
+  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
+                                                       AMDGPU::sub0, Src1SubRC);
+
+  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
+
+  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
   MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
     .addOperand(SrcReg0Sub0)
     .addOperand(SrcReg1Sub0);
 
-  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, RC,
-                                                       AMDGPU::sub1, SubRC);
-  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, RC,
-                                                       AMDGPU::sub1, SubRC);
+  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+                                                       AMDGPU::sub1, Src0SubRC);
+  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
+                                                       AMDGPU::sub1, Src1SubRC);
 
-  unsigned DestSub1 = MRI.createVirtualRegister(SubRC);
+  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
   MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
     .addOperand(SrcReg0Sub1)
     .addOperand(SrcReg1Sub1);
 
-  unsigned FullDestReg = MRI.createVirtualRegister(RC);
+  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
     .addReg(DestSub0)
     .addImm(AMDGPU::sub0)
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
index 8e985c75cbd615a0de21461684768f178e01a403..be984b2712246fd22ae08f1f4bca5897db982bd3 100644
--- a/test/CodeGen/R600/or.ll
+++ b/test/CodeGen/R600/or.ll
@@ -89,11 +89,11 @@ define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
 }
 
 ; SI-LABEL: @vector_or_i64_loadimm
-; SI-DAG: S_MOV_B32
-; SI-DAG: S_MOV_B32
-; SI-DAG: BUFFER_LOAD_DWORDX2
-; SI: V_OR_B32_e32
-; SI: V_OR_B32_e32
+; SI-DAG: S_MOV_B32 [[LO_S_IMM:s[0-9]+]], -545810305
+; SI-DAG: S_MOV_B32 [[HI_S_IMM:s[0-9]+]], 5231
+; SI-DAG: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: S_ENDPGM
 define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
   %loada = load i64 addrspace(1)* %a, align 8