//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::CRC8              : return "XCoreISD::CRC8";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    default                          : return NULL;
  }
}

XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Division is expensive.
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::RegPressure);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc.
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64-bit arithmetic.
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit manipulation.
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constant pool nodes.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads.
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific DAG combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);

  setMinFunctionAlignment(1);
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}
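
// A sketch of the rewrite above: the single select_cc node is split back
// into a separate setcc (the comparison) and select (the choice), for which
// the XCore instruction patterns have direct matches.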

SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                        SelectionDAG &DAG) const
{
  // FIXME there is no actual debug info here
  SDLoc dl(GA);
  const GlobalValue *UnderlyingGV = GV;
  // If GV is an alias then use the aliasee to determine the wrapper type.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    UnderlyingGV = GA->resolveAliasedGlobal();
  if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(UnderlyingGV)) {
    if (GVar->isConstant())
      return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
    return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  int64_t Offset = GN->getOffset();
  // We can only fold positive offsets that are a multiple of the word size.
  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
  GA = getGlobalAddressWrapper(GA, GV, DAG);
  // Handle the rest of the offset.
  if (Offset != FoldedOffset) {
    SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, MVT::i32);
    GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
  }
  return GA;
}
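
// A worked example of the folding above: for @g + 6 the wrapped target
// address is @g + 4 and the remainder lowers to
//   add (wrapper (@g + 4)), 2
// while a negative offset such as @g - 4 is not folded at all:
//   add (wrapper @g), -4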

static inline SDValue BuildGetId(SelectionDAG &DAG, SDLoc dl) {
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}
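
// Illustrative note on the scaling above: small tables use BR_JT, whose
// inline entries each fit in a single 16-bit instruction, so Index addresses
// them directly. Larger tables use BR_JT32, where each entry is presumably a
// 32-bit branch, so Index is doubled to step through the table in 16-bit
// instruction units.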

SDValue XCoreTargetLowering::
lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
                                       int64_t Offset, SelectionDAG &DAG) const
{
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(getPointerTy(), DL, Chain, Base, MachinePointerInfo(),
                       false, false, false, 0);
  }
  // Lower to a pair of consecutive word-aligned loads plus some bit shifting.
  int32_t HighOffset = RoundUpToAlignment(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, MVT::i32);

  SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                            LowAddr, MachinePointerInfo(),
                            false, false, false, 0);
  SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                             HighAddr, MachinePointerInfo(),
                             false, false, false, 0);
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, 2, DL);
}
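
// Worked example: for Offset == 2 we get HighOffset == 4, LowOffset == 0,
// LowShift == 16 and HighShift == 16, so the merged value is
//   Result = (Low >> 16) | (High << 16)
// i.e. bytes 2..5 of memory, assuming the XCore's little-endian byte order.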

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(Value, KnownZero, KnownOne);
  return KnownZero.countTrailingOnes() >= 2;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned loads alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, DL);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, DL);
}
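
// The 2-byte-aligned path above behaves like the following C sketch
// (assuming little-endian byte order; p is the misaligned address):
//   uint32_t lo = *(const uint16_t *)p;       // zextload i16
//   uint32_t hi = *(const uint16_t *)(p + 2); // extload i16
//   uint32_t v  = lo | (hi << 16);
// Loads with weaker alignment guarantees become the __misaligned_load call.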

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned stores alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain,
                   Type::getVoidTy(*DAG.getContext()), false, false,
                   false, false, 0, CallingConv::C, /*isTailCall=*/false,
                   /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                   DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                   Args, DAG, dl);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  return CallResult.second;
}
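
// Symmetric C sketch of the 2-byte-aligned path above (little-endian):
//   *(uint16_t *)p       = (uint16_t)v;         // truncstore i16
//   *(uint16_t *)(p + 2) = (uint16_t)(v >> 16); // truncstore i16
// Stores with weaker alignment guarantees become the __misaligned_store call.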

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}
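
// Both lowerings use the 64-bit accumulating multiplies with zeroed
// accumulators; in terms of the underlying operations (a sketch):
//   maccs(0, 0, a, b) == sext(a) * sext(b)  -- signed 64-bit product
//   lmul(a, b, 0, 0)  == zext(a) * zext(b)  -- unsigned 64-bit product
// each returning the product split across the (Hi, Lo) result pair.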

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}
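
// The matcher accepts the three associations of the same expression:
//   add(add(a, b), mul(x, y))
//   add(add(mul(x, y), a), b)
//   add(add(a, mul(x, y)), b)
// in each case returning (Mul0, Mul1) = (x, y) and (Addend0, Addend1).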

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
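
// The general case above is the standard partial-product expansion: with
// the multiplicands written as (LH:LL) and (RH:RL),
//   (LL + LH*2^32) * (RL + RH*2^32) + Addend
//     == LL*RL + Addend + (LL*RH + LH*RL)*2^32   (mod 2^64)
// MACCU supplies LL*RL plus the addend; the two cross terms only affect the
// high word, so plain 32-bit MULs and ADDs suffice for them.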

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  SDLoc dl(N);

  // Extract components.
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand.
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
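
// This is the classic carry chain, with LADD/LSUB's second result acting as
// the carry/borrow bit:
//   (lo, carry) = ladd(lhs.lo, rhs.lo, 0)
//   (hi, _)     = ladd(lhs.hi, rhs.hi, carry)
// and the same shape with lsub for 64-bit subtraction.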

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  llvm_unreachable("unimplemented");
  // FIXME Arguments passed by reference need an extra dereference.
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                               Node->getOperand(1), MachinePointerInfo(V),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg.
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
                             DAG.getConstant(VT.getSizeInBits(),
                                             getPointerTy()));
  // Store the incremented VAList to the legalized pointer.
  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
                      MachinePointerInfo(V), false, false, 0);
  // Load the actual argument out of the pointer VAList.
  return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
    case Intrinsic::xcore_crc8:
      EVT VT = Op.getValueType();
      SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
      SDValue Crc(Data.getNode(), 1);
      SDValue Results[] = { Crc, Data };
      return DAG.getMergeValues(Results, 2, DL);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;

  // The XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCCCCallTo - function arguments are copied from virtual registers to
/// physical registers or the stack frame; CALLSEQ_START and CALLSEQ_END are
/// emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy(), true),
                               dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed in a register are kept in the RegsToPass
    // vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node, since they are
  // independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it. Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //                 = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     SDLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physregs.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          SDLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       SDLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  // TODO: need to make copies of any byVal arguments.

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      // Arguments passed in registers.
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      }
    } else {
      // Sanity check.
      assert(VA.isMemLoc());
      // Load the argument to a virtual register.
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(FI),
                                   false, false, false, 0));
    }
  }

  if (isVarArg) {
    /* Argument registers */
    static const uint16_t ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot.
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg.
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        // Move argument from virt reg -> stack.
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of
  // the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0".
  RetOps.push_back(DAG.getConstant(0, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
//  Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // Canonicalize constants to the RHS.
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff the carry is unused and y has
    // only the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff the borrow is unused and y has
    // only the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b).
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      }
      // Otherwise fold to ladd(a, b, 0).
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32-bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64-bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace an unaligned store of an unaligned load with a memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, false, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}
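
// Examples of the immediate ranges these helpers accept:
//   isImmUs:  0, 1, ..., 11   (unscaled short immediate)
//   isImmUs2: 0, 2, ..., 22   (halfword-scaled offsets)
//   isImmUs4: 0, 4, ..., 44   (word-scaled offsets)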

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const DataLayout *TD = TM.getDataLayout();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
                 AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//  XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}