//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
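// (For example, an add of a register and a ConstantInt can usually be
// selected directly as a single add-with-immediate machine instruction.)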
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                           unsigned AttrIdx) {
  IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
  IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
  IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
  IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
  IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
  IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
  IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
  IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
  Alignment = CS->getParamAlignment(AttrIdx);
}

/// Set the current block to which generated machine instructions will be
/// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[I] = VI->second;
  }
  return true;
}

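/// Flush the local CSE map and reset the insertion point to the start of the
/// local value area, so that local values already emitted are not reused past
/// this point (for example, across a function call).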
void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

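/// Test whether the given value is an instruction whose only use is in the
/// same basic block, so the register holding it can safely be marked as
/// killed at that use.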
bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction and then there is
  // more than one use at the Machine Instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

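/// Materialize a constant-like value (integer, FP, null, alloca, undef, or
/// constant expression) into a register using target-independent emitters.
/// Returns 0 if the value could not be handled.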
unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                 APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

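/// Update the value map to include the new mapping for this instruction, or
/// insert register fixups so that earlier uses of the old register(s) are
/// rewritten to the new one(s).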
void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;

    AssignedReg = Reg;
  }
}

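/// Get a register for the given GEP index, sign-extending or truncating it to
/// pointer width as needed. Returns the register and whether it is killed.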
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

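/// Reposition the insertion point after the last local value (or at the first
/// non-PHI instruction if there is none), skipping past any EH_LABELs, which
/// must remain at the beginning of the block.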
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

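/// Save the current insertion point and debug location, then move the
/// insertion point to the end of the local value area so local values can be
/// emitted there.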
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg) {
      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1,
                                            E = I->op_end();
       OI != E; ++OI) {
    const Value *Idx = *OI;
    if (auto *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        TotalOffs += DL.getTypeAllocSize(Ty) * CI->getSExtValue();
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

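/// Push the live variable operands of a stackmap or patchpoint call onto Ops,
/// encoding constants, frame indices, and registers as StackMaps expects.
/// Returns false if any operand cannot be handled.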
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target-specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
      .addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.addOperand(MO);

  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo()->setHasStackMap();

  return true;
}

/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
       ArgI != ArgE; ++ArgI, ++AttrI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, AttrI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos);

  // Get the real number of arguments participating in the call <numArgs>.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>.
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Assume that the callee is a constant address or null pointer.
  // FIXME: handle function symbols in the future.
  uint64_t CalleeAddr;
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee))
    CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
  else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr)
      CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (isa<ConstantPointerNull>(Callee))
    CalleeAddr = 0;
  else
    llvm_unreachable("Unsupported callee address.");

  Ops.push_back(MachineOperand::CreateImm(CalleeAddr));

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention.
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(TRI.getCallPreservedMask(CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.addOperand(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo()->setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

/// Returns an AttributeSet representing the attributes applied to the return
/// value of the given call.
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
                           Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FTy->getReturnType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI + 1);
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling in
      // the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from the frontend. The backend will
      // guess if this info is not there, but there are cases it cannot get
      // right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FuncTy->getReturnType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types.
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  ComputeUsesVAFloatArgument(*Call, &MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value, make an unrelated
  // function call, and then use the value, because the value tends to be
  // spilled on the stack. So we move the pointer to the last local value to
  // the beginning of the block, so that all the values which have already
  // been materialized appear after the call. It also makes sense to skip
  // intrinsics, since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    DIVariable DIVar(DI->getVariable());
    assert((!DIVar || DIVar.isVariable()) &&
           "Variable in DbgDeclareInst should be either null or a DIVariable.");
    if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    unsigned Offset = 0;
    Optional<MachineOperand> Op;
    if (const auto *Arg = dyn_cast<Argument>(Address))
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
    if (Offset)
      Op = MachineOperand::CreateFI(Offset);
    if (!Op)
      if (unsigned Reg = lookUpRegForValue(Address))
        Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      if (Op->isReg()) {
        Op->setIsDebug(true);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
                DI->getVariable(), DI->getExpression());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
            .addOperand(*Op)
            .addImm(0)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addReg(0U)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getOffset(), DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!handlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        !TM.Options.getTrapFunctionName().empty())
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I))
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information, if this is the only instruction in
    // the block then we emit it; otherwise we have the unconditional
    // fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  uint32_t BranchWeight = 0;
  if (FuncInfo.BPI)
    BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
                                               MSucc->getBasicBlock());
  FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return selectFNeg(I);
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}


FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(*TM.getSubtargetImpl()->getDataLayout()),
      TII(*TM.getSubtargetImpl()->getInstrInfo()),
      TLI(*TM.getSubtargetImpl()->getTargetLowering()),
      TRI(*TM.getSubtargetImpl()->getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() {}

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
                                bool /*Op0IsKill*/, unsigned /*Op1*/,
                                bool /*Op1IsKill*/, uint64_t /*Imm*/) {
  return 0;
}
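
// Illustrative sketch (editorial, not part of this file): the stubs above
// all answer "not handled"; a target subclass overrides them to emit real
// instructions. For a hypothetical target "XYZ" with register-register add
// XYZ::ADDrr and register class XYZ::GPR32RegClass, selecting a 32-bit
// integer add might look like:
//
//   unsigned XYZFastISel::fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode,
//                                     unsigned Op0, bool Op0IsKill,
//                                     unsigned Op1, bool Op1IsKill) {
//     if (Opcode == ISD::ADD && VT == MVT::i32)
//       return fastEmitInst_rr(XYZ::ADDrr, &XYZ::GPR32RegClass, Op0,
//                              Op0IsKill, Op1, Op1IsKill);
//     return 0; // Not handled; selection falls back to SelectionDAG.
//   }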

/// This method is a wrapper around fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // udiv x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed): check that shift amounts are in range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if the immediate type is legal. If not, we can't use the ri
  // form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg,
                     /*IsKill=*/true);
}
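
// Illustrative walk-through (editorial, not from the original source): for a
// request such as fastEmit_ri_(MVT::i32, ISD::MUL, X, /*Op0IsKill=*/true,
// /*Imm=*/8, MVT::i32), the power-of-two rewrite above turns the multiply
// into ISD::SHL with Imm = 3, so the target only needs a shift-by-immediate
// pattern. If even the ri form fails, the constant 3 is materialized into a
// register and fastEmit_rr is attempted as the last resort.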

unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}
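
// Usage note (editorial, not from the original source): OpNum is the
// operand's index in the MCInstrDesc, and defs come first in that numbering.
// The emitters below therefore pass II.getNumDefs() for the first use
// operand, II.getNumDefs() + 1 for the second, and so on:
//
//   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
//   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);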

unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
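
// Note on the else-branch above (editorial, not from the original source):
// some instructions have no explicit def and instead write their result to a
// fixed implicit physical register (x86's widening multiply writing EAX/EDX
// is the classic shape). In that case the emitted sequence is the
// instruction itself followed by a COPY from its first implicit def into the
// fresh virtual register, so callers can treat ResultReg uniformly either
// way. The same pattern repeats in every fastEmitInst_* helper below.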

unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill, unsigned Op1,
                                     bool Op1IsKill, uint64_t Imm1,
                                     uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, uint64_t Imm1,
                                   uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
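
// Illustrative example (editorial, not from the original source): truncating
// a value that already lives in a wider register is just a subregister copy.
// Assuming an x86-64-like target, taking the low 32 bits of a 64-bit vreg
// would produce something like:
//
//   %vreg1<def> = COPY %vreg0:sub_32bit
//
// where Idx is the target's subregister index (sub_32bit here).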

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
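
// Illustrative note (editorial, not from the original source): an i1 value
// occupies a full-width register whose upper bits are unspecified, so
// zero-extending it is just an AND with 1. For example, lowering
// "%z = zext i1 %b to i32" reduces to
// fastEmit_ri(MVT::i32, MVT::i32, ISD::AND, BReg, BIsKill, 1), where BReg is
// the (hypothetical) vreg already holding %b.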

/// Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB))
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const auto *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}
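
// Illustrative example (editorial, not from the original source): given IR
// such as
//
//   entry:
//     br label %join
//   join:
//     %v = phi i32 [ 7, %entry ]
//
// selecting the branch in %entry records a (MachinePHI-operand, vreg) pair
// for the register holding 7 in FuncInfo.PHINodesToUpdate rather than
// patching the machine PHI immediately, because %entry's code may end up
// spread over several machine basic blocks and the correct incoming block
// is only known once the whole block has been selected.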

bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
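
// Illustrative example (editorial, not from the original source): for IR like
//
//   %v = load i32* %p
//   %s = add i32 %v, %x
//
// a target's tryToFoldLoadIntoMI may rewrite the already-selected add to read
// from memory directly (e.g. x86's "addl (%rdi), %eax" form) and delete the
// standalone load. The single-use and single-vreg-use checks above guarantee
// the loaded value is consumed exactly once, so removing the load is safe.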

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
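
// Illustrative example (editorial, not from the original source): this
// accepts patterns like
//
//   %sum = add i64 %base, 16
//   %gep = getelementptr i8* %ptr, i64 %sum
//
// so target GEP lowering can fold the "+ 16" into an addressing mode's
// displacement instead of emitting a separate add instruction.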

MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  unsigned Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata("nontemporal") != nullptr;
  bool IsInvariant = I->getMetadata("invariant.load") != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size =
      TM.getSubtargetImpl()->getDataLayout()->getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}

CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}
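
// Illustrative note (editorial, not from the original source): with identical
// operands, only NaN inputs can make a float compare "unordered", so e.g.
// "fcmp oeq %x, %x" degenerates to an ordered check (FCMP_ORD, true iff %x is
// not NaN), while "fcmp ult %x, %x" degenerates to FCMP_UNO. Integer compares
// with equal operands are simply always true or always false; the table above
// reuses FCMP_TRUE / FCMP_FALSE as the canonical constant predicates for
// those cases.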