1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include <cstdio>
36 using namespace llvm;
38 namespace {
40 class AArch64Operand;
/// AArch64AsmParser - MCTargetAsmParser implementation for AArch64 assembly.
/// Parses instructions, registers and target directives, and drives the
/// table-generated matcher (AArch64GenAsmMatcher.inc) to build MCInsts.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.
  MCSubtargetInfo &STI;

  // Map of register aliases created via the .req directive.  The bool flag
  // presumably marks vector-register aliases (mirrors the isVector flag of
  // matchRegisterNameAlias) — verify against parseDirectiveReq.
  StringMap<std::pair<bool, unsigned> > RegisterReqs;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  /// Location of the token currently being examined.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  // Parsers for individual syntactic pieces of an instruction.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  // Diagnostic helpers forwarding to the generic MCAsmParser.
  void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  // Handlers for target-specific assembler directives.
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom parsers for AArch64-specific operand forms.
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII,
                   const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI) {
    MCAsmParserExtension::Initialize(_Parser);
    // Ensure a target streamer exists even for plain assembly parsing.
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  /// Decompose Expr into its symbol-reference kinds (ELF and/or Darwin
  /// flavor) plus a constant addend.  Returns false when the expression is
  /// not of a form this parser understands.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
141 } // end anonymous namespace
143 namespace {
145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
146 /// instruction.
147 class AArch64Operand : public MCParsedAsmOperand {
148 private:
  /// Discriminator for the union of operand payloads below.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier
  } Kind;

  // Source range of this operand, used for diagnostics.
  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  struct RegOp {
    unsigned RegNum;
    bool isVector;
  };

  struct VectorListOp {
    unsigned RegNum;      // First register of the list.
    unsigned Count;       // Number of registers in the list.
    unsigned NumElements; // Elements per register.
    unsigned ElementKind; // Element-type character; 0 when implicit.
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    unsigned Val; // Encoded 8-bit representation.
  };

  struct BarrierOp {
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;      // -1U when the register is not readable via MRS.
    uint32_t MSRReg;      // -1U when the register is not writable via MSR.
    uint32_t PStateField; // -1U when this is not a PState field.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    unsigned Val;
  };

  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct ExtendOp {
    unsigned Val;
  };

  // Payload storage; Kind selects which member is active.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  /// Construct an empty operand of kind K; the caller fills in the payload.
  AArch64Operand(KindTy K, MCContext &_Ctx)
      : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}

  /// Copy constructor.  Only the union member selected by Kind is valid,
  /// so copy exactly that member instead of the raw union storage.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Kind-checked accessors for the union payload.  Each one asserts that
  // the operand actually holds the requested kind.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;
  }

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;
  }

  bool hasShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.HasExplicitAmount;
  }
402 bool isImm() const override { return Kind == k_Immediate; }
403 bool isMem() const override { return false; }
404 bool isSImm9() const {
405 if (!isImm())
406 return false;
407 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
408 if (!MCE)
409 return false;
410 int64_t Val = MCE->getValue();
411 return (Val >= -256 && Val < 256);
412 }
413 bool isSImm7s4() const {
414 if (!isImm())
415 return false;
416 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
417 if (!MCE)
418 return false;
419 int64_t Val = MCE->getValue();
420 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
421 }
422 bool isSImm7s8() const {
423 if (!isImm())
424 return false;
425 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
426 if (!MCE)
427 return false;
428 int64_t Val = MCE->getValue();
429 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
430 }
431 bool isSImm7s16() const {
432 if (!isImm())
433 return false;
434 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
435 if (!MCE)
436 return false;
437 int64_t Val = MCE->getValue();
438 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
439 }
  /// Check whether a symbolic expression can serve as a scaled, unsigned
  /// 12-bit load/store offset, i.e. it carries a modifier that resolves to
  /// the low 12 bits of an address (@pageoff, :lo12: and relatives).
  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }

  /// Unsigned 12-bit offset scaled by Scale bytes: a constant multiple of
  /// Scale with quotient below 0x1000, or a symbolic low-12-bits reference.
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm(), Scale);

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
486 bool isImm0_7() const {
487 if (!isImm())
488 return false;
489 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
490 if (!MCE)
491 return false;
492 int64_t Val = MCE->getValue();
493 return (Val >= 0 && Val < 8);
494 }
495 bool isImm1_8() const {
496 if (!isImm())
497 return false;
498 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
499 if (!MCE)
500 return false;
501 int64_t Val = MCE->getValue();
502 return (Val > 0 && Val < 9);
503 }
504 bool isImm0_15() const {
505 if (!isImm())
506 return false;
507 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
508 if (!MCE)
509 return false;
510 int64_t Val = MCE->getValue();
511 return (Val >= 0 && Val < 16);
512 }
513 bool isImm1_16() const {
514 if (!isImm())
515 return false;
516 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
517 if (!MCE)
518 return false;
519 int64_t Val = MCE->getValue();
520 return (Val > 0 && Val < 17);
521 }
522 bool isImm0_31() const {
523 if (!isImm())
524 return false;
525 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
526 if (!MCE)
527 return false;
528 int64_t Val = MCE->getValue();
529 return (Val >= 0 && Val < 32);
530 }
531 bool isImm1_31() const {
532 if (!isImm())
533 return false;
534 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
535 if (!MCE)
536 return false;
537 int64_t Val = MCE->getValue();
538 return (Val >= 1 && Val < 32);
539 }
540 bool isImm1_32() const {
541 if (!isImm())
542 return false;
543 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 if (!MCE)
545 return false;
546 int64_t Val = MCE->getValue();
547 return (Val >= 1 && Val < 33);
548 }
549 bool isImm0_63() const {
550 if (!isImm())
551 return false;
552 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
553 if (!MCE)
554 return false;
555 int64_t Val = MCE->getValue();
556 return (Val >= 0 && Val < 64);
557 }
558 bool isImm1_63() const {
559 if (!isImm())
560 return false;
561 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
562 if (!MCE)
563 return false;
564 int64_t Val = MCE->getValue();
565 return (Val >= 1 && Val < 64);
566 }
567 bool isImm1_64() const {
568 if (!isImm())
569 return false;
570 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
571 if (!MCE)
572 return false;
573 int64_t Val = MCE->getValue();
574 return (Val >= 1 && Val < 65);
575 }
576 bool isImm0_127() const {
577 if (!isImm())
578 return false;
579 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
580 if (!MCE)
581 return false;
582 int64_t Val = MCE->getValue();
583 return (Val >= 0 && Val < 128);
584 }
585 bool isImm0_255() const {
586 if (!isImm())
587 return false;
588 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
589 if (!MCE)
590 return false;
591 int64_t Val = MCE->getValue();
592 return (Val >= 0 && Val < 256);
593 }
594 bool isImm0_65535() const {
595 if (!isImm())
596 return false;
597 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
598 if (!MCE)
599 return false;
600 int64_t Val = MCE->getValue();
601 return (Val >= 0 && Val < 65536);
602 }
603 bool isImm32_63() const {
604 if (!isImm())
605 return false;
606 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
607 if (!MCE)
608 return false;
609 int64_t Val = MCE->getValue();
610 return (Val >= 32 && Val < 64);
611 }
  /// 32-bit logical (bitmask) immediate.
  bool isLogicalImm32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    // Accept only values whose upper 32 bits are all-zero or all-one (a
    // 32-bit value or its sign extension), then test the low half.
    if (Val >> 32 != 0 && Val >> 32 != ~0LL)
      return false;
    Val &= 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }

  /// 64-bit logical (bitmask) immediate.
  bool isLogicalImm64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
  }

  /// The operand's bitwise complement is a valid 32-bit logical immediate.
  bool isLogicalImm32Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }

  /// The operand's bitwise complement is a valid 64-bit logical immediate.
  bool isLogicalImm64Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
  }

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// True if this operand is encodable as an ADD/SUB immediate: a 12-bit
  /// unsigned constant, optionally shifted left by 0 or 12, or a suitable
  /// low-12-bits symbolic reference.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
688 bool isCondCode() const { return Kind == k_CondCode; }
689 bool isSIMDImmType10() const {
690 if (!isImm())
691 return false;
692 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
693 if (!MCE)
694 return false;
695 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
696 }
697 bool isBranchTarget26() const {
698 if (!isImm())
699 return false;
700 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
701 if (!MCE)
702 return true;
703 int64_t Val = MCE->getValue();
704 if (Val & 0x3)
705 return false;
706 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
707 }
708 bool isPCRelLabel19() const {
709 if (!isImm())
710 return false;
711 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
712 if (!MCE)
713 return true;
714 int64_t Val = MCE->getValue();
715 if (Val & 0x3)
716 return false;
717 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
718 }
719 bool isBranchTarget14() const {
720 if (!isImm())
721 return false;
722 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
723 if (!MCE)
724 return true;
725 int64_t Val = MCE->getValue();
726 if (Val & 0x3)
727 return false;
728 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
729 }
731 bool
732 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
733 if (!isImm())
734 return false;
736 AArch64MCExpr::VariantKind ELFRefKind;
737 MCSymbolRefExpr::VariantKind DarwinRefKind;
738 int64_t Addend;
739 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
740 DarwinRefKind, Addend)) {
741 return false;
742 }
743 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
744 return false;
746 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
747 if (ELFRefKind == AllowedModifiers[i])
748 return Addend == 0;
749 }
751 return false;
752 }
  // The predicates below classify which MOVZ/MOVK relocation modifier
  // groups a symbolic immediate may use at each 16-bit shift position.
  bool isMovZSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
        AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
        AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2_NC};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
        AArch64MCExpr::VK_DTPREL_G1_NC
    };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
        AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
    };
    return isMovWSymbol(Variants);
  }

  /// True if this constant can be assembled as a MOVZ with the given shift:
  /// after masking to the register width, only the 16 bits at position
  /// Shift may be set.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    if (Value == 0 && Shift != 0)
      return false;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }

  /// True if this constant can be assembled as a MOVN with the given shift:
  /// its complement fits in the 16 bits at position Shift, and no MOVZ
  /// encoding exists at any shift (MOVZ is the preferred alias).
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // MOVZ takes precedence over MOVN.
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
        return false;

    Value = ~Value;
    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }
  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  // In SysRegOp a field of -1U marks the corresponding access form as
  // invalid for this register name.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MSRReg != -1U;
  }
  bool isSystemPStateField() const {
    if (!isSysReg()) return false;

    return SysReg.PStateField != -1U;
  }

  // Scalar vs. vector register classification.
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  // Vector register restricted to the FPR128_lo register class.
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
  // A 64-bit GPR written where the instruction encodes a 32-bit register;
  // remapped by addGPR32as64Operands below.
  bool isGPR32as64() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64sp0() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }

  /// Vector list with an explicit element count and element-type character.
  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }

  // Vector element-index operands, bounded by element size.
  bool isVectorIndex1() const {
    return Kind == k_VectorIndex && VectorIndex.Val == 1;
  }
  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
  }
  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
  }
  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
  }
  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
  }

  bool isToken() const override { return Kind == k_Token; }
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }

  /// Plain shift operand: LSL, LSR, ASR, ROR or MSL.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  /// Register-extend operand (UXTB..SXTX, or LSL) with a left-shift amount
  /// of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }

  /// Extend forms that take a 64-bit source register: UXTX, SXTX or LSL.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  /// 64-bit-index memory extend: LSL or SXTX, shifted by 0 or by log2 of
  /// the access width in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  /// 32-bit-index memory extend: UXTW or SXTW, shifted by 0 or by log2 of
  /// the access width in bytes.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1055 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1056 // to LDUR/STUR when the offset is not legal for the former but is for
1057 // the latter. As such, in addition to checking for being a legal unscaled
1058 // address, also check that it is not a legal scaled address. This avoids
1059 // ambiguity in the matcher.
1060 template<int Width>
1061 bool isSImm9OffsetFB() const {
1062 return isSImm9() && !isUImm12Offset<Width / 8>();
1063 }
  /// Check an ADRP target operand: either a symbolic expression (resolved
  /// later via relocation) or a 4KB-aligned constant fitting in the signed
  /// 21-bit page-count field.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // Range is +/- 2^20 pages of 4096 bytes, and the value must be
      // page-aligned.
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    // Non-constant expressions are accepted; the fixup handles them.
    return true;
  }
  /// Check an ADR target operand: either a symbolic expression or a
  /// constant fitting in the signed 21-bit byte-offset field.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // Range is +/- 2^20 bytes (no alignment requirement, unlike ADRP).
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    // Non-constant expressions are accepted; the fixup handles them.
    return true;
  }
1097 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1098 // Add as immediates when possible. Null MCExpr = 0.
1099 if (!Expr)
1100 Inst.addOperand(MCOperand::CreateImm(0));
1101 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1102 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1103 else
1104 Inst.addOperand(MCOperand::CreateExpr(Expr));
1105 }
  /// Add the register as-is.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  /// Add a 64-bit (X) register operand translated to its 32-bit (W)
  /// counterpart with the same encoding, for aliases spelled with X regs
  /// but encoded with W regs.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::CreateReg(Reg));
  }

  /// Vector registers are parsed/stored as Q registers; translate to the
  /// D register of the same number. Relies on D0..D31 and Q0..Q31 being
  /// contiguous in the generated register enum.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  /// Add a full 128-bit (Q) vector register.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  /// Add a register from the "low" vector range (V0-V15); range checking
  /// is done by the matcher's predicate, so the register is added as-is.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1143 template <unsigned NumRegs>
1144 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1146 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1147 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1148 unsigned FirstReg = FirstRegs[NumRegs - 1];
1150 Inst.addOperand(
1151 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1152 }
1154 template <unsigned NumRegs>
1155 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1156 assert(N == 1 && "Invalid number of operands!");
1157 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1158 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1159 unsigned FirstReg = FirstRegs[NumRegs - 1];
1161 Inst.addOperand(
1162 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1163 }
  // Vector lane-index operands: the parsed index is emitted directly as an
  // immediate. Range validation happened in the matcher predicates, so all
  // element-size variants share the same trivial implementation.
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
  /// Add a generic immediate operand via addExpr (constants become plain
  /// immediates; symbolic expressions are kept as expressions).
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
  /// Add the two operands of an ADD/SUB immediate: the value and its
  /// LSL shift amount (0 when no shift was written).
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
    }
  }
  /// Add the condition code as an immediate.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  /// ADRP target: a constant is emitted as a page delta (value >> 12);
  /// anything symbolic is passed through for the fixup/relocation.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
  }

  /// ADR target: no scaling needed, so forward to addImmOperands.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
  /// Scaled unsigned 12-bit load/store offset: a constant is divided by
  /// the access size (Scale bytes); a symbolic expression is added
  /// unscaled and handled by the fixup.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
  }
  /// Signed 9-bit offset (LDUR/STUR family): added unscaled.
  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  // Signed 7-bit scaled offsets (load/store pair): the byte offset is
  // divided by the access size (4, 8 or 16 bytes) before encoding.
  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
  }

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
  }

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
  }
  // Ranged immediates: the range named in the method was already verified
  // by the matcher predicate, so the constant is added unchanged.
  // cast<> asserts the operand really is a constant expression.
  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1281 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1282 assert(N == 1 && "Invalid number of operands!");
1283 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1284 assert(MCE && "Invalid constant immediate operand!");
1285 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1286 }
  // More ranged immediates, same pattern as above: the matcher predicate
  // has already validated the range, so the constant is added unchanged.
  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
  // Logical immediates (AND/ORR/EOR family) are stored as plain values but
  // encoded in the N:immr:imms bitmask format; translate here.
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // Mask to 32 bits so sign-extended input encodes as a 32-bit pattern.
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  // "Not" variants encode the bitwise complement; used by aliases such as
  // BIC spelled in terms of the inverted immediate.
  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  // AdvSIMD modified-immediate "type 10" (abcdefgh byte-mask form, FMOV
  // vector immediates): encode the constant into its 8-bit encoding.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1386 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1387 // Branch operands don't encode the low bits, so shift them off
1388 // here. If it's a label, however, just put it on directly as there's
1389 // not enough information now to do anything.
1390 assert(N == 1 && "Invalid number of operands!");
1391 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1392 if (!MCE) {
1393 addExpr(Inst, getImm());
1394 return;
1395 }
1396 assert(MCE && "Invalid constant immediate operand!");
1397 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1398 }
1400 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1401 // Branch operands don't encode the low bits, so shift them off
1402 // here. If it's a label, however, just put it on directly as there's
1403 // not enough information now to do anything.
1404 assert(N == 1 && "Invalid number of operands!");
1405 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1406 if (!MCE) {
1407 addExpr(Inst, getImm());
1408 return;
1409 }
1410 assert(MCE && "Invalid constant immediate operand!");
1411 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1412 }
1414 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1415 // Branch operands don't encode the low bits, so shift them off
1416 // here. If it's a label, however, just put it on directly as there's
1417 // not enough information now to do anything.
1418 assert(N == 1 && "Invalid number of operands!");
1419 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1420 if (!MCE) {
1421 addExpr(Inst, getImm());
1422 return;
1423 }
1424 assert(MCE && "Invalid constant immediate operand!");
1425 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1426 }
  /// Add the 8-bit encoded FP immediate.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  /// Add a DMB/DSB/ISB barrier option value.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getBarrier()));
  }

  // System registers carry separate encodings for MRS (read) and MSR
  // (write); the matcher picks the appropriate adder per instruction.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
  }

  /// Add a PSTATE field encoding (MSR immediate form).
  void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField));
  }

  /// Add a system-instruction Cn operand (0-15).
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getSysCR()));
  }

  /// Add a PRFM prefetch-operation value (0-31).
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
  }
  /// Pack the shift type and amount into the single-immediate shifter
  /// encoding used by data-processing instructions.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  /// Arithmetic extend for 32-bit forms: a plain LSL is canonicalized to
  /// UXTW before packing type+amount into the extend immediate.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  /// Arithmetic extend for 64-bit forms: LSL canonicalizes to UXTX.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  /// Register-offset load/store extend: two operands — whether the extend
  /// is a signed one (SXTW/SXTX), and whether the offset is shifted.
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
  }
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    // Second operand records "was an amount written at all", not its value.
    Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
  }
  // MOV-immediate aliases: extract the 16-bit chunk at the given shift
  // position from the full constant. MOVZ uses the value directly...
  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
  }

  // ...while MOVN encodes the bitwise complement of the value.
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
  }

  void print(raw_ostream &OS) const override;
  // --- Static factory methods. Each allocates an operand of one kind and
  // fills in its payload plus source-range locations. ---

  /// Create a literal token operand (mnemonic pieces, punctuation).
  /// IsSuffix marks tokens glued to the previous operand (e.g. "]").
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    // Tokens are a single point in the source; end == start.
    Op->EndLoc = S;
    return Op;
  }

  /// Create a register operand; isVector selects the vector-register
  /// matching path.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.isVector = isVector;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a vector-list operand ({v0.8b - v3.8b} etc.): first register,
  /// list length, and the element count/kind from the layout suffix.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementKind = ElementKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a vector lane-index operand ([idx]).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create an immediate operand holding an arbitrary MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1581 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1582 unsigned ShiftAmount,
1583 SMLoc S, SMLoc E,
1584 MCContext &Ctx) {
1585 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1586 Op->ShiftedImm .Val = Val;
1587 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1588 Op->StartLoc = S;
1589 Op->EndLoc = E;
1590 return Op;
1591 }
  /// Create a condition-code operand (EQ, NE, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create an FP immediate operand holding the 8-bit FMOV encoding.
  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
                                                     MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a barrier-option operand (DMB/DSB/ISB argument).
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a system-register operand. Carries the spelled name plus all
  /// three possible encodings (MRS, MSR, PSTATE); the adder used depends
  /// on the instruction being matched.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a system-instruction Cn operand.
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a PRFM prefetch-operation operand.
  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
                                                        MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
    Op->Prefetch.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a shift/extend operand. HasExplicitAmount distinguishes
  /// "uxtw #0" from plain "uxtw" (needed for 8-bit mem-extend forms).
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1665 };
1667 } // end anonymous namespace.
/// Debug-print this operand in a human-readable <kind ...> form.
/// Output format is for diagnostics only and is not parsed back.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Show both the raw 8-bit encoding and the decoded float value.
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    // Prefer the symbolic barrier-option name when the value maps to one.
    bool Valid;
    StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
    if (Valid)
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    getShiftedImmVal()->print(OS);
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    // Print each register number in the list, relying on consecutive
    // enum values for consecutive registers.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    // Prefer the symbolic prfop name when the value maps to one.
    bool Valid;
    StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
    if (Valid)
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    // Mark amounts the user never wrote explicitly.
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
  }
}
1740 /// @name Auto-generated Match Functions
1741 /// {
1743 static unsigned MatchRegisterName(StringRef Name);
1745 /// }
/// Map a (lowercased) vector register name "v0".."v31" to the
/// corresponding Q register; returns 0 (no register) for anything else.
/// Names with leading zeros ("v01") intentionally do not match.
static unsigned matchVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name)
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
1784 static bool isValidVectorKind(StringRef Name) {
1785 return StringSwitch<bool>(Name.lower())
1786 .Case(".8b", true)
1787 .Case(".16b", true)
1788 .Case(".4h", true)
1789 .Case(".8h", true)
1790 .Case(".2s", true)
1791 .Case(".4s", true)
1792 .Case(".1d", true)
1793 .Case(".2d", true)
1794 .Case(".1q", true)
1795 // Accept the width neutral ones, too, for verbose syntax. If those
1796 // aren't used in the right places, the token operand won't match so
1797 // all will work out.
1798 .Case(".b", true)
1799 .Case(".h", true)
1800 .Case(".s", true)
1801 .Case(".d", true)
1802 .Default(false);
1803 }
/// Split a validated vector-kind suffix (e.g. ".16b") into its lane count
/// and element-kind character. Width-neutral kinds (".b") yield
/// NumElements == 0.
static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
                                 char &ElementKind) {
  assert(isValidVectorKind(Name));

  // Last character is always the element kind (b/h/s/d/q).
  ElementKind = Name.lower()[Name.size() - 1];
  NumElements = 0;

  // Size-2 names are the width-neutral forms like ".b": no lane count.
  if (Name.size() == 2)
    return;

  // Parse the lane count
  Name = Name.drop_front();
  while (isdigit(Name.front())) {
    NumElements = 10 * NumElements + (Name.front() - '0');
    Name = Name.drop_front();
  }
}
/// MCTargetAsmParser hook: parse a register, reporting its source range.
/// Returns true (failure) when the current token is not a register name.
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                     SMLoc &EndLoc) {
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  // tryParseRegister consumed the token on success; end is one before the
  // current location.
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  // tryParseRegister returns -1 when no register was matched.
  return (RegNo == (unsigned)-1);
}
// Matches a register name or register alias previously defined by '.req'.
// Returns 0 when the name matches neither a real register of the requested
// kind nor a same-kind alias.
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                  bool isVector) {
  unsigned RegNum = isVector ? matchVectorRegName(Name)
                             : MatchRegisterName(Name);

  if (RegNum == 0) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return 0;
    // set RegNum if the match is the right kind of register
    if (isVector == Entry->getValue().first)
      RegNum = Entry->getValue().second;
  }
  return RegNum;
}
/// tryParseRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list.
/// Returns the register number, or -1 if no register matched (token not
/// consumed in that case).
int AArch64AsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
  // Also handle a few aliases of registers.
  if (RegNum == 0)
    RegNum = StringSwitch<unsigned>(lowerCase)
                 .Case("fp",  AArch64::FP)
                 .Case("lr",  AArch64::LR)
                 .Case("x31", AArch64::XZR)
                 .Case("w31", AArch64::WZR)
                 .Default(0);

  if (RegNum == 0)
    return -1;

  Parser.Lex(); // Eat identifier token.
  return RegNum;
}
/// tryMatchVectorRegister - Try to parse a vector register name with optional
/// kind specifier. If it is a register specifier, eat the token and return it.
/// On success returns the register number and sets \p Kind to the layout
/// suffix (including the '.') if one was present; returns -1 on failure,
/// emitting an error only when \p expected is set or the suffix is invalid.
int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    TokError("vector register expected");
    return -1;
  }

  StringRef Name = Parser.getTok().getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, true);

  if (RegNum) {
    if (Next != StringRef::npos) {
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind)) {
        TokError("invalid vector kind qualifier");
        return -1;
      }
    }
    Parser.Lex(); // Eat the register token.
    return RegNum;
  }

  if (expected)
    TokError("vector register expected");
  return -1;
}
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
/// Accepts "cN" or "CN" with 0 <= N <= 15, pushing a SysCR operand.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  // Everything after the leading 'c' must be a decimal number in range.
  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(
      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
  return MatchOperand_Success;
}
/// tryParsePrefetch - Try to parse a prefetch operand.
/// Accepts either a (possibly '#'-prefixed) immediate in [0,31] or a
/// symbolic prfop name (e.g. "pldl1keep").
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    if (Hash)
      Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only a literal constant is allowed here, not a symbolic expression.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > 31) {
      TokError("prefetch operand out of range, [0,31] expected");
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  // Look the identifier up as a named prefetch operation.
  bool Valid;
  unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
  if (!Valid) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
  return MatchOperand_Success;
}
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
///
/// The operand may carry a relocation modifier (":got:sym", "sym@page", ...).
/// Bare symbols with no modifier are wrapped as VK_ABS_PAGE, the ELF
/// spelling of a plain ADRP relocation. Only page-granular modifiers are
/// accepted; everything else is rejected with a diagnostic.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr;

  // An optional '#' may precede the expression.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2038 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2039 /// instruction.
2040 AArch64AsmParser::OperandMatchResultTy
2041 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2042 MCAsmParser &Parser = getParser();
2043 SMLoc S = getLoc();
2044 const MCExpr *Expr;
2046 if (Parser.getTok().is(AsmToken::Hash)) {
2047 Parser.Lex(); // Eat hash token.
2048 }
2050 if (getParser().parseExpression(Expr))
2051 return MatchOperand_ParseFail;
2053 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2054 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2056 return MatchOperand_Success;
2057 }
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Handles three spellings: a real literal ("#1.5"), a hex-encoded 8-bit
/// FP immediate ("#0x70"), and an integer literal re-interpreted as a
/// floating-point value. The result is the 8-bit AArch64 FP-immediate
/// encoding (or -1 for the special zero case, resolved later).
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm returns -1 if the value has no 8-bit FP encoding.
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
    // A non-negated "0x.." literal is taken as the raw 8-bit encoding
    // itself rather than a value to be encoded.
    // NOTE(review): a negated hex literal ("#-0x10") falls into the APFloat
    // branch below and is parsed as a float string — confirm that is the
    // intended behavior for such inputs.
    if (!isNegative && Tok.getString().startswith("0x")) {
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      // Treat the integer text as a floating-point literal (e.g. "#1").
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  // Without a leading '#' this may legitimately be some other operand kind.
  if (!Hash)
    return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
///
/// Grammar: "#imm" or "#imm, lsl #N". When no explicit shift is given and
/// the constant is a multiple of 0x1000 larger than 0xfff, it is canonicalized
/// to (imm >> 12, lsl #12) so it fits the 12-bit immediate field.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift: default to 0, but fold an implicit "lsl #12" out of
    // suitably-shaped constants.
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    if (MCE) {
      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::Create(Val >> 12, getContext());
        ShiftAmount = 12;
      }
    }
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                        getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex();
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Range-checking of the shift amount is left to operand matching.
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2189 /// parseCondCodeString - Parse a Condition Code string.
2190 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2191 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2192 .Case("eq", AArch64CC::EQ)
2193 .Case("ne", AArch64CC::NE)
2194 .Case("cs", AArch64CC::HS)
2195 .Case("hs", AArch64CC::HS)
2196 .Case("cc", AArch64CC::LO)
2197 .Case("lo", AArch64CC::LO)
2198 .Case("mi", AArch64CC::MI)
2199 .Case("pl", AArch64CC::PL)
2200 .Case("vs", AArch64CC::VS)
2201 .Case("vc", AArch64CC::VC)
2202 .Case("hi", AArch64CC::HI)
2203 .Case("ls", AArch64CC::LS)
2204 .Case("ge", AArch64CC::GE)
2205 .Case("lt", AArch64CC::LT)
2206 .Case("gt", AArch64CC::GT)
2207 .Case("le", AArch64CC::LE)
2208 .Case("al", AArch64CC::AL)
2209 .Case("nv", AArch64CC::NV)
2210 .Default(AArch64CC::Invalid);
2211 return CC;
2212 }
2214 /// parseCondCode - Parse a Condition Code operand.
2215 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2216 bool invertCondCode) {
2217 MCAsmParser &Parser = getParser();
2218 SMLoc S = getLoc();
2219 const AsmToken &Tok = Parser.getTok();
2220 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2222 StringRef Cond = Tok.getString();
2223 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2224 if (CC == AArch64CC::Invalid)
2225 return TokError("invalid condition code");
2226 Parser.Lex(); // Eat identifier token.
2228 if (invertCondCode) {
2229 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2230 return TokError("condition codes AL and NV are invalid for this instruction");
2231 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2232 }
2234 Operands.push_back(
2235 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2236 return false;
2237 }
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Recognizes the shift (lsl/lsr/asr/ror/msl) and extend
/// (uxtb/.../sxtx) specifiers. Shifts require an explicit immediate;
/// extends default to an implicit #0 when no amount follows.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  if (Hash)
    Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number or a parenthesized expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Emits a "sys" token operand followed by the op1/Cn/Cm/op2 encoding of the
/// named maintenance operation, then parses the optional trailing register.
/// Operations whose name contains "all" take no register; all others
/// require one.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

// Push the four SYS operands (#op1, Cn, Cm, #op2) for one alias.
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
  do {                                                                         \
    Expr = MCConstantExpr::Create(op1, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
    Expr = MCConstantExpr::Create(op2, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
  } while (0)

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
    } else {
      return TokError("invalid operand for IC instruction");
    }
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
    } else {
      return TokError("invalid operand for DC instruction");
    }
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
    } else {
      return TokError("invalid operand for AT instruction");
    }
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
    } else {
      return TokError("invalid operand for TLBI instruction");
    }
  }

#undef SYS_ALIAS

  Parser.Lex(); // Eat operand.

  // "...all..." operations act on everything and thus take no register.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.

    // Tok tracks the lexer's current token, so this now inspects the token
    // following the comma.
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

    HasRegister = true;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");
  }

  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  }
  else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
/// tryParseBarrierOperand - Parse a DMB/DSB/ISB barrier operand: either a
/// 4-bit immediate (optionally '#'-prefixed) or a named option such as "sy".
/// For ISB the only acceptable named option is "sy".
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // Can be either a #imm style literal or an option name
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    if (Hash)
      Parser.Lex(); // Eat the '#'
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // Barrier immediates occupy a 4-bit field.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(
        AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Named option, e.g. "ish" or "osh".
  bool Valid;
  unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
  if (!Valid) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
2615 AArch64AsmParser::OperandMatchResultTy
2616 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2617 MCAsmParser &Parser = getParser();
2618 const AsmToken &Tok = Parser.getTok();
2620 if (Tok.isNot(AsmToken::Identifier))
2621 return MatchOperand_NoMatch;
2623 bool IsKnown;
2624 auto MRSMapper = AArch64SysReg::MRSMapper(STI.getFeatureBits());
2625 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), IsKnown);
2626 assert(IsKnown == (MRSReg != -1U) &&
2627 "register should be -1 if and only if it's unknown");
2629 auto MSRMapper = AArch64SysReg::MSRMapper(STI.getFeatureBits());
2630 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), IsKnown);
2631 assert(IsKnown == (MSRReg != -1U) &&
2632 "register should be -1 if and only if it's unknown");
2634 uint32_t PStateField =
2635 AArch64PState::PStateMapper().fromString(Tok.getString(), IsKnown);
2636 assert(IsKnown == (PStateField != -1U) &&
2637 "register should be -1 if and only if it's unknown");
2639 Operands.push_back(AArch64Operand::CreateSysReg(
2640 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2641 Parser.Lex(); // Eat identifier
2643 return MatchOperand_Success;
2644 }
/// tryParseVectorRegister - Parse a vector register operand.
///
/// Pushes the register, an optional kind-suffix token (e.g. ".4s"), and an
/// optional lane index ("[n]"). Returns true only when no vector register
/// was recognized at all (so the caller can try a scalar register).
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      // NOTE(review): the diagnostic is emitted but we still return false
      // ("parsed"); the recorded error aborts assembly later. Confirm this
      // early-out-without-failure is intentional.
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}
/// parseRegister - Parse a non-vector register operand.
///
/// First tries vector-register syntax, then scalar. Also recognizes the
/// literal "[1]" suffix used by a few instructions (e.g. FMOVXDhighr) and
/// pushes it as three separate text tokens. Returns true if nothing was
/// parsed.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  // Try for a vector register.
  if (!tryParseVectorRegister(Operands))
    return false;

  // Try for a scalar register.
  int64_t Reg = tryParseRegister();
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));

  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
  // as a string token in the instruction itself.
  if (getLexer().getKind() == AsmToken::LBrac) {
    SMLoc LBracS = getLoc();
    Parser.Lex();
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Integer)) {
      SMLoc IntS = getLoc();
      int64_t Val = Tok.getIntVal();
      // Only "[1]" exactly is treated this way; any other bracketed text is
      // left for subsequent operand parsing.
      if (Val == 1) {
        Parser.Lex();
        if (getLexer().getKind() == AsmToken::RBrac) {
          SMLoc RBracS = getLoc();
          Parser.Lex();
          Operands.push_back(
              AArch64Operand::CreateToken("[", false, LBracS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("1", false, IntS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("]", false, RBracS, getContext()));
          return false;
        }
      }
    }
  }

  return false;
}
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ELF relocation specifier of the form ":spec:expr" (e.g.
/// ":lo12:sym"). When a specifier is present the parsed expression is
/// wrapped in an AArch64MCExpr carrying the variant kind. Returns true on
/// error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  // RefKind is only meaningful when HasELFModifier is set below.
  AArch64MCExpr::VariantKind RefKind;

  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat ':"
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier)) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  // NOTE(review): "gottprel_lo12" maps to the _NC variant;
                  // presumably deliberate (no checking form exists) — verify.
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    Parser.Lex(); // Eat identifier

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
      return true;
    }
    Parser.Lex(); // Eat ':'
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  if (HasELFModifier)
    ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());

  return false;
}
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts "{v0.8b - v3.8b}" range syntax or "{v0.8b, v1.8b, ...}"
/// comma syntax (registers must be sequential mod 32), with an optional
/// trailing lane index "[n]". Returns true on error.
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (Parser.getTok().is(AsmToken::Minus)) {
    // Range form: "{vN.k - vM.k}".
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Register numbers wrap at 32, so e.g. {v31 - v1} spans 2 registers.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");
    }

    Count += Space;
  }
  else {
    // Comma form: "{vN.k, vN+1.k, ...}".
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
        return Error(Loc, "registers must be sequential");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      // NOTE(review): error is reported but false ("no error") is returned,
      // mirroring tryParseVectorRegister — confirm this is intentional.
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}
2916 AArch64AsmParser::OperandMatchResultTy
2917 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2918 MCAsmParser &Parser = getParser();
2919 const AsmToken &Tok = Parser.getTok();
2920 if (!Tok.is(AsmToken::Identifier))
2921 return MatchOperand_NoMatch;
2923 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2925 MCContext &Ctx = getContext();
2926 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2927 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2928 return MatchOperand_NoMatch;
2930 SMLoc S = getLoc();
2931 Parser.Lex(); // Eat register
2933 if (Parser.getTok().isNot(AsmToken::Comma)) {
2934 Operands.push_back(
2935 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2936 return MatchOperand_Success;
2937 }
2938 Parser.Lex(); // Eat comma.
2940 if (Parser.getTok().is(AsmToken::Hash))
2941 Parser.Lex(); // Eat hash
2943 if (Parser.getTok().isNot(AsmToken::Integer)) {
2944 Error(getLoc(), "index must be absent or #0");
2945 return MatchOperand_ParseFail;
2946 }
2948 const MCExpr *ImmVal;
2949 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2950 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2951 Error(getLoc(), "index must be absent or #0");
2952 return MatchOperand_ParseFail;
2953 }
2955 Operands.push_back(
2956 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2957 return MatchOperand_Success;
2958 }
2960 /// parseOperand - Parse a arm instruction operand. For now this parses the
2961 /// operand regardless of the mnemonic.
2962 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2963 bool invertCondCode) {
2964 MCAsmParser &Parser = getParser();
2965 // Check if the current operand has a custom associated parser, if so, try to
2966 // custom parse the operand, or fallback to the general approach.
2967 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2968 if (ResTy == MatchOperand_Success)
2969 return false;
2970 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2971 // there was a match, but an error occurred, in which case, just return that
2972 // the operand parsing failed.
2973 if (ResTy == MatchOperand_ParseFail)
2974 return true;
2976 // Nothing custom, so do general case parsing.
2977 SMLoc S, E;
2978 switch (getLexer().getKind()) {
2979 default: {
2980 SMLoc S = getLoc();
2981 const MCExpr *Expr;
2982 if (parseSymbolicImmVal(Expr))
2983 return Error(S, "invalid operand");
2985 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2986 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2987 return false;
2988 }
2989 case AsmToken::LBrac: {
2990 SMLoc Loc = Parser.getTok().getLoc();
2991 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2992 getContext()));
2993 Parser.Lex(); // Eat '['
2995 // There's no comma after a '[', so we can parse the next operand
2996 // immediately.
2997 return parseOperand(Operands, false, false);
2998 }
2999 case AsmToken::LCurly:
3000 return parseVectorList(Operands);
3001 case AsmToken::Identifier: {
3002 // If we're expecting a Condition Code operand, then just parse that.
3003 if (isCondCode)
3004 return parseCondCode(Operands, invertCondCode);
3006 // If it's a register name, parse it.
3007 if (!parseRegister(Operands))
3008 return false;
3010 // This could be an optional "shift" or "extend" operand.
3011 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3012 // We can only continue if no tokens were eaten.
3013 if (GotShift != MatchOperand_NoMatch)
3014 return GotShift;
3016 // This was not a register so parse other operands that start with an
3017 // identifier (like labels) as expressions and create them as immediates.
3018 const MCExpr *IdVal;
3019 S = getLoc();
3020 if (getParser().parseExpression(IdVal))
3021 return true;
3023 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3024 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3025 return false;
3026 }
3027 case AsmToken::Integer:
3028 case AsmToken::Real:
3029 case AsmToken::Hash: {
3030 // #42 -> immediate.
3031 S = getLoc();
3032 if (getLexer().is(AsmToken::Hash))
3033 Parser.Lex();
3035 // Parse a negative sign
3036 bool isNegative = false;
3037 if (Parser.getTok().is(AsmToken::Minus)) {
3038 isNegative = true;
3039 // We need to consume this token only when we have a Real, otherwise
3040 // we let parseSymbolicImmVal take care of it
3041 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3042 Parser.Lex();
3043 }
3045 // The only Real that should come through here is a literal #0.0 for
3046 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3047 // so convert the value.
3048 const AsmToken &Tok = Parser.getTok();
3049 if (Tok.is(AsmToken::Real)) {
3050 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3051 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3052 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3053 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3054 Mnemonic != "fcmlt")
3055 return TokError("unexpected floating point literal");
3056 else if (IntVal != 0 || isNegative)
3057 return TokError("expected floating-point constant #0.0");
3058 Parser.Lex(); // Eat the token.
3060 Operands.push_back(
3061 AArch64Operand::CreateToken("#0", false, S, getContext()));
3062 Operands.push_back(
3063 AArch64Operand::CreateToken(".0", false, S, getContext()));
3064 return false;
3065 }
3067 const MCExpr *ImmVal;
3068 if (parseSymbolicImmVal(ImmVal))
3069 return true;
3071 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3072 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3073 return false;
3074 }
3075 case AsmToken::Equal: {
3076 SMLoc Loc = Parser.getTok().getLoc();
3077 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3078 return Error(Loc, "unexpected token in operand");
3079 Parser.Lex(); // Eat '='
3080 const MCExpr *SubExprVal;
3081 if (getParser().parseExpression(SubExprVal))
3082 return true;
3084 if (Operands.size() < 2 ||
3085 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3086 return true;
3088 bool IsXReg =
3089 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3090 Operands[1]->getReg());
3092 MCContext& Ctx = getContext();
3093 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3094 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3095 if (isa<MCConstantExpr>(SubExprVal)) {
3096 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3097 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3098 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3099 ShiftAmt += 16;
3100 Imm >>= 16;
3101 }
3102 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3103 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3104 Operands.push_back(AArch64Operand::CreateImm(
3105 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3106 if (ShiftAmt)
3107 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3108 ShiftAmt, true, S, E, Ctx));
3109 return false;
3110 }
3111 APInt Simm = APInt(64, Imm << ShiftAmt);
3112 // check if the immediate is an unsigned or signed 32-bit int for W regs
3113 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3114 return Error(Loc, "Immediate too large for register");
3115 }
3116 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3117 const MCExpr *CPLoc =
3118 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3119 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3120 return false;
3121 }
3122 }
3123 }
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Before operand parsing proper, this performs several transformations on
/// the mnemonic: it canonicalizes legacy "b<cond>" spellings into "b.<cond>",
/// diverts the AArch64-specific ".req" directive, expands the IC/DC/AT/TLBI
/// aliases of SYS, splits the mnemonic on '.' into separate suffix tokens,
/// and arranges for condition-code operands of conditional branch/select
/// mnemonics to be parsed as condition codes. Returns true on error.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize the "b<cond>" spellings into "b.<cond>" so the
  // suffix-splitting logic below only has to handle one form.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic: the text after the first
  // '.' is parsed as a condition code and pushed as a CondCode operand,
  // preceded by a "." suffix token so the matcher sees "b" "." <cc>.
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic. Note each slice here keeps its
  // leading '.' (slicing starts at Start, not Start + 1).
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    // N is the 1-based position of the operand being parsed, used to apply
    // the condition-code special cases above at the right operand slot.
    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  // Anything left over at this point is a stray token.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
/// validateInstruction - Reject instructions whose operand combinations the
/// generated matcher accepts but the architecture declares unpredictable
/// (writeback base overlapping a transfer register, LDP with Rt2 == Rt), and
/// restrict which symbolic-expression variants are legal as add/sub
/// immediates.
/// \param Loc per-operand source locations, in source operand order.
/// \returns true (after emitting an Error) if the instruction is rejected.
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback LDP forms: Rt/Rt2/Rn are read from MCInst operand slots
    // 1-3 (slot 0 is presumably the writeback result — TODO confirm against
    // the instruction definitions).
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    // isSubRegisterEq also catches e.g. a W register overlapping its X base.
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // FALLTHROUGH
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback LDP forms: Rt/Rt2 are operand slots 0-1.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // Writeback LDP forms where the base-overlap check does not apply
    // (FP/SIMD registers, or LDPSWpost): only check Rt2 == Rt.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    // Writeback STP forms: the base may not overlap either source register.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Writeback single-register loads: base may not overlap Rt.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Writeback single-register stores: base may not overlap Rt.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
        return Error(Loc[2], "invalid immediate expression");
      }

      // Only allow these with ADDXri.
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
          Inst.getOpcode() == AArch64::ADDXri)
        return false;

      // Only allow these with ADDXri/ADDWri.
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
          (Inst.getOpcode() == AArch64::ADDXri ||
           Inst.getOpcode() == AArch64::ADDWri))
        return false;

      // Don't allow expressions in the immediate field otherwise
      return Error(Loc[2], "invalid immediate expression");
    }
    return false;
  }
  default:
    return false;
  }
}
/// showMatchError - Translate a Match_* failure code from the generated
/// matcher (or one of AArch64's custom operand diagnostics) into a specific
/// human-readable error at \p Loc. Always returns true so callers can write
/// 'return showMatchError(...)'.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
  switch (ErrCode) {
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  case Match_InvalidIndex1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexB:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexH:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexS:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexD:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_MnemonicFail:
    return Error(Loc, "unrecognized instruction mnemonic");
  default:
    // Any new Match_* code must be given a message above.
    llvm_unreachable("unexpected error code!");
  }
}
3579 static const char *getSubtargetFeatureName(uint64_t Val);
3581 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3582 OperandVector &Operands,
3583 MCStreamer &Out,
3584 uint64_t &ErrorInfo,
3585 bool MatchingInlineAsm) {
3586 assert(!Operands.empty() && "Unexpect empty operand list!");
3587 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3588 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3590 StringRef Tok = Op.getToken();
3591 unsigned NumOperands = Operands.size();
3593 if (NumOperands == 4 && Tok == "lsl") {
3594 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3595 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3596 if (Op2.isReg() && Op3.isImm()) {
3597 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3598 if (Op3CE) {
3599 uint64_t Op3Val = Op3CE->getValue();
3600 uint64_t NewOp3Val = 0;
3601 uint64_t NewOp4Val = 0;
3602 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3603 Op2.getReg())) {
3604 NewOp3Val = (32 - Op3Val) & 0x1f;
3605 NewOp4Val = 31 - Op3Val;
3606 } else {
3607 NewOp3Val = (64 - Op3Val) & 0x3f;
3608 NewOp4Val = 63 - Op3Val;
3609 }
3611 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3612 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3614 Operands[0] = AArch64Operand::CreateToken(
3615 "ubfm", false, Op.getStartLoc(), getContext());
3616 Operands.push_back(AArch64Operand::CreateImm(
3617 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3618 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3619 Op3.getEndLoc(), getContext());
3620 }
3621 }
3622 } else if (NumOperands == 5) {
3623 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3624 // UBFIZ -> UBFM aliases.
3625 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3626 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3627 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3628 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3630 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3631 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3632 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3634 if (Op3CE && Op4CE) {
3635 uint64_t Op3Val = Op3CE->getValue();
3636 uint64_t Op4Val = Op4CE->getValue();
3638 uint64_t RegWidth = 0;
3639 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3640 Op1.getReg()))
3641 RegWidth = 64;
3642 else
3643 RegWidth = 32;
3645 if (Op3Val >= RegWidth)
3646 return Error(Op3.getStartLoc(),
3647 "expected integer in range [0, 31]");
3648 if (Op4Val < 1 || Op4Val > RegWidth)
3649 return Error(Op4.getStartLoc(),
3650 "expected integer in range [1, 32]");
3652 uint64_t NewOp3Val = 0;
3653 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3654 Op1.getReg()))
3655 NewOp3Val = (32 - Op3Val) & 0x1f;
3656 else
3657 NewOp3Val = (64 - Op3Val) & 0x3f;
3659 uint64_t NewOp4Val = Op4Val - 1;
3661 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3662 return Error(Op4.getStartLoc(),
3663 "requested insert overflows register");
3665 const MCExpr *NewOp3 =
3666 MCConstantExpr::Create(NewOp3Val, getContext());
3667 const MCExpr *NewOp4 =
3668 MCConstantExpr::Create(NewOp4Val, getContext());
3669 Operands[3] = AArch64Operand::CreateImm(
3670 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3671 Operands[4] = AArch64Operand::CreateImm(
3672 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3673 if (Tok == "bfi")
3674 Operands[0] = AArch64Operand::CreateToken(
3675 "bfm", false, Op.getStartLoc(), getContext());
3676 else if (Tok == "sbfiz")
3677 Operands[0] = AArch64Operand::CreateToken(
3678 "sbfm", false, Op.getStartLoc(), getContext());
3679 else if (Tok == "ubfiz")
3680 Operands[0] = AArch64Operand::CreateToken(
3681 "ubfm", false, Op.getStartLoc(), getContext());
3682 else
3683 llvm_unreachable("No valid mnemonic for alias?");
3684 }
3685 }
3687 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3688 // UBFX -> UBFM aliases.
3689 } else if (NumOperands == 5 &&
3690 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3691 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3692 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3693 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3695 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3696 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3697 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3699 if (Op3CE && Op4CE) {
3700 uint64_t Op3Val = Op3CE->getValue();
3701 uint64_t Op4Val = Op4CE->getValue();
3703 uint64_t RegWidth = 0;
3704 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3705 Op1.getReg()))
3706 RegWidth = 64;
3707 else
3708 RegWidth = 32;
3710 if (Op3Val >= RegWidth)
3711 return Error(Op3.getStartLoc(),
3712 "expected integer in range [0, 31]");
3713 if (Op4Val < 1 || Op4Val > RegWidth)
3714 return Error(Op4.getStartLoc(),
3715 "expected integer in range [1, 32]");
3717 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3719 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3720 return Error(Op4.getStartLoc(),
3721 "requested extract overflows register");
3723 const MCExpr *NewOp4 =
3724 MCConstantExpr::Create(NewOp4Val, getContext());
3725 Operands[4] = AArch64Operand::CreateImm(
3726 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3727 if (Tok == "bfxil")
3728 Operands[0] = AArch64Operand::CreateToken(
3729 "bfm", false, Op.getStartLoc(), getContext());
3730 else if (Tok == "sbfx")
3731 Operands[0] = AArch64Operand::CreateToken(
3732 "sbfm", false, Op.getStartLoc(), getContext());
3733 else if (Tok == "ubfx")
3734 Operands[0] = AArch64Operand::CreateToken(
3735 "ubfm", false, Op.getStartLoc(), getContext());
3736 else
3737 llvm_unreachable("No valid mnemonic for alias?");
3738 }
3739 }
3740 }
3741 }
3742 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3743 // InstAlias can't quite handle this since the reg classes aren't
3744 // subclasses.
3745 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3746 // The source register can be Wn here, but the matcher expects a
3747 // GPR64. Twiddle it here if necessary.
3748 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3749 if (Op.isReg()) {
3750 unsigned Reg = getXRegFromWReg(Op.getReg());
3751 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3752 Op.getEndLoc(), getContext());
3753 }
3754 }
3755 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3756 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3757 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3758 if (Op.isReg() &&
3759 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3760 Op.getReg())) {
3761 // The source register can be Wn here, but the matcher expects a
3762 // GPR64. Twiddle it here if necessary.
3763 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3764 if (Op.isReg()) {
3765 unsigned Reg = getXRegFromWReg(Op.getReg());
3766 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3767 Op.getEndLoc(), getContext());
3768 }
3769 }
3770 }
3771 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3772 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3773 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3774 if (Op.isReg() &&
3775 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3776 Op.getReg())) {
3777 // The source register can be Wn here, but the matcher expects a
3778 // GPR32. Twiddle it here if necessary.
3779 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3780 if (Op.isReg()) {
3781 unsigned Reg = getWRegFromXReg(Op.getReg());
3782 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3783 Op.getEndLoc(), getContext());
3784 }
3785 }
3786 }
3788 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3789 if (NumOperands == 3 && Tok == "fmov") {
3790 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3791 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3792 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3793 unsigned zreg =
3794 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3795 RegOp.getReg())
3796 ? AArch64::WZR
3797 : AArch64::XZR;
3798 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3799 Op.getEndLoc(), getContext());
3800 }
3801 }
3803 MCInst Inst;
3804 // First try to match against the secondary set of tables containing the
3805 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3806 unsigned MatchResult =
3807 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3809 // If that fails, try against the alternate table containing long-form NEON:
3810 // "fadd v0.2s, v1.2s, v2.2s"
3811 if (MatchResult != Match_Success)
3812 MatchResult =
3813 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3815 switch (MatchResult) {
3816 case Match_Success: {
3817 // Perform range checking and other semantic validations
3818 SmallVector<SMLoc, 8> OperandLocs;
3819 NumOperands = Operands.size();
3820 for (unsigned i = 1; i < NumOperands; ++i)
3821 OperandLocs.push_back(Operands[i]->getStartLoc());
3822 if (validateInstruction(Inst, OperandLocs))
3823 return true;
3825 Inst.setLoc(IDLoc);
3826 Out.EmitInstruction(Inst, STI);
3827 return false;
3828 }
3829 case Match_MissingFeature: {
3830 assert(ErrorInfo && "Unknown missing feature!");
3831 // Special case the error message for the very common case where only
3832 // a single subtarget feature is missing (neon, e.g.).
3833 std::string Msg = "instruction requires:";
3834 uint64_t Mask = 1;
3835 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3836 if (ErrorInfo & Mask) {
3837 Msg += " ";
3838 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3839 }
3840 Mask <<= 1;
3841 }
3842 return Error(IDLoc, Msg);
3843 }
3844 case Match_MnemonicFail:
3845 return showMatchError(IDLoc, MatchResult);
3846 case Match_InvalidOperand: {
3847 SMLoc ErrorLoc = IDLoc;
3848 if (ErrorInfo != ~0ULL) {
3849 if (ErrorInfo >= Operands.size())
3850 return Error(IDLoc, "too few operands for instruction");
3852 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3853 if (ErrorLoc == SMLoc())
3854 ErrorLoc = IDLoc;
3855 }
3856 // If the match failed on a suffix token operand, tweak the diagnostic
3857 // accordingly.
3858 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3859 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3860 MatchResult = Match_InvalidSuffix;
3862 return showMatchError(ErrorLoc, MatchResult);
3863 }
3864 case Match_InvalidMemoryIndexed1:
3865 case Match_InvalidMemoryIndexed2:
3866 case Match_InvalidMemoryIndexed4:
3867 case Match_InvalidMemoryIndexed8:
3868 case Match_InvalidMemoryIndexed16:
3869 case Match_InvalidCondCode:
3870 case Match_AddSubRegExtendSmall:
3871 case Match_AddSubRegExtendLarge:
3872 case Match_AddSubSecondSource:
3873 case Match_LogicalSecondSource:
3874 case Match_AddSubRegShift32:
3875 case Match_AddSubRegShift64:
3876 case Match_InvalidMovImm32Shift:
3877 case Match_InvalidMovImm64Shift:
3878 case Match_InvalidFPImm:
3879 case Match_InvalidMemoryWExtend8:
3880 case Match_InvalidMemoryWExtend16:
3881 case Match_InvalidMemoryWExtend32:
3882 case Match_InvalidMemoryWExtend64:
3883 case Match_InvalidMemoryWExtend128:
3884 case Match_InvalidMemoryXExtend8:
3885 case Match_InvalidMemoryXExtend16:
3886 case Match_InvalidMemoryXExtend32:
3887 case Match_InvalidMemoryXExtend64:
3888 case Match_InvalidMemoryXExtend128:
3889 case Match_InvalidMemoryIndexed4SImm7:
3890 case Match_InvalidMemoryIndexed8SImm7:
3891 case Match_InvalidMemoryIndexed16SImm7:
3892 case Match_InvalidMemoryIndexedSImm9:
3893 case Match_InvalidImm0_7:
3894 case Match_InvalidImm0_15:
3895 case Match_InvalidImm0_31:
3896 case Match_InvalidImm0_63:
3897 case Match_InvalidImm0_127:
3898 case Match_InvalidImm0_65535:
3899 case Match_InvalidImm1_8:
3900 case Match_InvalidImm1_16:
3901 case Match_InvalidImm1_32:
3902 case Match_InvalidImm1_64:
3903 case Match_InvalidIndex1:
3904 case Match_InvalidIndexB:
3905 case Match_InvalidIndexH:
3906 case Match_InvalidIndexS:
3907 case Match_InvalidIndexD:
3908 case Match_InvalidLabel:
3909 case Match_MSR:
3910 case Match_MRS: {
3911 if (ErrorInfo >= Operands.size())
3912 return Error(IDLoc, "too few operands for instruction");
3913 // Any time we get here, there's nothing fancy to do. Just get the
3914 // operand SMLoc and display the diagnostic.
3915 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3916 if (ErrorLoc == SMLoc())
3917 ErrorLoc = IDLoc;
3918 return showMatchError(ErrorLoc, MatchResult);
3919 }
3920 }
3922 llvm_unreachable("Implement any new match types added!");
3923 }
3925 /// ParseDirective parses the arm specific directives
3926 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3927 const MCObjectFileInfo::Environment Format =
3928 getContext().getObjectFileInfo()->getObjectFileType();
3929 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3930 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3932 StringRef IDVal = DirectiveID.getIdentifier();
3933 SMLoc Loc = DirectiveID.getLoc();
3934 if (IDVal == ".hword")
3935 return parseDirectiveWord(2, Loc);
3936 if (IDVal == ".word")
3937 return parseDirectiveWord(4, Loc);
3938 if (IDVal == ".xword")
3939 return parseDirectiveWord(8, Loc);
3940 if (IDVal == ".tlsdesccall")
3941 return parseDirectiveTLSDescCall(Loc);
3942 if (IDVal == ".ltorg" || IDVal == ".pool")
3943 return parseDirectiveLtorg(Loc);
3944 if (IDVal == ".unreq")
3945 return parseDirectiveUnreq(DirectiveID.getLoc());
3947 if (!IsMachO && !IsCOFF) {
3948 if (IDVal == ".inst")
3949 return parseDirectiveInst(Loc);
3950 }
3952 return parseDirectiveLOH(IDVal, Loc);
3953 }
3955 /// parseDirectiveWord
3956 /// ::= .word [ expression (, expression)* ]
3957 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3958 MCAsmParser &Parser = getParser();
3959 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3960 for (;;) {
3961 const MCExpr *Value;
3962 if (getParser().parseExpression(Value))
3963 return true;
3965 getParser().getStreamer().EmitValue(Value, Size);
3967 if (getLexer().is(AsmToken::EndOfStatement))
3968 break;
3970 // FIXME: Improve diagnostic.
3971 if (getLexer().isNot(AsmToken::Comma))
3972 return Error(L, "unexpected token in directive");
3973 Parser.Lex();
3974 }
3975 }
3977 Parser.Lex();
3978 return false;
3979 }
3981 /// parseDirectiveInst
3982 /// ::= .inst opcode [, ...]
3983 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
3984 MCAsmParser &Parser = getParser();
3985 if (getLexer().is(AsmToken::EndOfStatement)) {
3986 Parser.eatToEndOfStatement();
3987 Error(Loc, "expected expression following directive");
3988 return false;
3989 }
3991 for (;;) {
3992 const MCExpr *Expr;
3994 if (getParser().parseExpression(Expr)) {
3995 Error(Loc, "expected expression");
3996 return false;
3997 }
3999 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4000 if (!Value) {
4001 Error(Loc, "expected constant expression");
4002 return false;
4003 }
4005 getTargetStreamer().emitInst(Value->getValue());
4007 if (getLexer().is(AsmToken::EndOfStatement))
4008 break;
4010 if (getLexer().isNot(AsmToken::Comma)) {
4011 Error(Loc, "unexpected token in directive");
4012 return false;
4013 }
4015 Parser.Lex(); // Eat comma.
4016 }
4018 Parser.Lex();
4019 return false;
4020 }
4022 // parseDirectiveTLSDescCall:
4023 // ::= .tlsdesccall symbol
4024 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4025 StringRef Name;
4026 if (getParser().parseIdentifier(Name))
4027 return Error(L, "expected symbol after directive");
4029 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4030 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4031 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4033 MCInst Inst;
4034 Inst.setOpcode(AArch64::TLSDESCCALL);
4035 Inst.addOperand(MCOperand::CreateExpr(Expr));
4037 getParser().getStreamer().EmitInstruction(Inst, STI);
4038 return false;
4039 }
4041 /// ::= .loh <lohName | lohId> label1, ..., labelN
4042 /// The number of arguments depends on the loh identifier.
4043 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4044 if (IDVal != MCLOHDirectiveName())
4045 return true;
4046 MCLOHType Kind;
4047 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4048 if (getParser().getTok().isNot(AsmToken::Integer))
4049 return TokError("expected an identifier or a number in directive");
4050 // We successfully get a numeric value for the identifier.
4051 // Check if it is valid.
4052 int64_t Id = getParser().getTok().getIntVal();
4053 if (Id <= -1U && !isValidMCLOHType(Id))
4054 return TokError("invalid numeric identifier in directive");
4055 Kind = (MCLOHType)Id;
4056 } else {
4057 StringRef Name = getTok().getIdentifier();
4058 // We successfully parse an identifier.
4059 // Check if it is a recognized one.
4060 int Id = MCLOHNameToId(Name);
4062 if (Id == -1)
4063 return TokError("invalid identifier in directive");
4064 Kind = (MCLOHType)Id;
4065 }
4066 // Consume the identifier.
4067 Lex();
4068 // Get the number of arguments of this LOH.
4069 int NbArgs = MCLOHIdToNbArgs(Kind);
4071 assert(NbArgs != -1 && "Invalid number of arguments");
4073 SmallVector<MCSymbol *, 3> Args;
4074 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4075 StringRef Name;
4076 if (getParser().parseIdentifier(Name))
4077 return TokError("expected identifier in directive");
4078 Args.push_back(getContext().GetOrCreateSymbol(Name));
4080 if (Idx + 1 == NbArgs)
4081 break;
4082 if (getLexer().isNot(AsmToken::Comma))
4083 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4084 Lex();
4085 }
4086 if (getLexer().isNot(AsmToken::EndOfStatement))
4087 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4089 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4090 return false;
4091 }
/// parseDirectiveLtorg
///  ::= .ltorg | .pool
/// Flushes the pending constant pool at the current position via the target
/// streamer. The source location argument is unused; this directive cannot
/// fail, so it always returns false (handled).
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
/// parseDirectiveReq
///  ::= name .req registername
///
/// Records a register alias ("name") for a scalar or vector register in
/// RegisterReqs so later operand parsing can resolve it.
///
/// NOTE(review): the return convention here is inverted relative to the
/// other parseDirective* helpers in this file — every diagnostic path
/// returns false, and the success path at the bottom returns true. The
/// caller appears to depend on this; confirm before "fixing" it.
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  unsigned RegNum = tryParseRegister();
  bool IsVector = false;

  // Not a scalar register; try a vector register. A vector alias must be
  // written without a type suffix (e.g. "v0", not "v0.8b").
  if (RegNum == static_cast<unsigned>(-1)) {
    StringRef Kind;
    RegNum = tryMatchVectorRegister(Kind, false);
    if (!Kind.empty()) {
      Error(SRegLoc, "vector register without type specifier expected");
      return false;
    }
    IsVector = true;
  }

  if (RegNum == static_cast<unsigned>(-1)) {
    Parser.eatToEndOfStatement();
    Error(SRegLoc, "register name or alias expected");
    return false;
  }

  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
    Parser.eatToEndOfStatement();
    return false;
  }

  Parser.Lex(); // Consume the EndOfStatement

  // NOTE(review): insert() fails on ANY existing key, so this warns even
  // when the alias is redefined to the identical register — presumably
  // harmless, but verify that is the intended diagnostic behavior.
  auto pair = std::make_pair(IsVector, RegNum);
  if (!RegisterReqs.insert(std::make_pair(Name, pair)).second)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");

  return true;
}
4141 /// parseDirectiveUneq
4142 /// ::= .unreq registername
4143 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4144 MCAsmParser &Parser = getParser();
4145 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4146 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4147 Parser.eatToEndOfStatement();
4148 return false;
4149 }
4150 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4151 Parser.Lex(); // Eat the identifier.
4152 return false;
4153 }
4155 bool
4156 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4157 AArch64MCExpr::VariantKind &ELFRefKind,
4158 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4159 int64_t &Addend) {
4160 ELFRefKind = AArch64MCExpr::VK_INVALID;
4161 DarwinRefKind = MCSymbolRefExpr::VK_None;
4162 Addend = 0;
4164 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4165 ELFRefKind = AE->getKind();
4166 Expr = AE->getSubExpr();
4167 }
4169 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4170 if (SE) {
4171 // It's a simple symbol reference with no addend.
4172 DarwinRefKind = SE->getKind();
4173 return true;
4174 }
4176 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4177 if (!BE)
4178 return false;
4180 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4181 if (!SE)
4182 return false;
4183 DarwinRefKind = SE->getKind();
4185 if (BE->getOpcode() != MCBinaryExpr::Add &&
4186 BE->getOpcode() != MCBinaryExpr::Sub)
4187 return false;
4189 // See if the addend is is a constant, otherwise there's more going
4190 // on here than we can deal with.
4191 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4192 if (!AddendExpr)
4193 return false;
4195 Addend = AddendExpr->getValue();
4196 if (BE->getOpcode() == MCBinaryExpr::Sub)
4197 Addend = -Addend;
4199 // It's some symbol reference + a constant addend, but really
4200 // shouldn't use both Darwin and ELF syntax.
4201 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4202 DarwinRefKind == MCSymbolRefExpr::VK_None;
4203 }
4205 /// Force static initialization.
4206 extern "C" void LLVMInitializeAArch64AsmParser() {
4207 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4208 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4209 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4210 }
4212 #define GET_REGISTER_MATCHER
4213 #define GET_SUBTARGET_FEATURE_NAME
4214 #define GET_MATCHER_IMPLEMENTATION
4215 #include "AArch64GenAsmMatcher.inc"
4217 // Define this matcher function after the auto-generated include so we
4218 // have the match class enum definitions.
4219 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4220 unsigned Kind) {
4221 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4222 // If the kind is a token for a literal immediate, check if our asm
4223 // operand matches. This is for InstAliases which have a fixed-value
4224 // immediate in the syntax.
4225 int64_t ExpectedVal;
4226 switch (Kind) {
4227 default:
4228 return Match_InvalidOperand;
4229 case MCK__35_0:
4230 ExpectedVal = 0;
4231 break;
4232 case MCK__35_1:
4233 ExpectedVal = 1;
4234 break;
4235 case MCK__35_12:
4236 ExpectedVal = 12;
4237 break;
4238 case MCK__35_16:
4239 ExpectedVal = 16;
4240 break;
4241 case MCK__35_2:
4242 ExpectedVal = 2;
4243 break;
4244 case MCK__35_24:
4245 ExpectedVal = 24;
4246 break;
4247 case MCK__35_3:
4248 ExpectedVal = 3;
4249 break;
4250 case MCK__35_32:
4251 ExpectedVal = 32;
4252 break;
4253 case MCK__35_4:
4254 ExpectedVal = 4;
4255 break;
4256 case MCK__35_48:
4257 ExpectedVal = 48;
4258 break;
4259 case MCK__35_6:
4260 ExpectedVal = 6;
4261 break;
4262 case MCK__35_64:
4263 ExpectedVal = 64;
4264 break;
4265 case MCK__35_8:
4266 ExpectedVal = 8;
4267 break;
4268 }
4269 if (!Op.isImm())
4270 return Match_InvalidOperand;
4271 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4272 if (!CE)
4273 return Match_InvalidOperand;
4274 if (CE->getValue() == ExpectedVal)
4275 return Match_Success;
4276 return Match_InvalidOperand;
4277 }