summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
Diffstat (limited to 'libpixelflinger/codeflinger/MIPS64Assembler.cpp')
-rw-r--r--libpixelflinger/codeflinger/MIPS64Assembler.cpp1452
1 files changed, 1452 insertions, 0 deletions
diff --git a/libpixelflinger/codeflinger/MIPS64Assembler.cpp b/libpixelflinger/codeflinger/MIPS64Assembler.cpp
new file mode 100644
index 000000000..a5305cca2
--- /dev/null
+++ b/libpixelflinger/codeflinger/MIPS64Assembler.cpp
@@ -0,0 +1,1452 @@
1/* libs/pixelflinger/codeflinger/MIPS64Assembler.cpp
2**
3** Copyright 2015, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9** http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19/* MIPS64 assembler and ARM->MIPS64 assembly translator
20**
** The approach is to utilize the MIPSAssembler generator, using an inherited MIPS64Assembler
22** that overrides just the specific MIPS64r6 instructions.
23** For now ArmToMips64Assembler is copied over from ArmToMipsAssembler class,
24** changing some MIPS64r6 related stuff.
25**
26*/
27
28
29#define LOG_TAG "MIPS64Assembler"
30
31#include <stdio.h>
32#include <stdlib.h>
33#include <cutils/log.h>
34#include <cutils/properties.h>
35
36#if defined(WITH_LIB_HARDWARE)
37#include <hardware_legacy/qemu_tracing.h>
38#endif
39
40#include <private/pixelflinger/ggl_context.h>
41
42#include "MIPS64Assembler.h"
43#include "CodeCache.h"
44#include "mips64_disassem.h"
45
46
47#define NOT_IMPLEMENTED() LOG_ALWAYS_FATAL("Arm instruction %s not yet implemented\n", __func__)
48
49
50// ----------------------------------------------------------------------------
51
52namespace android {
53
54// ----------------------------------------------------------------------------
55#if 0
56#pragma mark -
57#pragma mark ArmToMips64Assembler...
58#endif
59
// Construct a translator that also records ARM-side disassembly metadata.
// abuf/linesz/instr_count describe the caller-owned ARM disassembly buffer
// used when pairing ARM instructions with the MIPS code generated for them.
ArmToMips64Assembler::ArmToMips64Assembler(const sp<Assembly>& assembly,
                                           char *abuf, int linesz, int instr_count)
    :   ARMAssemblerInterface(),
        mArmDisassemblyBuffer(abuf),
        mArmLineLength(linesz),
        mArmInstrCount(instr_count),
        mInum(0),
        mAssembly(assembly)
{
    // The real code generator: a MIPS64 assembler this class translates into.
    mMips = new MIPS64Assembler(assembly, this);
    // Per-ARM-instruction starting PCs, used to correlate ARM and MIPS listings.
    // NOTE(review): malloc result is not checked — assumed to succeed.
    mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
    init_conditional_labels();
}
73
74ArmToMips64Assembler::ArmToMips64Assembler(void* assembly)
75 : ARMAssemblerInterface(),
76 mArmDisassemblyBuffer(NULL),
77 mInum(0),
78 mAssembly(NULL)
79{
80 mMips = new MIPS64Assembler(assembly, this);
81 mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
82 init_conditional_labels();
83}
84
ArmToMips64Assembler::~ArmToMips64Assembler()
{
    delete mMips;
    free((void *) mArmPC);
}

// Current emit position in the generated MIPS code (delegates to mMips).
uint32_t* ArmToMips64Assembler::pc() const
{
    return mMips->pc();
}

// Start of the generated MIPS code buffer (delegates to mMips).
uint32_t* ArmToMips64Assembler::base() const
{
    return mMips->base();
}

// Reset translation state: conditional-label counter, ARM instruction
// counter, and the underlying MIPS assembler's emit position.
void ArmToMips64Assembler::reset()
{
    cond.labelnum = 0;
    mInum = 0;
    mMips->reset();
}

// Identify the target architecture for callers that dispatch on codegen arch.
int ArmToMips64Assembler::getCodegenArch()
{
    return CODEGEN_ARCH_MIPS64;
}

// Attach a comment to the next emitted MIPS instruction (for disassembly).
void ArmToMips64Assembler::comment(const char* string)
{
    mMips->comment(string);
}

// Define a branch-target label at the current MIPS emit position.
void ArmToMips64Assembler::label(const char* theLabel)
{
    mMips->label(theLabel);
}

// Dump a disassembly listing of the generated code under the given name.
void ArmToMips64Assembler::disassemble(const char* name)
{
    mMips->disassemble(name);
}
127
128void ArmToMips64Assembler::init_conditional_labels()
129{
130 int i;
131 for (i=0;i<99; ++i) {
132 sprintf(cond.label[i], "cond_%d", i);
133 }
134}
135
136
137
138#if 0
139#pragma mark -
140#pragma mark Prolog/Epilog & Generate...
141#endif
142
// Emit the function prologue: allocate a small stack frame, save the
// callee-saved registers this generator uses (s0-s4), and move the incoming
// context pointer (a0) into v0, which stands in for ARM r0.
// NOTE(review): the 40-byte frame is not 16-byte aligned — presumably
// acceptable for this generated leaf-style code; confirm against the N64 ABI.
void ArmToMips64Assembler::prolog()
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->DADDIU(R_sp, R_sp, -(5 * 8));
    mMips->SD(R_s0, R_sp, 0);
    mMips->SD(R_s1, R_sp, 8);
    mMips->SD(R_s2, R_sp, 16);
    mMips->SD(R_s3, R_sp, 24);
    mMips->SD(R_s4, R_sp, 32);
    mMips->MOVE(R_v0, R_a0);    // move context * passed in a0 to v0 (arm r0)
}
155
// Emit the function epilogue: restore s0-s4, release the stack frame, and
// return. The 'touched' register mask is part of the ARMAssemblerInterface
// contract but is unused here — all five saved registers are always restored.
void ArmToMips64Assembler::epilog(uint32_t touched)
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->LD(R_s0, R_sp, 0);
    mMips->LD(R_s1, R_sp, 8);
    mMips->LD(R_s2, R_sp, 16);
    mMips->LD(R_s3, R_sp, 24);
    mMips->LD(R_s4, R_sp, 32);
    mMips->DADDIU(R_sp, R_sp, (5 * 8));
    mMips->JR(R_ra);

}
169
// Finalize and publish the generated code under the given name (delegated).
int ArmToMips64Assembler::generate(const char* name)
{
    return mMips->generate(name);
}

// Resolve all recorded forward branches to their label addresses (delegated).
void ArmToMips64Assembler::fix_branches()
{
    mMips->fix_branches();
}

// Look up the MIPS code address a label resolved to (delegated).
uint32_t* ArmToMips64Assembler::pcForLabel(const char* label)
{
    return mMips->pcForLabel(label);
}
184
185void ArmToMips64Assembler::set_condition(int mode, int R1, int R2) {
186 if (mode == 2) {
187 cond.type = SBIT_COND;
188 } else {
189 cond.type = CMP_COND;
190 }
191 cond.r1 = R1;
192 cond.r2 = R2;
193}
194
195//----------------------------------------------------------
196
197#if 0
198#pragma mark -
199#pragma mark Addressing modes & shifters...
200#endif
201
202
203// do not need this for MIPS, but it is in the Interface (virtual)
// do not need this for MIPS, but it is in the Interface (virtual).
// ARM encodes immediates as an 8-bit value plus rotation; MIPS can
// materialize any 32-bit immediate, so this is a pass-through.
int ArmToMips64Assembler::buildImmediate(
            uint32_t immediate, uint32_t& rot, uint32_t& imm)
{
    // for MIPS, any 32-bit immediate is OK
    rot = 0;
    imm = immediate;
    return 0;
}

// shifters...

// Always true: any 32-bit immediate can be materialized on MIPS.
bool ArmToMips64Assembler::isValidImmediate(uint32_t immediate)
{
    // for MIPS, any 32-bit immediate is OK
    return true;
}

// Record an immediate operand; consumed later by dataProcAdrModes().
uint32_t ArmToMips64Assembler::imm(uint32_t immediate)
{
    amode.value = immediate;
    return AMODE_IMM;
}

// Record a register-shifted-by-immediate operand (Rm shifted by 'shift'
// using shift type LSL/LSR/ASR/ROR).
uint32_t ArmToMips64Assembler::reg_imm(int Rm, int type, uint32_t shift)
{
    amode.reg = Rm;
    amode.stype = type;
    amode.value = shift;
    return AMODE_REG_IMM;
}

uint32_t ArmToMips64Assembler::reg_rrx(int Rm)
{
    // reg_rrx mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}

uint32_t ArmToMips64Assembler::reg_reg(int Rm, int type, int Rs)
{
    // reg_reg mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}
246
247
248// addressing modes...
249// LDR(B)/STR(B)/PLD (immediate and Rm can be negative, which indicate U=0)
// Record a pre-indexed 12-bit immediate offset for LDR(B)/STR(B)/PLD, with
// optional writeback (W). Negative values indicate a subtracted offset (U=0).
uint32_t ArmToMips64Assembler::immed12_pre(int32_t immed12, int W)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)",
                        immed12);
    amode.value = immed12;
    amode.writeback = W;
    return AMODE_IMM_12_PRE;
}
259
// Record a post-indexed 12-bit immediate offset for LDR(B)/STR(B);
// post-index always writes the updated address back to the base register.
uint32_t ArmToMips64Assembler::immed12_post(int32_t immed12)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)",
                        immed12);

    amode.value = immed12;
    return AMODE_IMM_12_POST;
}
269
// Record a base + index-register addressing mode. Only the simple form is
// supported: shifted/scaled index and writeback abort at translation time.
uint32_t ArmToMips64Assembler::reg_scale_pre(int Rm, int type,
                                             uint32_t shift, int W)
{
    LOG_ALWAYS_FATAL_IF(W | type | shift, "reg_scale_pre adv modes not yet implemented");

    amode.reg = Rm;
    // amode.stype = type;      // more advanced modes not used in GGLAssembler yet
    // amode.value = shift;
    // amode.writeback = W;
    return AMODE_REG_SCALE_PRE;
}
281
// Unsupported addressing mode: aborts if ever reached.
uint32_t ArmToMips64Assembler::reg_scale_post(int Rm, int type, uint32_t shift)
{
    LOG_ALWAYS_FATAL("adr mode reg_scale_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}

// LDRH/LDRSB/LDRSH/STRH (immediate and Rm can be negative, which indicate U=0)
// NOTE(review): this mode aborts unconditionally on the first line, and
// unlike immed8_post it never stores amode.value — presumably intentional
// since the mode is unused by GGLAssembler; confirm before relying on it.
uint32_t ArmToMips64Assembler::immed8_pre(int32_t immed8, int W)
{
    LOG_ALWAYS_FATAL("adr mode immed8_pre not yet implemented\n");

    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
                        immed8);
    return AMODE_IMM_8_PRE;
}

// Record a post-indexed 8-bit immediate offset for LDRH/LDRSB/LDRSH/STRH.
uint32_t ArmToMips64Assembler::immed8_post(int32_t immed8)
{
    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
                        immed8);
    amode.value = immed8;
    return AMODE_IMM_8_POST;
}

// Record a base + index-register mode for halfword ops; writeback unsupported.
uint32_t ArmToMips64Assembler::reg_pre(int Rm, int W)
{
    LOG_ALWAYS_FATAL_IF(W, "reg_pre writeback not yet implemented");
    amode.reg = Rm;
    return AMODE_REG_PRE;
}

// Unsupported addressing mode: aborts if ever reached.
uint32_t ArmToMips64Assembler::reg_post(int Rm)
{
    LOG_ALWAYS_FATAL("adr mode reg_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}
320
321
322
323// ----------------------------------------------------------------------------
324
325#if 0
326#pragma mark -
327#pragma mark Data Processing...
328#endif
329
330
// ARM data-processing opcode mnemonics, indexed by the 4-bit opcode field.
// Kept for debug/disassembly use; not referenced in the visible portion of
// this file — TODO confirm usage before removing.
static const char * const dpOpNames[] = {
    "AND", "EOR", "SUB", "RSB", "ADD", "ADC", "SBC", "RSC",
    "TST", "TEQ", "CMP", "CMN", "ORR", "MOV", "BIC", "MVN"
};
335
336// check if the operand registers from a previous CMP or S-bit instruction
337// would be overwritten by this instruction. If so, move the value to a
338// safe register.
339// Note that we cannot tell at _this_ instruction time if a future (conditional)
340// instruction will _also_ use this value (a defect of the simple 1-pass, one-
341// instruction-at-a-time translation). Therefore we must be conservative and
342// save the value before it is overwritten. This costs an extra MOVE instr.
343
// If Rd would overwrite a register holding a live CMP/S-bit operand, copy
// that operand into a dedicated temp register (R_cmp/R_cmp2) first, so a
// later conditional instruction can still re-test the original values.
void ArmToMips64Assembler::protectConditionalOperands(int Rd)
{
    if (Rd == cond.r1) {
        mMips->MOVE(R_cmp, cond.r1);
        cond.r1 = R_cmp;
    }
    // r2 only holds a live value for two-register compares, not S-bit conds.
    if (cond.type == CMP_COND && Rd == cond.r2) {
        mMips->MOVE(R_cmp2, cond.r2);
        cond.r2 = R_cmp2;
    }
}
355
356
357// interprets the addressing mode, and generates the common code
358// used by the majority of data-processing ops. Many MIPS instructions
359// have a register-based form and a different immediate form. See
360// opAND below for an example. (this could be inlined)
361//
362// this works with the imm(), reg_imm() methods above, which are directly
363// called by the GLLAssembler.
364// note: _signed parameter defaults to false (un-signed)
365// note: tmpReg parameter defaults to 1, MIPS register AT
// Interpret the ARM operand-2 addressing mode and produce a MIPS source
// operand. On return, 'source' is either a register number (SRC_REG) or an
// immediate value small enough for the I-form instruction (SRC_IMM).
// Large immediates are materialized into tmpReg via LUI/ORI; register-shift
// operands are computed into tmpReg. '_signed' widens the acceptable
// immediate range check to [-32768, 32767] for signed I-form instructions.
// note: _signed parameter defaults to false (un-signed)
// note: tmpReg parameter defaults to 1, MIPS register AT
int ArmToMips64Assembler::dataProcAdrModes(int op, int& source, bool _signed, int tmpReg)
{
    if (op < AMODE_REG) {
        // op is itself a register number
        source = op;
        return SRC_REG;
    } else if (op == AMODE_IMM) {
        if ((!_signed && amode.value > 0xffff)
                || (_signed && ((int)amode.value < -32768 || (int)amode.value > 32767) )) {
            // immediate won't fit the 16-bit I-form field: build it in tmpReg
            mMips->LUI(tmpReg, (amode.value >> 16));
            if (amode.value & 0x0000ffff) {
                mMips->ORI(tmpReg, tmpReg, (amode.value & 0x0000ffff));
            }
            source = tmpReg;
            return SRC_REG;
        } else {
            source = amode.value;
            return SRC_IMM;
        }
    } else if (op == AMODE_REG_IMM) {
        // shift amode.reg by the recorded constant into tmpReg
        switch (amode.stype) {
            case LSL: mMips->SLL(tmpReg, amode.reg, amode.value); break;
            case LSR: mMips->SRL(tmpReg, amode.reg, amode.value); break;
            case ASR: mMips->SRA(tmpReg, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(tmpReg, amode.reg, amode.value); break;
        }
        source = tmpReg;
        return SRC_REG;
    } else {  // adr mode RRX is not used in GGL Assembler at this time
        // we are screwed, this should be exception, assert-fail or something
        LOG_ALWAYS_FATAL("adr mode reg_rrx not yet implemented\n");
        return SRC_ERROR;
    }
}
399
400
// Translate one ARM data-processing instruction (AND/ADD/SUB/MOV/CMP/...)
// into MIPS64. Conditional execution (cc != AL) is implemented by emitting a
// branch on the inverted condition that jumps past the instruction body.
// 's' records the destination for later S-bit condition tests.
void ArmToMips64Assembler::dataProcessing(int opcode, int cc,
                                          int s, int Rd, int Rn, uint32_t Op2)
{
    int src;    // src is modified by dataProcAdrModes() - passed as int&

    if (cc != AL) {
        protectConditionalOperands(Rd);
        // the branch tests register(s) set by prev CMP or instr with 'S' bit set
        // inverse the condition to jump past this conditional instruction
        // NOTE(review): labelnum is pre-incremented with no bound check against
        // the 99 pre-generated labels — assumed never exceeded in practice.
        ArmToMips64Assembler::B(cc^1, cond.label[++cond.labelnum]);
    } else {
        mArmPC[mInum++] = pc();  // save starting PC for this instr
    }

    switch (opcode) {
    case opAND:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->AND(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->ANDI(Rd, Rn, src);
        }
        break;

    case opADD:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->ADDU(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->ADDIU(Rd, Rn, src);
        }
        break;

    case opSUB:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->SUBU(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            // SUBIU is presumably a pseudo-op of the MIPS assembler — confirm
            mMips->SUBIU(Rd, Rn, src);
        }
        break;

    case opADD64:
        // 64-bit add (doubleword); set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->DADDU(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->DADDIU(Rd, Rn, src);
        }
        break;

    case opSUB64:
        // 64-bit subtract (doubleword); set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->DSUBU(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->DSUBIU(Rd, Rn, src);
        }
        break;

    case opEOR:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->XOR(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->XORI(Rd, Rn, src);
        }
        break;

    case opORR:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->OR(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->ORI(Rd, Rn, src);
        }
        break;

    case opBIC:    // Rd = Rn AND NOT(Op2)
        if (dataProcAdrModes(Op2, src) == SRC_IMM) {
            // if the operand is a 16-bit immediate, load it into the AT reg
            mMips->ORI(R_at, 0, src);
            src = R_at;
        }
        mMips->NOT(R_at, src);
        mMips->AND(Rd, Rn, R_at);
        break;

    case opRSB:    // reverse subtract: Rd = Op2 - Rn
        if (dataProcAdrModes(Op2, src) == SRC_IMM) {
            // if the operand is a 16-bit immediate, load it into the AT reg
            mMips->ORI(R_at, 0, src);
            src = R_at;
        }
        mMips->SUBU(Rd, src, Rn);   // subu with the parameters reversed
        break;

    case opMOV:
        if (Op2 < AMODE_REG) {  // op2 is reg # in this case
            mMips->MOVE(Rd, Op2);
        } else if (Op2 == AMODE_IMM) {
            if (amode.value > 0xffff) {
                // 32-bit immediate: build with LUI + optional ORI
                mMips->LUI(Rd, (amode.value >> 16));
                if (amode.value & 0x0000ffff) {
                    mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                }
            } else {
                mMips->ORI(Rd, 0, amode.value);
            }
        } else if (Op2 == AMODE_REG_IMM) {
            switch (amode.stype) {
                case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
                case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
                case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
                case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
            }
        }
        else {
            // adr mode RRX is not used in GGL Assembler at this time
            mMips->UNIMPL();
        }
        break;

    case opMVN:    // this is a 1's complement: NOT
        if (Op2 < AMODE_REG) {  // op2 is reg # in this case
            mMips->NOR(Rd, Op2, 0);     // NOT is NOR with 0
            break;
        } else if (Op2 == AMODE_IMM) {
            if (amode.value > 0xffff) {
                // 32-bit immediate: build with LUI + optional ORI
                mMips->LUI(Rd, (amode.value >> 16));
                if (amode.value & 0x0000ffff) {
                    mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                }
            } else {
                mMips->ORI(Rd, 0, amode.value);
            }
        } else if (Op2 == AMODE_REG_IMM) {
            switch (amode.stype) {
                case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
                case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
                case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
                case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
            }
        }
        else {
            // adr mode RRX is not used in GGL Assembler at this time
            mMips->UNIMPL();
        }
        // invert the value materialized above
        mMips->NOR(Rd, Rd, 0);      // NOT is NOR with 0
        break;

    case opCMP:
        // Either operand of a CMP instr could get overwritten by a subsequent
        // conditional instruction, which is ok, _UNLESS_ there is a _second_
        // conditional instruction. Under MIPS, this requires doing the comparison
        // again (SLT), and the original operands must be available. (and this
        // pattern of multiple conditional instructions from same CMP _is_ used
        // in GGL-Assembler)
        //
        // For now, if a conditional instr overwrites the operands, we will
        // move them to dedicated temp regs. This is ugly, and inefficient,
        // and should be optimized.
        //
        // WARNING: making an _Assumption_ that CMP operand regs will NOT be
        // trashed by intervening NON-conditional instructions. In the general
        // case this is legal, but it is NOT currently done in GGL-Assembler.

        cond.type = CMP_COND;
        cond.r1 = Rn;
        if (dataProcAdrModes(Op2, src, false, R_cmp2) == SRC_REG) {
            cond.r2 = src;
        } else {  // adr mode was SRC_IMM
            mMips->ORI(R_cmp2, R_zero, src);
            cond.r2 = R_cmp2;
        }

        break;


    case opTST:
    case opTEQ:
    case opCMN:
    case opADC:
    case opSBC:
    case opRSC:
        mMips->UNIMPL();    // currently unused in GGL Assembler code
        break;
    }

    if (cc != AL) {
        // target of the inverted-condition branch emitted above
        mMips->label(cond.label[cond.labelnum]);
    }
    if (s && opcode != opCMP) {
        // record Rd for a later S-bit condition test (compare against zero)
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
595
596
597
598#if 0
599#pragma mark -
600#pragma mark Multiply...
601#endif
602
603// multiply, accumulate
// multiply, accumulate: Rd = Rm * Rs + Rn
void ArmToMips64Assembler::MLA(int cc, int s,
                               int Rd, int Rm, int Rs, int Rn) {

    //ALOGW("MLA");
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->MUL(R_at, Rm, Rs);
    mMips->ADDU(Rd, R_at, Rn);
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
617
// multiply: Rd = Rm * Rs (low 32 bits)
void ArmToMips64Assembler::MUL(int cc, int s,
                               int Rd, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUL(Rd, Rm, Rs);
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
627
// unsigned 32x32 -> 64 multiply: RdHi:RdLo = Rm * Rs.
// MIPS64r6 uses separate MUH/MUL instructions for the high and low words.
void ArmToMips64Assembler::UMULL(int cc, int s,
                                 int RdLo, int RdHi, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUH(RdHi, Rm, Rs);
    mMips->MUL(RdLo, Rm, Rs);

    if (s) {
        // S-bit on a 64-bit result cannot be represented by a single reg
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
    }
}
640
// unsigned multiply-accumulate-long: not implemented (emits NOP2 and aborts).
void ArmToMips64Assembler::UMUAL(int cc, int s,
                                 int RdLo, int RdHi, int Rm, int Rs) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "UMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ =    (cc<<28) | (1<<23) | (1<<21) | (s<<20) |
    //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
    }
}

// signed multiply-long: not implemented (emits NOP2 and aborts).
void ArmToMips64Assembler::SMULL(int cc, int s,
                                 int RdLo, int RdHi, int Rm, int Rs) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "SMULL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ =    (cc<<28) | (1<<23) | (1<<22) | (s<<20) |
    //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on SMULL must be on 64-bit result\n");
    }
}

// signed multiply-accumulate-long: not implemented (emits NOP2 and aborts).
void ArmToMips64Assembler::SMUAL(int cc, int s,
                                 int RdLo, int RdHi, int Rm, int Rs) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "SMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ =    (cc<<28) | (1<<23) | (1<<22) | (1<<21) | (s<<20) |
    //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on SMUAL must be on 64-bit result\n");
    }
}
687
688
689
690#if 0
691#pragma mark -
692#pragma mark Branches...
693#endif
694
695// branches...
696
// Emit a conditional branch to 'label', mapping the ARM condition code to
// the MIPS compare-and-branch that tests the recorded condition operands
// (cond.r1 vs cond.r2). For S-bit conditions the comparison is against zero.
void ArmToMips64Assembler::B(int cc, const char* label)
{
    mArmPC[mInum++] = pc();
    if (cond.type == SBIT_COND) { cond.r2 = R_zero; }

    switch(cc) {
        case EQ: mMips->BEQ(cond.r1, cond.r2, label); break;
        case NE: mMips->BNE(cond.r1, cond.r2, label); break;
        case HS: mMips->BGEU(cond.r1, cond.r2, label); break;  // unsigned >=
        case LO: mMips->BLTU(cond.r1, cond.r2, label); break;  // unsigned <
        case MI: mMips->BLT(cond.r1, cond.r2, label); break;
        case PL: mMips->BGE(cond.r1, cond.r2, label); break;

        case HI: mMips->BGTU(cond.r1, cond.r2, label); break;  // unsigned >
        case LS: mMips->BLEU(cond.r1, cond.r2, label); break;  // unsigned <=
        case GE: mMips->BGE(cond.r1, cond.r2, label); break;
        case LT: mMips->BLT(cond.r1, cond.r2, label); break;
        case GT: mMips->BGT(cond.r1, cond.r2, label); break;
        case LE: mMips->BLE(cond.r1, cond.r2, label); break;
        case AL: mMips->B(label); break;                       // unconditional
        case NV: /* B Never - no instruction */ break;

        case VS:  // overflow conditions have no MIPS equivalent here
        case VC:
        default:
            LOG_ALWAYS_FATAL("Unsupported cc: %02x\n", cc);
            break;
    }
}
726
// Branch-and-link: unsupported — aborts at translation time.
void ArmToMips64Assembler::BL(int cc, const char* label)
{
    LOG_ALWAYS_FATAL("branch-and-link not supported yet\n");
    mArmPC[mInum++] = pc();
}

// no use for Branches with integer PC, but they're in the Interface class ....
void ArmToMips64Assembler::B(int cc, uint32_t* to_pc)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}

void ArmToMips64Assembler::BL(int cc, uint32_t* to_pc)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}

void ArmToMips64Assembler::BX(int cc, int Rn)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}
751
752
753
754#if 0
755#pragma mark -
756#pragma mark Data Transfer...
757#endif
758
759// data transfer...
// Translate ARM LDR (load 32-bit word) into MIPS LW, handling the addressing
// modes recorded by the immed12_pre/immed12_post/reg_scale_pre helpers.
void ArmToMips64Assembler::LDR(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert LDR via Arm SP to LW via Mips SP
            }
            mMips->LW(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert STR thru Arm SP to STR thru Mips SP
            }
            mMips->LW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);     // post-index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LW(Rd, R_at, 0);
            break;
    }
}
793
// Translate ARM LDRB (load unsigned byte) into MIPS LBU.
void ArmToMips64Assembler::LDRB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->LBU(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->LBU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);     // post-index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LBU(Rd, R_at, 0);
            break;
    }

}
822
// Translate ARM STR (store 32-bit word) into MIPS SW.
void ArmToMips64Assembler::STR(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SW thru Mips SP
            }
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SW(Rd, Rn, 0);
            } else {
                // No writeback so store offset by value
                mMips->SW(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);     // post index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SW(Rd, R_at, 0);
            break;
    }
}
858
// Translate ARM STRB (store byte) into MIPS SB.
void ArmToMips64Assembler::STRB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->SB(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SB(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);     // post-index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SB(Rd, R_at, 0);
            break;
    }
}
886
// Translate ARM LDRH (load unsigned halfword) into MIPS LHU.
void ArmToMips64Assembler::LDRH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:      // no support yet for writeback
            mMips->LHU(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            mMips->LHU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            // NOTE(review): amode.reg is a register number, so the negative
            // branch below appears unreachable — retained as-is; confirm.
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->LHU(Rd, R_at, 0);
            break;
    }
}
914
// Load signed byte: not implemented (emits NOP2 and aborts).
void ArmToMips64Assembler::LDRSB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// Load signed halfword: not implemented (emits NOP2 and aborts).
void ArmToMips64Assembler::LDRSH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
928
// Translate ARM STRH (store halfword) into MIPS SH.
void ArmToMips64Assembler::STRH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:      // no support yet for writeback
            mMips->SH(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            mMips->SH(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            // NOTE(review): amode.reg is a register number, so the negative
            // branch below appears unreachable — retained as-is; confirm.
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->SH(Rd, R_at, 0);
            break;
    }
}
956
957
958
959#if 0
960#pragma mark -
961#pragma mark Block Data Transfer...
962#endif
963
964// block data transfer...
// block data transfer: load-multiple — not implemented (NOP2 + abort).
void ArmToMips64Assembler::LDM(int cc, int dir,
                               int Rn, int W, uint32_t reg_list)
{   //                        ED FD EA FA      IB IA DB DA
    // const uint8_t P[8] = { 1, 0, 1, 0,      1, 0, 1, 0 };
    // const uint8_t U[8] = { 1, 1, 0, 0,      1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //         (uint32_t(U[dir])<<23) | (1<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// block data transfer: store-multiple — not implemented (NOP2 + abort).
void ArmToMips64Assembler::STM(int cc, int dir,
                               int Rn, int W, uint32_t reg_list)
{   //                        FA EA FD ED      IB IA DB DA
    // const uint8_t P[8] = { 0, 1, 0, 1,      1, 0, 1, 0 };
    // const uint8_t U[8] = { 0, 0, 1, 1,      1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //         (uint32_t(U[dir])<<23) | (0<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
988
989
990
991#if 0
992#pragma mark -
993#pragma mark Special...
994#endif
995
996// special...
// special...
// Atomic swap word: not implemented (NOP2 + abort).
void ArmToMips64Assembler::SWP(int cc, int Rn, int Rd, int Rm) {
    // *mPC++ = (cc<<28) | (2<<23) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// Atomic swap byte: not implemented (NOP2 + abort).
void ArmToMips64Assembler::SWPB(int cc, int Rn, int Rd, int Rm) {
    // *mPC++ = (cc<<28) | (2<<23) | (1<<22) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// Software interrupt: not implemented (NOP2 + abort).
void ArmToMips64Assembler::SWI(int cc, uint32_t comment) {
    // *mPC++ = (cc<<28) | (0xF<<24) | comment;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1017
1018
1019#if 0
1020#pragma mark -
1021#pragma mark DSP instructions...
1022#endif
1023
1024// DSP instructions...
// DSP instructions...
// Preload (cache hint): not implemented (NOP2 + abort).
void ArmToMips64Assembler::PLD(int Rn, uint32_t offset) {
    LOG_ALWAYS_FATAL_IF(!((offset&(1<<24)) && !(offset&(1<<21))),
                        "PLD only P=1, W=0");
    // *mPC++ = 0xF550F000 | (Rn<<16) | offset;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1033
// Count leading zeros: direct MIPS CLZ equivalent.
void ArmToMips64Assembler::CLZ(int cc, int Rd, int Rm)
{
    mArmPC[mInum++] = pc();
    mMips->CLZ(Rd, Rm);
}
1039
// Saturating add: not implemented (NOP2 + abort).
void ArmToMips64Assembler::QADD(int cc, int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1000050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// Saturating double-and-add: not implemented (NOP2 + abort).
void ArmToMips64Assembler::QDADD(int cc, int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1400050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// Saturating subtract: not implemented (NOP2 + abort).
void ArmToMips64Assembler::QSUB(int cc, int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1200050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// Saturating double-and-subtract: not implemented (NOP2 + abort).
void ArmToMips64Assembler::QDSUB(int cc, int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1600050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1071
1072// 16 x 16 signed multiply (like SMLAxx without the accumulate)
// 16 x 16 signed multiply (like SMLAxx without the accumulate)
void ArmToMips64Assembler::SMUL(int cc, int xy,
                                int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy)
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16-bits
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16-bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }
    mMips->MUL(Rd, R_at, R_at2);
}
1100
1101// signed 32b x 16b multiple, save top 32-bits of 48-bit result
// signed 32b x 16b multiply, save top 32-bits of 48-bit result
void ArmToMips64Assembler::SMULW(int cc, int y,
                                 int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the selector yT or yB refers to reg Rs
    if (y & yT) {
        // zero the bottom 16-bits, with 2 shifts, it can affect result
        mMips->SRL(R_at, Rs, 16);
        mMips->SLL(R_at, R_at, 16);

    } else {
        // move low 16-bit half, to high half
        mMips->SLL(R_at, Rs, 16);
    }
    // MUH yields the high 32 bits of the 64-bit product, which corresponds
    // to the top 32 bits of the 48-bit ARM result.
    mMips->MUH(Rd, Rm, R_at);
}
1119
// 16 x 16 signed multiply, accumulate: Rd = Rm{16} * Rs{16} + Rn
// Translates ARM SMLAxy. 'cc' is unused here. Clobbers R_at and R_at2.
void ArmToMips64Assembler::SMLA(int cc, int xy,
                int Rd, int Rm, int Rs, int Rn)
{
    mArmPC[mInum++] = pc();     // record the MIPS pc emitted for this ARM instruction

    // the 16 bits may be in the top or bottom half of 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy)
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16-bits; arithmetic shift preserves the sign
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16-bits; arithmetic shift preserves the sign
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }

    // product of the halves, then accumulate Rn (32-bit add)
    mMips->MUL(R_at, R_at, R_at2);
    mMips->ADDU(Rd, R_at, Rn);
}
1150
1151void ArmToMips64Assembler::SMLAL(int cc, int xy,
1152 int RdHi, int RdLo, int Rs, int Rm)
1153{
1154 // *mPC++ = (cc<<28) | 0x1400080 | (RdHi<<16) | (RdLo<<12) | (Rs<<8) | (xy<<4) | Rm;
1155 mArmPC[mInum++] = pc();
1156 mMips->NOP2();
1157 NOT_IMPLEMENTED();
1158}
1159
1160void ArmToMips64Assembler::SMLAW(int cc, int y,
1161 int Rd, int Rm, int Rs, int Rn)
1162{
1163 // *mPC++ = (cc<<28) | 0x1200080 | (Rd<<16) | (Rn<<12) | (Rs<<8) | (y<<4) | Rm;
1164 mArmPC[mInum++] = pc();
1165 mMips->NOP2();
1166 NOT_IMPLEMENTED();
1167}
1168
// used by ARMv6 version of GGLAssembler::filter32
// Translates ARM UXTB16: rotate Rm right by rotate*8, then zero-extend
// bytes 0 and 2 into the two 16-bit halves of Rd. Clobbers R_at and R_at2.
void ArmToMips64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate)
{
    mArmPC[mInum++] = pc();     // record the MIPS pc emitted for this ARM instruction

    //Rd[31:16] := ZeroExtend((Rm ROR (8 * sh))[23:16]),
    //Rd[15:0] := ZeroExtend((Rm ROR (8 * sh))[7:0]). sh 0-3.

    mMips->ROTR(R_at2, Rm, rotate * 8);     // apply the byte rotation
    mMips->LUI(R_at, 0xFF);                 // R_at = 0x00FF0000
    mMips->ORI(R_at, R_at, 0xFF);           // R_at = 0x00FF00FF mask
    mMips->AND(Rd, R_at2, R_at);            // keep bytes 0 and 2
}
1182
1183void ArmToMips64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
1184{
1185 /* Placeholder for UBFX */
1186 mArmPC[mInum++] = pc();
1187
1188 mMips->NOP2();
1189 NOT_IMPLEMENTED();
1190}
1191
1192// ----------------------------------------------------------------------------
1193// Address Processing...
1194// ----------------------------------------------------------------------------
1195
// Address (pointer-width) add: delegates to dataProcessing with the
// 64-bit add opcode so pointer arithmetic is done with 64-bit ops.
void ArmToMips64Assembler::ADDR_ADD(int cc,
        int s, int Rd, int Rn, uint32_t Op2)
{
// if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
// if(s != 0) { NOT_IMPLEMENTED(); return;} //Not required
    dataProcessing(opADD64, cc, s, Rd, Rn, Op2);
}
1203
// Address (pointer-width) subtract: delegates to dataProcessing with the
// 64-bit subtract opcode so pointer arithmetic is done with 64-bit ops.
void ArmToMips64Assembler::ADDR_SUB(int cc,
        int s, int Rd, int Rn, uint32_t Op2)
{
// if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
// if(s != 0) { NOT_IMPLEMENTED(); return;} //Not required
    dataProcessing(opSUB64, cc, s, Rd, Rn, Op2);
}
1211
// Pointer-width load (LD) of Rd from [Rn + addressing mode].
// 'offset' is an addressing-mode selector; the operands themselves come
// from the shared 'amode' struct (value/writeback/reg), which is
// populated elsewhere when the ARM addressing mode is decoded. 'cc' is
// unused here. Clobbers R_at in the reg-scale case.
void ArmToMips64Assembler::ADDR_LDR(int cc, int Rd, int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert LDR via Arm SP to LW via Mips SP
            }
            mMips->LD(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert STR thru Arm SP to STR thru Mips SP
            }
            mMips->LD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);     // post-index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LD(Rd, R_at, 0);
            break;
    }
}
1244
// Pointer-width store (SD) of Rd to [Rn + addressing mode].
// Mirror of ADDR_LDR; reads the shared 'amode' struct set up when the
// ARM addressing mode was decoded. 'cc' is unused here. Clobbers R_at
// in the reg-scale case.
void ArmToMips64Assembler::ADDR_STR(int cc, int Rd, int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert STR thru Arm SP to SW thru Mips SP
            }
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SD(Rd, Rn, 0);
            } else {
                // No writeback so store offset by value
                mMips->SD(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);     // post-index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SD(Rd, R_at, 0);
            break;
    }
}
1279
1280#if 0
1281#pragma mark -
1282#pragma mark MIPS Assembler...
1283#endif
1284
1285
1286//**************************************************************************
1287//**************************************************************************
1288//**************************************************************************
1289
1290
1291/* MIPS64 assembler
1292** this is a subset of mips64r6, targeted specifically at ARM instruction
1293** replacement in the pixelflinger/codeflinger code.
1294**
1295** This class is extended from MIPSAssembler class and overrides only
1296** MIPS64r6 specific stuff.
1297*/
1298
1299MIPS64Assembler::MIPS64Assembler(const sp<Assembly>& assembly, ArmToMips64Assembler *parent)
1300 : mParent(parent),
1301 MIPSAssembler::MIPSAssembler(assembly, NULL)
1302{
1303}
1304
1305MIPS64Assembler::MIPS64Assembler(void* assembly, ArmToMips64Assembler *parent)
1306 : mParent(parent),
1307 MIPSAssembler::MIPSAssembler(NULL, NULL)
1308{
1309 mBase = mPC = (uint32_t *)assembly;
1310}
1311
// Out-of-line empty destructor; nothing is released here (in particular
// the raw mParent back-pointer is not deleted by this class).
MIPS64Assembler::~MIPS64Assembler()
{
}
1315
1316void MIPS64Assembler::reset()
1317{
1318 if (mAssembly != NULL) {
1319 mBase = mPC = (uint32_t *)mAssembly->base();
1320 } else {
1321 mPC = mBase = base();
1322 }
1323 mBranchTargets.clear();
1324 mLabels.clear();
1325 mLabelsInverseMapping.clear();
1326 mComments.clear();
1327}
1328
1329
1330void MIPS64Assembler::disassemble(const char* name)
1331{
1332 char di_buf[140];
1333
1334 bool arm_disasm_fmt = (mParent->mArmDisassemblyBuffer == NULL) ? false : true;
1335
1336 typedef char dstr[40];
1337 dstr *lines = (dstr *)mParent->mArmDisassemblyBuffer;
1338
1339 if (mParent->mArmDisassemblyBuffer != NULL) {
1340 for (int i=0; i<mParent->mArmInstrCount; ++i) {
1341 string_detab(lines[i]);
1342 }
1343 }
1344
1345 // iArm is an index to Arm instructions 1...n for this assembly sequence
1346 // mArmPC[iArm] holds the value of the Mips-PC for the first MIPS
1347 // instruction corresponding to that Arm instruction number
1348
1349 int iArm = 0;
1350 size_t count = pc()-base();
1351 uint32_t* mipsPC = base();
1352
1353 while (count--) {
1354 ssize_t label = mLabelsInverseMapping.indexOfKey(mipsPC);
1355 if (label >= 0) {
1356 ALOGW("%s:\n", mLabelsInverseMapping.valueAt(label));
1357 }
1358 ssize_t comment = mComments.indexOfKey(mipsPC);
1359 if (comment >= 0) {
1360 ALOGW("; %s\n", mComments.valueAt(comment));
1361 }
1362 ::mips_disassem(mipsPC, di_buf, arm_disasm_fmt);
1363 string_detab(di_buf);
1364 string_pad(di_buf, 30);
1365 ALOGW("%08lx: %08x %s", uintptr_t(mipsPC), uint32_t(*mipsPC), di_buf);
1366 mipsPC++;
1367 }
1368}
1369
1370void MIPS64Assembler::fix_branches()
1371{
1372 // fixup all the branches
1373 size_t count = mBranchTargets.size();
1374 while (count--) {
1375 const branch_target_t& bt = mBranchTargets[count];
1376 uint32_t* target_pc = mLabels.valueFor(bt.label);
1377 LOG_ALWAYS_FATAL_IF(!target_pc,
1378 "error resolving branch targets, target_pc is null");
1379 int32_t offset = int32_t(target_pc - (bt.pc+1));
1380 *bt.pc |= offset & 0x00FFFF;
1381 }
1382}
1383
1384void MIPS64Assembler::DADDU(int Rd, int Rs, int Rt)
1385{
1386 *mPC++ = (spec_op<<OP_SHF) | (daddu_fn<<FUNC_SHF)
1387 | (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
1388}
1389
1390void MIPS64Assembler::DADDIU(int Rt, int Rs, int16_t imm)
1391{
1392 *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1393}
1394
1395void MIPS64Assembler::DSUBU(int Rd, int Rs, int Rt)
1396{
1397 *mPC++ = (spec_op<<OP_SHF) | (dsubu_fn<<FUNC_SHF) |
1398 (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1399}
1400
1401void MIPS64Assembler::DSUBIU(int Rt, int Rs, int16_t imm) // really addiu(d, s, -j)
1402{
1403 *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | ((-imm) & MSK_16);
1404}
1405
1406void MIPS64Assembler::MUL(int Rd, int Rs, int Rt)
1407{
1408 *mPC++ = (spec_op<<OP_SHF) | (mul_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
1409 (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1410}
1411
1412void MIPS64Assembler::MUH(int Rd, int Rs, int Rt)
1413{
1414 *mPC++ = (spec_op<<OP_SHF) | (muh_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
1415 (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1416}
1417
1418void MIPS64Assembler::CLO(int Rd, int Rs)
1419{
1420 *mPC++ = (spec_op<<OP_SHF) | (17<<FUNC_SHF) |
1421 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
1422}
1423
1424void MIPS64Assembler::CLZ(int Rd, int Rs)
1425{
1426 *mPC++ = (spec_op<<OP_SHF) | (16<<FUNC_SHF) |
1427 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
1428}
1429
1430void MIPS64Assembler::LD(int Rt, int Rbase, int16_t offset)
1431{
1432 *mPC++ = (ld_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1433}
1434
1435void MIPS64Assembler::SD(int Rt, int Rbase, int16_t offset)
1436{
1437 *mPC++ = (sd_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1438}
1439
1440void MIPS64Assembler::LUI(int Rt, int16_t offset)
1441{
1442 *mPC++ = (aui_op<<OP_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1443}
1444
1445
1446void MIPS64Assembler::JR(int Rs)
1447{
1448 *mPC++ = (spec_op<<OP_SHF) | (Rs<<RS_SHF) | (jalr_fn << FUNC_SHF);
1449 MIPS64Assembler::NOP();
1450}
1451
1452}; // namespace android: