/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C24XX)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc, val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex, val, len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end  */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
 ARM(		mov	r0, r0		)
 ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
1:
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture-specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif
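
		/*
		 * Illustrative example (assumed numbers): with the
		 * decompressor running at pc = 0x60008040 and the usual
		 * TEXT_OFFSET of 0x8000, masking with 0xf8000000 gives
		 * 0x60000000, so r4 = 0x60008000.  This only works if the
		 * image was loaded within the first 128MB of RAM.
		 */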

		bl	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24
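
		/*
		 * Example with made-up bytes: if the four bytes at r10
		 * were 00 00 40 00, the ldrb/orr sequence above builds
		 * r9 = 0x00400000, i.e. a 4MB decompressed image, whatever
		 * the endianness of the CPU doing the loads.
		 */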

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non-relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 * r0  = delta
 * r2  = BSS start
 * r3  = BSS end
 * r4  = final kernel address
 * r5  = appended dtb size (still unknown)
 * r6  = _edata
 * r7  = architecture ID
 * r8  = atags/device tree pointer
 * r9  = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * r11 = GOT start
 * r12 = GOT end
 * sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  To be on the safe side,
		 * let's temporarily move the stack away into the malloc
		 * area.  No GOT fixup has occurred yet, but none of the
		 * code we're about to call uses any global variable.
		 */
		add	sp, sp, #0x10000
		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		sub	r2, sp, r6
		bl	atags_to_fdt

		/*
		 * If the returned value is 1, there is no ATAG at the
		 * location pointed to by r8.  Try the typical 0x100 offset
		 * from start of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		add	r0, r0, #0x100
		mov	r1, r6
		sub	r2, sp, r6
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, #0x10000
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area.  To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the dtb's size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif
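
		/*
		 * The eor/bic/ror sequence above is the classic byte swap
		 * for pre-ARMv6 cores that lack the rev instruction.
		 * Example with an assumed dtb size of 0x00004000: the
		 * big-endian header bytes 00 00 40 00 are loaded as
		 * r5 = 0x00400000 on a little-endian CPU and come out
		 * byte-reversed as r5 = 0x00004000.
		 */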

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
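
		/*
		 * The add #7 / bic #7 pair rounds up to the next multiple
		 * of 8: an assumed dtb size of 0x1a39 becomes 0x1a40,
		 * while an already aligned 0x1a40 is left unchanged.
		 */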

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite
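
		/*
		 * Worked example (assumed addresses): decompressing a
		 * 4MB kernel to r4 = 0x60008000 while this copy runs at
		 * 0x60c00000 passes the second test (0x60008000 + 0x400000
		 * is below wont_overwrite), so no relocation is needed;
		 * running at 0x60100000 instead would fail both tests and
		 * fall through to the relocation code below.
		 */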

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255
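
		/*
		 * Example with assumed values: if the relocation stub is
		 * 0x180 bytes and r10 = 0x60401234, the immediate above is
		 * (0x180 + 256) & ~255 = 0x200, so r10 becomes 0x60401434
		 * and the bic rounds it down to 0x60401400 - still at
		 * least 0x180 bytes past the original end.
		 */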

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

		/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC (i.e. CONFIG_ZBOOT_ROM = n),
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b
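
		/*
		 * Note: each pass above clears 16 bytes before re-testing,
		 * so the bss region is assumed either to be a multiple of
		 * 16 bytes in size or to tolerate a small overshoot into
		 * whatever the linker places after _end.
		 */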

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)
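
/*
 * Illustrative descriptor values for the ARMv4 path (assuming
 * CB_BITS = 0x0c, so r6 = CB_BITS | 0x12, and RAM detected at
 * 0x60000000): the loop above emits 1MB section entries of the form
 * 0x60000c1e for RAM (AP=11, C and B set) and e.g. 0x10000c12 for
 * everything else (uncacheable, unbufferable).
 */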

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3	)	@ call cache function
 THUMB(		addeq	r12, r3		)
 THUMB(		moveq	pc, r12		)	@ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
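
/*
 * Worked example: a Cortex-A9 MIDR of 0x410fc090 selects the "new CPU Id"
 * architecture entry below, since (0x410fc090 ^ 0x000f0000) & 0x000f0000
 * == 0, which dispatches to the ARMv7 cache methods.
 */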
		.align	2
		.type	proc_types,#object
proc_types:
#if !defined(CONFIG_CPU_V7)
		/* This collides with some V7 IDs, preventing correct detection */
		.word	0x00000000		@ old ARM ID
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
#endif

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum way number (associativity - 1)
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract maximum set index (NumSets - 1)
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
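
/*
 * Illustrative set/way encoding (assuming a 32KB, 4-way, 64-byte-line
 * cache at level 1): CCSIDR yields a line-size field of 2, so r2 = 6;
 * the maximum way number r4 = 3 gives r5 = clz(3) = 30; and the maximum
 * set index is r7 = 127.  The loops above then issue DCCISW values of
 * (way << 30) | (set << 6) | (level << 1).
 */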

__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
		.align	5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4	)		@ call kernel
 THUMB(		bx	r4	)		@ entry point is always ARM

reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: