758dbb88806181e81d4388f703749da6f68f6d3d
1 /*
2 * Copyright (c) 2014, Mentor Graphics Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 * 3. Neither the name of Mentor Graphics Corporation nor the names of its
14 * contributors may be used to endorse or promote products derived from this
15 * software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
30 #include "libfdt/types.h"
31 #include "libfdt/libfdt.h"
32 #include "zlib/zlib.h"
34 /* External variables. */
35 extern unsigned int _image_start;
36 extern unsigned int _image_end;
37 extern unsigned int _bss_start;
38 extern unsigned int _bss_end;
40 /* Definitions.*/
41 #define FIT_IMAGE_START (void *)&_image_start
42 #define FIT_IMAGE_END (void *)&_image_end
44 #define BSS_START (void *)&_bss_start
45 #define BSS_END (void *)&_bss_end
47 #define BSS_SIZE (((unsigned int)BSS_END) - ((unsigned int)BSS_START))
49 #define XILINX_ARM_MACHINE 3343
51 #define KERNEL_RESERVED_SPACE 0x7FF2000
53 #define PUTC(a) ((*((volatile unsigned int *) 0xE0001030)) = (a))
55 /* Globals. */
56 unsigned int linux_kernel_start, dtb_start, linux_kernel_size, dtb_size;
58 /* Static functions. */
59 static void boot_linux_fit_image(void);
61 static int process_and_relocate_fit_image(char *image_start,
62 unsigned int image_size);
64 extern void start_linux_with_dtb(void);
66 static void clear_bss(void);
67 static void invalidate_cache(void);
68 static void clean_system(void);
/* Transmits one character on the UART, then busy-waits for the transmitter.
 * NOTE(review): 0xE0001030 / 0xE000102C look like the Zynq-7000 UART1 TX
 * FIFO and channel-status registers, with bit 3 as the TX-done/empty flag
 * -- confirm against the Zynq-7000 TRM register map. */
void put_char(char c)
{
	/* Write the byte into the TX FIFO (see PUTC macro above). */
	PUTC(c);

	/* Spin until status bit 3 is set, i.e. the character has drained. */
	while (((*((volatile unsigned int *)0xE000102C)) & 0x00000008) == 0) ;
}
/* Writes a NUL-terminated string to the UART, one character at a time. */
void putstring(const char *str)
{
	const char *cursor = str;

	while (*cursor != '\0') {
		put_char(*cursor);
		cursor++;
	}
}
84 /* Boots the linux kernel. */
/* Entry point of the bootstrap: prepares the C runtime and caches,
 * prints a banner, and hands control to the FIT-image boot path. */
void boot_linux(void)
{
	/* Zero the .bss region before any zero-initialized global is used. */
	clear_bss();

	/* Invalidate the caches so no stale lines reach the kernel. */
	clean_system();

	/* Banner; adjacent literals concatenate into one identical stream. */
	putstring("\n\r********************************* \n\r"
		  "OpenAMP Linux Bootstrap."
		  "\n\r********************************* \n\r");

	/* Only the FIT image format is supported at present. */
	boot_linux_fit_image();
}
100 /* Boots a FIT format linux image. */
101 static void boot_linux_fit_image(void)
102 {
103 unsigned int image_size, status;
105 char *image_start;
107 /* Retrieve linux image start and end addresses. */
108 image_start = (char *)FIT_IMAGE_START;
110 /* Retrieve linux image size. */
111 image_size = (FIT_IMAGE_END - FIT_IMAGE_START);
113 /* Check for a valid linux image size. */
114 if (image_size > 0) {
116 /* let us parse and relocate the FIT image. */
117 status =
118 process_and_relocate_fit_image(image_start, image_size);
120 /* Image processed and relocated successfully. */
121 if (!status) {
123 putstring("\n\rLinux Bootstrap: Booting Linux. \n\r");
125 /* Image has been processed and relocated. Now boot linux */
126 start_linux_with_dtb();
127 } else {
128 /* Go into an error loop. */
129 while (1) ;
130 }
131 } else {
132 /* Go into an error loop. */
133 while (1) ;
134 }
135 }
137 /* Returns zero for success. */
138 static int process_and_relocate_fit_image(char *image_start,
139 unsigned int image_size)
140 {
141 unsigned int fit_image_start, compressed = 0;
142 unsigned long kernel_address;
143 int size, load_size, load_address, dtb_address;
144 char *conf_name = NULL;
145 void *data;
146 int cfg_offset, offset, ret;
147 z_stream strm;
149 putstring
150 ("\n\rLinux Bootstrap: Locating Linux Kernel and DTB from FIT image.\n\r");
152 fit_image_start = (unsigned int)image_start;
154 /* Retrieve default FIT image configuration node. */
155 offset =
156 fdt_path_offset((const void *)fit_image_start, "/configurations");
158 if (offset >= 0) {
159 /* Retrieve default configuration name. */
160 conf_name =
161 (char *)fdt_getprop((const void *)fit_image_start, offset,
162 "default", &size);
163 }
165 if (conf_name) {
166 /* Retrieve the offset of configuration node. */
167 cfg_offset =
168 fdt_subnode_offset((const void *)fit_image_start, offset,
169 conf_name);
170 }
172 /* Retrieve kernel node using the config node. */
173 conf_name =
174 (char *)fdt_getprop((const void *)fit_image_start, cfg_offset,
175 "kernel", &size);
177 if (conf_name) {
178 offset =
179 fdt_path_offset((const void *)fit_image_start, "/images");
181 if (offset >= 0) {
182 offset =
183 fdt_subnode_offset((const void *)fit_image_start,
184 offset, conf_name);
185 }
186 }
188 if (offset >= 0) {
189 /* Retrieve kernel image address and size. */
190 kernel_address =
191 (unsigned long)fdt_getprop((const void *)fit_image_start,
192 offset, "data", &load_size);
194 /* Retrieve kernel load address. */
195 data =
196 (void *)fdt_getprop((const void *)fit_image_start, offset,
197 "load", &size);
199 load_address = *((int *)data);
201 load_address = be32_to_cpu(load_address);
203 /* Check kernel image for compression. */
204 data =
205 (void *)fdt_getprop((const void *)fit_image_start, offset,
206 "compression", &size);
208 if (data != NULL) {
209 if (!(strcmp(data, "gzip"))) {
210 compressed = 1;
211 }
212 }
213 }
215 memset((void *)load_address, 0, 0x0600000 - load_address);
217 if (compressed == 1) {
218 putstring
219 ("\n\rLinux Bootstrap: Kernel image is compressed. Starting decompression process. It may take a while...\n\r");
221 /* Initialize zlib stream. */
222 strm.zalloc = Z_NULL;
223 strm.zfree = Z_NULL;
224 strm.opaque = Z_NULL;
225 strm.avail_in = 0;
226 strm.next_in = Z_NULL;
228 /* Initialize the zlib state for de-compression. */
229 ret = inflateInit2(&strm, MAX_WBITS + 16);
231 if (ret == Z_OK) {
232 strm.next_in = (Bytef *) kernel_address;
233 strm.avail_out = KERNEL_RESERVED_SPACE;
234 strm.avail_in = load_size;
236 /* Pointer to output space. */
237 strm.next_out = (Bytef *) load_address;
239 /* Call the de-compression engine. */
240 ret = inflate(&strm, Z_FINISH);
241 }
243 (void)inflateEnd(&strm);
245 if ((ret != Z_OK) && (ret != Z_STREAM_END)) {
247 /* return with an error. */
248 return 1;
249 }
251 putstring
252 ("\n\rLinux Bootstrap: Linux image decompression complete. \n\r");
254 } else {
255 /* Uncompressed image. Just load to the load address. */
256 memcpy((void *)load_address, (void *)kernel_address, load_size);
257 }
259 putstring
260 ("\n\rLinux Bootstrap: Linux kernel image has been loaded into memory. \n\r");
262 /* Save kernel load address and size. */
263 linux_kernel_start = load_address;
264 linux_kernel_size = load_size;
266 /* Retrieve DTB node using the config node. */
267 conf_name =
268 (char *)fdt_getprop((const void *)fit_image_start, cfg_offset,
269 "fdt", &size);
271 if (conf_name) {
272 offset =
273 fdt_path_offset((const void *)fit_image_start, "/images");
275 if (offset >= 0) {
276 offset =
277 fdt_subnode_offset((const void *)fit_image_start,
278 offset, conf_name);
279 }
280 }
282 if (offset >= 0) {
283 /* Retrieve DTB address and size. */
284 dtb_address =
285 (unsigned long)fdt_getprop((const void *)fit_image_start,
286 offset, "data", &load_size);
287 }
289 dtb_start = (linux_kernel_start + KERNEL_RESERVED_SPACE) & 0xFFFFFF00;
290 dtb_size = load_size;
292 memcpy((void *)dtb_start, (void *)dtb_address, load_size);
294 putstring("\n\rLinux Bootstrap: Loaded DTB. \n\r");
296 return 0;
297 }
299 static void clear_bss(void)
300 {
301 memset(BSS_START, 0, BSS_SIZE);
302 }
304 /*
305 * The code in this section is for invalidating the cache at startup
306 *
307 */
309 /* ARM Coprocessor registers */
310 #define ARM_AR_CP0 p0
311 #define ARM_AR_CP1 p1
312 #define ARM_AR_CP2 p2
313 #define ARM_AR_CP3 p3
314 #define ARM_AR_CP4 p4
315 #define ARM_AR_CP5 p5
316 #define ARM_AR_CP6 p6
317 #define ARM_AR_CP7 p7
318 #define ARM_AR_CP8 p8
319 #define ARM_AR_CP9 p9
320 #define ARM_AR_CP10 p10
321 #define ARM_AR_CP11 p11
322 #define ARM_AR_CP12 p12
323 #define ARM_AR_CP13 p13
324 #define ARM_AR_CP14 p14
325 #define ARM_AR_CP15 p15
327 /* CRn and CRm register values */
328 #define ARM_AR_C0 c0
329 #define ARM_AR_C1 c1
330 #define ARM_AR_C2 c2
331 #define ARM_AR_C3 c3
332 #define ARM_AR_C4 c4
333 #define ARM_AR_C5 c5
334 #define ARM_AR_C6 c6
335 #define ARM_AR_C7 c7
336 #define ARM_AR_C8 c8
337 #define ARM_AR_C9 c9
338 #define ARM_AR_C10 c10
339 #define ARM_AR_C11 c11
340 #define ARM_AR_C12 c12
341 #define ARM_AR_C13 c13
342 #define ARM_AR_C14 c14
343 #define ARM_AR_C15 c15
345 /* This define is used to add quotes to anything passed in */
346 #define ARM_AR_QUOTES(x) #x
348 /* This macro writes to a coprocessor register */
349 #define ARM_AR_CP_WRITE(cp, op1, cp_value, crn, crm, op2) \
350 { \
351 asm volatile(" MCR " ARM_AR_QUOTES(cp) "," \
352 #op1 \
353 ", %0, " \
354 ARM_AR_QUOTES(crn) "," \
355 ARM_AR_QUOTES(crm) "," \
356 #op2 \
357 : /* No outputs */ \
358 : "r" (cp_value)); \
359 }
361 /* This macro reads from a coprocessor register */
362 #define ARM_AR_CP_READ(cp, op1, cp_value_ptr, crn, crm, op2) \
363 { \
364 asm volatile(" MRC " ARM_AR_QUOTES(cp) "," \
365 #op1 \
366 ", %0, " \
367 ARM_AR_QUOTES(crn) "," \
368 ARM_AR_QUOTES(crm) "," \
369 #op2 \
370 : "=r" (*(unsigned long *)(cp_value_ptr)) \
371 : /* No inputs */ ); \
372 }
374 /* This macro executes a ISB instruction */
375 #define ARM_AR_ISB_EXECUTE() \
376 { \
377 asm volatile(" ISB"); \
378 }
380 /* This macro executes a DSB instruction */
381 #define ARM_AR_DSB_EXECUTE() \
382 { \
383 asm volatile(" DSB"); \
384 }
386 /* CLIDR and CCSIDR mask values */
387 #define ARM_AR_MEM_CLIDR_LOC_MASK 0x7000000
388 #define ARM_AR_MEM_CCSIDR_LINESIZE_MASK 0x7
389 #define ARM_AR_MEM_CCSIDR_ASSOC_MASK 0x3FF
390 #define ARM_AR_MEM_CCSIDR_NUMSET_MASK 0x7FFF
392 /* CLIDR and CCSIDR shift values */
393 #define ARM_AR_MEM_CLIDR_LOC_RSHT_OFFSET 24
394 #define ARM_AR_MEM_CCSIDR_ASSOC_RSHT_OFFSET 3
395 #define ARM_AR_MEM_CCSIDR_NUMSET_RSHT_OFFSET 13
397 /* Extract 'encoded' line length of the cache */
398 #define ARM_AR_MEM_CCSIDR_LINESIZE_GET(ccsidr_reg) (ccsidr_reg & \
399 ARM_AR_MEM_CCSIDR_LINESIZE_MASK)
401 /* Extract 'encoded' way size of the cache */
402 #define ARM_AR_MEM_CCSIDR_ASSOC_GET(ccsidr_reg) (ARM_AR_MEM_CCSIDR_ASSOC_MASK & \
403 (ccsidr_reg >> \
404 ARM_AR_MEM_CCSIDR_ASSOC_RSHT_OFFSET))
406 /* Extract 'encoded' maximum number of index size */
407 #define ARM_AR_MEM_CCSIDR_NUMSET_GET(ccsidr_reg) (ARM_AR_MEM_CCSIDR_NUMSET_MASK & \
408 (ccsidr_reg >> \
409 ARM_AR_MEM_CCSIDR_NUMSET_RSHT_OFFSET))
411 /* Refer to chapter B3.12.31 c7, Cache and branch predictor maintenance functions in the
412 ARM Architecture Reference Manual ARMv7-A and ARMv7-R Edition 1360*/
413 /* Calculate # of bits to be shifted for set size and way size */
415 /* log2(line size in bytes) = ccsidr_linesize + 2 + logbase2(4) */
416 #define ARM_AR_MEM_L_CALCULATE(linesize) (linesize + 2 + 2)
418 /* log2(nsets) = 32 - way_size_bit_pos */
420 /* Find the bit position of way size increment */
421 #define ARM_AR_MEM_A_CALCULATE(assoc, a_offset_ref) \
422 { \
423 unsigned int temp_pos = 0x80000000; \
424 \
425 *a_offset_ref = 0; \
426 \
427 /* Logic to count the number of leading zeros before the first 1 */ \
428 while(!((assoc & temp_pos) == temp_pos)) \
429 { \
430 (*a_offset_ref)++; \
431 temp_pos = temp_pos >> 1; \
432 } \
433 }
435 /* Factor way, cache number, index number */
436 #define ARM_AR_MEM_DCCISW_SET(dccisw_ref, level, numsets, assoc, l_offset, a_offset) \
437 { \
438 *dccisw_ref = (level | (numsets << l_offset) | (assoc << a_offset)); \
439 }
441 /* This macro extracts line size, assoc and set size from CCSIDR */
442 #define ARM_AR_MEM_CCSIDR_VALS_GET(linesize_ref, assoc_ref, numsets_ref, \
443 l_offset_ref, a_offset_ref) \
444 { \
445 unsigned int ccsidr_val; \
446 \
447 /* Read the selected cache's CCSIDR */ \
448 ARM_AR_CP_READ(ARM_AR_CP15, 1, &ccsidr_val, \
449 ARM_AR_C0, ARM_AR_C0, 0); \
450 \
451 /* Extract 'encoded' line length of the cache */ \
452 *linesize_ref = ARM_AR_MEM_CCSIDR_LINESIZE_GET(ccsidr_val); \
453 \
454 /* Extract 'encoded' way size of the cache */ \
455 *assoc_ref = ARM_AR_MEM_CCSIDR_ASSOC_GET(ccsidr_val); \
456 \
457 /* Extract 'encoded' maximum number of index size */ \
458 *numsets_ref = ARM_AR_MEM_CCSIDR_NUMSET_GET(ccsidr_val); \
459 \
460 /* Calculate # of bits to be shifted for set size and way size */ \
461 \
462 /* log2(line size in bytes) = ccsidr_linesize + 2 + log2(4) */ \
463 *l_offset_ref = ARM_AR_MEM_L_CALCULATE(*linesize_ref); \
464 \
465 /* log2(nsets) = 32 - way_size_bit_pos */ \
466 ARM_AR_MEM_A_CALCULATE(*assoc_ref, a_offset_ref); \
467 }
469 /* This macro invalidates all of the instruction cache at the core level. */
470 #define ARM_AR_MEM_ICACHE_ALL_INVALIDATE() \
471 { \
472 ARM_AR_CP_WRITE(ARM_AR_CP15, 0, \
473 0, ARM_AR_C7, \
474 ARM_AR_C5, 0); \
475 }
/* This function walks all data/unified cache levels by set/way:
   type 0 invalidates only, non-zero cleans and invalidates. */
/* Walks every data/unified cache level up to the Level of Coherence and
 * performs a set/way maintenance operation on each line:
 *   type == 0 -> invalidate by set/way (DCISW, c7/c6/2)
 *   type != 0 -> clean and invalidate by set/way (DCCISW, c7/c14/2)
 * Follows the CLIDR/CSSELR/CCSIDR iteration described in the ARMv7-A
 * Architecture Reference Manual (B3, cache maintenance). */
void ARM_AR_MEM_DCACHE_ALL_OP(int type)
{
	unsigned int clidr_val = 0;
	unsigned int clidr_loc = 0;
	unsigned int cache_number = 0;	/* CSSELR "level" field, i.e. level << 1 */
	unsigned int cache_type = 0;
	unsigned int ccsidr_linesize = 0;
	unsigned int ccsidr_assoc = 0;
	int ccsidr_numsets = 0;
	int way_size_copy = 0;
	unsigned int set_size_bit_pos = 0;
	unsigned int cache_number_pos = 0;
	unsigned int way_size_bit_pos = 0;
	unsigned int set_way_value = 0;

	/* Read CLIDR to extract level of coherence (LOC) */
	ARM_AR_CP_READ(ARM_AR_CP15, 1, &clidr_val, ARM_AR_C0, ARM_AR_C0, 1);

	/* Extract LOC from CLIDR and align it at bit 1.
	 * NOTE(review): the shift is 24, so clidr_loc is actually the raw
	 * LOC value, not LOC << 1; the loop bound below matches the
	 * cache_number (level << 1) encoding only for even LOC values
	 * (LOC = 2 on Cortex-A9 works) -- confirm for other cores. */
	clidr_loc = (clidr_val & ARM_AR_MEM_CLIDR_LOC_MASK) >>
	    ARM_AR_MEM_CLIDR_LOC_RSHT_OFFSET;

	/* Proceed only if LOC is non-zero (some cache level must be
	 * maintained to reach coherence). */
	if (clidr_loc != 0) {
		do {
			/* Extract this level's cache type field from CLIDR;
			 * fields are 3 bits wide, hence level * 3 shift. */
			cache_number_pos = cache_number + (cache_number >> 1);
			cache_type = (clidr_val >> cache_number_pos) & 0x7;

			/* Continue only if data or unified cache (type >= 2) */
			if (cache_type >= 2) {
				/* Select desired cache level in CSSELR */
				ARM_AR_CP_WRITE(ARM_AR_CP15, 2, cache_number,
						ARM_AR_C0, ARM_AR_C0, 0);

				/* Ensure CSSELR write is visible before
				 * CCSIDR is read. */
				ARM_AR_ISB_EXECUTE();

				/* Get data like linesize, assoc and set size */
				ARM_AR_MEM_CCSIDR_VALS_GET(&ccsidr_linesize,
							   &ccsidr_assoc,
							   &ccsidr_numsets,
							   &set_size_bit_pos,
							   &way_size_bit_pos);

				/* Iterate all sets; for each set iterate all
				 * ways, issuing one op per set/way pair. */
				do {
					way_size_copy = ccsidr_assoc;

					do {
						/* Factor way, cache number, index number */
						ARM_AR_MEM_DCCISW_SET
						    (&set_way_value,
						     cache_number,
						     ccsidr_numsets,
						     way_size_copy,
						     set_size_bit_pos,
						     way_size_bit_pos);

						/* Execute invalidate if type = 0 */
						if (type == 0) {
							/* DCISW */
							ARM_AR_CP_WRITE
							    (ARM_AR_CP15, 0,
							     set_way_value,
							     ARM_AR_C7,
							     ARM_AR_C6, 2);
						} else {
							/* DCCISW */
							ARM_AR_CP_WRITE
							    (ARM_AR_CP15, 0,
							     set_way_value,
							     ARM_AR_C7,
							     ARM_AR_C14, 2);
						}

						/* decrement the way */
					} while ((--way_size_copy) >= 0);

					/* decrement the set */
				} while ((--ccsidr_numsets) >= 0);
			}

			/* end if */
			/* Increment cache number (level field advances in
			 * steps of 2 because it sits at CSSELR bit 1). */
			cache_number += 2;

			/* end do-while */
		} while (clidr_loc >= cache_number);
	}

	/* Switch back to cache level 0 in CSSELR */
	ARM_AR_CP_WRITE(ARM_AR_CP15, 2, 0, ARM_AR_C0, ARM_AR_C0, 0);

	/* Sync: drain the maintenance ops and resynchronize the pipeline. */
	ARM_AR_DSB_EXECUTE();
	ARM_AR_ISB_EXECUTE();
}
/* This function invalidates all of the data cache at the core level. */
void ARM_AR_MEM_DCACHE_ALL_INVALIDATE(void)
{
	/* type 0 = invalidate-only (DCISW) across all data cache levels. */
	ARM_AR_MEM_DCACHE_ALL_OP(0);
}
/* This function invalidates the instruction and data caches at the core level. */
void ARM_AR_MEM_CACHE_ALL_INVALIDATE(void)
{
	/* Invalidate the entire instruction cache (CP15 c7/c5/0, ICIALLU). */
	ARM_AR_MEM_ICACHE_ALL_INVALIDATE();
	/* Then invalidate all data/unified cache levels by set/way. */
	ARM_AR_MEM_DCACHE_ALL_INVALIDATE();
}
/* Prepares the core for boot; currently this only invalidates the caches. */
static void clean_system(void)
{
	invalidate_cache();
}
/* Invalidates both instruction and data caches at the core level. */
static void invalidate_cache(void)
{
	ARM_AR_MEM_CACHE_ALL_INVALIDATE();
}