1 /*
* @file hw_mmu.c
3 *
4 * @brief Functions required to program MMU
5 *
6 *
7 * ============================================================================
8 *
9 * Copyright (c) 2010-2011, Texas Instruments Incorporated
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * * Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 *
18 * * Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * * Neither the name of Texas Instruments Incorporated nor the names of
23 * its contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
27 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
33 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
34 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
35 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
36 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 * Contact information for paper mail:
38 * Texas Instruments
39 * Post Office Box 655303
40 * Dallas, Texas 75265
41 * Contact information:
42 * http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
43 * DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
44 * ============================================================================
45 *
46 */
50 #include <ti/syslink/Std.h>
51 #include <GlobalTypes.h>
52 #include <MMURegAcM.h>
53 #include <hw_defs.h>
54 #include <hw_mmu.h>
56 #define MMU_BASE_VAL_MASK 0xFC00
57 #define MMU_PAGE_MAX 3
58 #define MMU_ELEMENTSIZE_MAX 3
59 #define MMU_ADDR_MASK 0xFFFFF000
60 #define MMU_TTB_MASK 0xFFFFC000
61 #define MMU_SECTION_ADDR_MASK 0xFFF00000
62 #define MMU_SSECTION_ADDR_MASK 0xFF000000
63 #define MMU_PAGE_TABLE_MASK 0xFFFFFC00
64 #define MMU_LARGE_PAGE_MASK 0xFFFF0000
65 #define MMU_SMALL_PAGE_MASK 0xFFFFF000
67 #define MMU_LOAD_TLB 0x00000001
68 #define NUM_TLB_ENTRIES 32
/*
 * type: hw_mmu_pgsiz_t
 *
 * desc: Enumerated Type used to specify the MMU Page Size(SLSS).
 *       The enumerator values are written directly into the low bits
 *       of the MMU_CAM register by mme_set_cam_entry(), so their
 *       order/values must match the hardware PageSize field encoding.
 */
enum hw_mmu_pgsiz_t {
    HW_MMU_SECTION,        /* 1MB section   (HW_PAGE_SIZE_1MB)  */
    HW_MMU_LARGE_PAGE,     /* 64KB page     (HW_PAGE_SIZE_64KB) */
    HW_MMU_SMALL_PAGE,     /* 4KB page      (HW_PAGE_SIZE_4KB)  */
    HW_MMU_SUPERSECTION    /* 16MB section  (HW_PAGE_SIZE_16MB) */
};
85 /*
86 * function : mmu_flsh_entry
87 */
89 static hw_status mmu_flsh_entry(const UInt32 base_address);
91 /*
92 * function : mme_set_cam_entry
93 *
94 */
96 static hw_status mme_set_cam_entry(const UInt32 base_address,
97 const UInt32 page_size,
98 const UInt32 preserve_bit,
99 const UInt32 valid_bit,
100 const UInt32 virt_addr_tag);
102 /*
103 * function : mmu_set_ram_entry
104 */
105 static hw_status mmu_set_ram_entry(const UInt32 base_address,
106 const UInt32 physical_addr,
107 enum hw_endianism_t endianism,
108 enum hw_elemnt_siz_t element_size,
109 enum hw_mmu_mixed_size_t mixedSize);
111 /*
112 * hw functions
113 *
114 */
116 hw_status hw_mmu_enable(const UInt32 base_address)
117 {
118 hw_status status = RET_OK;
120 MMUMMU_CNTLMMUEnableWrite32(base_address, HW_SET);
122 return status;
123 }
125 hw_status hw_mmu_disable(const UInt32 base_address)
126 {
127 hw_status status = RET_OK;
129 MMUMMU_CNTLMMUEnableWrite32(base_address, HW_CLEAR);
131 return status;
132 }
/*
 * NOTE(review): despite the "_set" suffix this is a getter -- it reads
 * the locked-entry count (MMU_LOCK.BaseValue) into *num_lcked_entries.
 * The matching writer is hw_mmu_numlocked_set().  Renaming would break
 * external callers, so only the documentation flags it.
 */
hw_status hw_mmu_nulck_set(const UInt32 base_address, UInt32 *num_lcked_entries)
{
    hw_status status = RET_OK;

    /* caller must pass a valid pointer; no NULL check is performed */
    *num_lcked_entries = MMUMMU_LOCKBaseValueRead32(base_address);

    return status;
}
144 hw_status hw_mmu_numlocked_set(const UInt32 base_address, UInt32 num_lcked_entries)
145 {
146 hw_status status = RET_OK;
148 MMUMMU_LOCKBaseValueWrite32(base_address, num_lcked_entries);
150 return status;
151 }
155 hw_status hw_mmu_vctm_numget(const UInt32 base_address, UInt32 *vctm_entry_num)
156 {
157 hw_status status = RET_OK;
159 *vctm_entry_num = MMUMMU_LOCKCurrentVictimRead32(base_address);
161 return status;
162 }
166 hw_status hw_mmu_victim_numset(const UInt32 base_address, UInt32 vctm_entry_num)
167 {
168 hw_status status = RET_OK;
170 mmu_lck_crnt_vctmwite32(base_address, vctm_entry_num);
172 return status;
173 }
175 hw_status hw_mmu_tlb_flushAll(const UInt32 base_address)
176 {
177 hw_status status = RET_OK;
179 MMUMMU_GFLUSHGlobalFlushWrite32(base_address, HW_SET);
181 return status;
182 }
184 hw_status hw_mmu_eventack(const UInt32 base_address, UInt32 irq_mask)
185 {
186 hw_status status = RET_OK;
188 MMUMMU_IRQSTATUSWriteRegister32(base_address, irq_mask);
190 return status;
191 }
193 hw_status hw_mmu_event_disable(const UInt32 base_address, UInt32 irq_mask)
194 {
195 hw_status status = RET_OK;
196 UInt32 irqReg;
197 irqReg = MMUMMU_IRQENABLEReadRegister32(base_address);
199 MMUMMU_IRQENABLEWriteRegister32(base_address, irqReg & ~irq_mask);
201 return status;
202 }
204 hw_status hw_mmu_event_enable(const UInt32 base_address, UInt32 irq_mask)
205 {
206 hw_status status = RET_OK;
207 UInt32 irqReg;
209 irqReg = MMUMMU_IRQENABLEReadRegister32(base_address);
211 MMUMMU_IRQENABLEWriteRegister32(base_address, irqReg | irq_mask);
213 return status;
214 }
216 hw_status hw_mmu_event_status(const UInt32 base_address, UInt32 *irq_mask)
217 {
218 hw_status status = RET_OK;
220 *irq_mask = MMUMMU_IRQSTATUSReadRegister32(base_address);
222 return status;
223 }
/*
 * Read the MMU fault address register.
 *
 * base_address: virtual base address of the MMU register block
 * addr:         out parameter; receives MMU_FAULT_AD, the address that
 *               triggered the last MMU fault
 *
 * Returns RET_OK on success; CHECK_INPUT_PARAM rejects a zero
 * base_address with RET_BAD_NULL_PARAM.  Note: addr itself is not
 * NULL-checked here.
 */
hw_status hw_mmu_flt_adr_rd(const UInt32 base_address, UInt32 *addr)
{
    hw_status status = RET_OK;

    /*Check the input Parameters*/
    CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
            RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
    /* read values from register */
    *addr = MMUMMU_FAULT_ADReadRegister32(base_address);

    return status;
}
/*
 * Program the translation table base register (MMU_TTB).
 *
 * ttb_phys_addr: physical address of the first-level page table; the
 *                low 7 bits are masked off before writing, i.e. the
 *                table is assumed to be at least 128-byte aligned.
 *
 * NOTE(review): MMU_TTB_MASK (0xFFFFC000, 16KB alignment) is defined
 * at the top of this file but never used, while this function masks
 * with ~0x7F.  Confirm against the device TRM which alignment the
 * MMU_TTB register actually requires before changing either.
 */
hw_status hw_mmu_ttbset(const UInt32 base_address, UInt32 ttb_phys_addr)
{
    hw_status status = RET_OK;
    UInt32 loadTTB;

    /*Check the input Parameters*/
    CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
            RES_MMU_BASE + RES_INVALID_INPUT_PARAM);

    loadTTB = ttb_phys_addr & ~0x7FUL;
    /* write values to register */
    MMUMMU_TTBWriteRegister32(base_address, loadTTB);

    return status;
}
255 hw_status hw_mmu_twl_enable(const UInt32 base_address)
256 {
257 hw_status status = RET_OK;
259 MMUMMU_CNTLTWLEnableWrite32(base_address, HW_SET);
261 return status;
262 }
264 hw_status hw_mmu_twl_disable(const UInt32 base_address)
265 {
266 hw_status status = RET_OK;
268 MMUMMU_CNTLTWLEnableWrite32(base_address, HW_CLEAR);
270 return status;
271 }
274 hw_status hw_mmu_tlb_flush(const UInt32 base_address,
275 UInt32 virtual_addr,
276 UInt32 page_size)
277 {
278 hw_status status = RET_OK;
279 UInt32 virt_addr_tag;
280 enum hw_mmu_pgsiz_t pg_sizeBits;
282 switch (page_size) {
283 case HW_PAGE_SIZE_4KB:
284 pg_sizeBits = HW_MMU_SMALL_PAGE;
285 break;
287 case HW_PAGE_SIZE_64KB:
288 pg_sizeBits = HW_MMU_LARGE_PAGE;
289 break;
291 case HW_PAGE_SIZE_1MB:
292 pg_sizeBits = HW_MMU_SECTION;
293 break;
295 case HW_PAGE_SIZE_16MB:
296 pg_sizeBits = HW_MMU_SUPERSECTION;
297 break;
299 default:
300 return RET_FAIL;
301 }
303 /* Generate the 20-bit tag from virtual address */
304 virt_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
306 mme_set_cam_entry(base_address, pg_sizeBits, 0, 0, virt_addr_tag);
308 mmu_flsh_entry(base_address);
310 return status;
311 }
/*
 * hw_mmu_tlb_add - load one TLB entry directly (CAM + RAM) into slot
 * entryNum.
 *
 * base_address:  virtual base address of the MMU register block
 * physical_addr: physical page address programmed into the RAM entry
 * virtual_addr:  device virtual address; its top 20 bits become the
 *                CAM tag
 * page_size:     HW_PAGE_SIZE_4KB / 64KB / 1MB / 16MB
 * entryNum:      TLB victim slot to load the entry into
 * map_attrs:     endianism, element size and mixed-size attributes
 * preserve_bit:  protects the entry from a global flush
 * valid_bit:     marks the entry valid
 *
 * Returns RET_OK, RET_FAIL for an unsupported page_size, or a CHECK_*
 * error code for invalid parameters.  The MMU_LOCK register is saved
 * before and restored after the load so the locked-entry count is
 * preserved.
 */
hw_status hw_mmu_tlb_add(const UInt32 base_address,
        UInt32 physical_addr,
        UInt32 virtual_addr,
        UInt32 page_size,
        UInt32 entryNum,
        struct hw_mmu_map_attrs_t *map_attrs,
        enum hw_set_clear_t preserve_bit,
        enum hw_set_clear_t valid_bit)
{
    hw_status status = RET_OK;
    UInt32 lockReg;
    UInt32 virt_addr_tag;
    enum hw_mmu_pgsiz_t mmu_pg_size;

    /*Check the input Parameters*/
    CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
            RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
    CHECK_INPUT_RANGE_MIN0(page_size, MMU_PAGE_MAX, RET_PARAM_OUT_OF_RANGE,
            RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
    CHECK_INPUT_RANGE_MIN0(map_attrs->element_size,
            MMU_ELEMENTSIZE_MAX, RET_PARAM_OUT_OF_RANGE,
            RES_MMU_BASE + RES_INVALID_INPUT_PARAM);

    /* map the public page-size constant to the CAM PageSize code */
    switch (page_size) {
    case HW_PAGE_SIZE_4KB:
        mmu_pg_size = HW_MMU_SMALL_PAGE;
        break;

    case HW_PAGE_SIZE_64KB:
        mmu_pg_size = HW_MMU_LARGE_PAGE;
        break;

    case HW_PAGE_SIZE_1MB:
        mmu_pg_size = HW_MMU_SECTION;
        break;

    case HW_PAGE_SIZE_16MB:
        mmu_pg_size = HW_MMU_SUPERSECTION;
        break;

    default:
        return RET_FAIL;
    }

    /* save MMU_LOCK so it can be restored after the load below */
    lockReg = mmu_lckread_reg_32(base_address);

    /* Generate the 20-bit tag from virtual address */
    virt_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);

    /* Write the fields in the CAM Entry Register */
    mme_set_cam_entry(base_address, mmu_pg_size, preserve_bit, valid_bit,
            virt_addr_tag);

    /* Write the different fields of the RAM Entry Register */
    /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
    mmu_set_ram_entry(base_address, physical_addr,
            map_attrs->endianism, map_attrs->element_size, map_attrs->mixedSize);

    /* Update the MMU Lock Register */
    /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
    mmu_lck_crnt_vctmwite32(base_address, entryNum);

    /* Enable loading of an entry in TLB by writing 1 into LD_TLB_REG
    register */
    mmu_ld_tlbwrt_reg32(base_address, MMU_LOAD_TLB);

    /* restore the saved MMU_LOCK value */
    mmu_lck_write_reg32(base_address, lockReg);

    return status;
}
388 hw_status hw_mmu_pte_set(const UInt32 pg_tbl_va,
389 UInt32 physical_addr,
390 UInt32 virtual_addr,
391 UInt32 page_size,
392 struct hw_mmu_map_attrs_t *map_attrs)
393 {
394 hw_status status = RET_OK;
395 UInt32 pte_addr, pte_val;
396 long int num_entries = 1;
398 switch (page_size) {
400 case HW_PAGE_SIZE_4KB:
401 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr &
402 MMU_SMALL_PAGE_MASK);
403 pte_val = ((physical_addr & MMU_SMALL_PAGE_MASK) |
404 (map_attrs->endianism << 9) |
405 (map_attrs->element_size << 4) |
406 (map_attrs->mixedSize << 11) | 2
407 );
408 break;
410 case HW_PAGE_SIZE_64KB:
411 num_entries = 16;
412 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr &
413 MMU_LARGE_PAGE_MASK);
414 pte_val = ((physical_addr & MMU_LARGE_PAGE_MASK) |
415 (map_attrs->endianism << 9) |
416 (map_attrs->element_size << 4) |
417 (map_attrs->mixedSize << 11) | 1
418 );
419 break;
421 case HW_PAGE_SIZE_1MB:
422 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
423 MMU_SECTION_ADDR_MASK);
424 pte_val = ((((physical_addr & MMU_SECTION_ADDR_MASK) |
425 (map_attrs->endianism << 15) |
426 (map_attrs->element_size << 10) |
427 (map_attrs->mixedSize << 17)) &
428 ~0x40000) | 0x2
429 );
430 break;
432 case HW_PAGE_SIZE_16MB:
433 num_entries = 16;
434 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
435 MMU_SSECTION_ADDR_MASK);
436 pte_val = ((physical_addr & MMU_SSECTION_ADDR_MASK) |0x40022);
437 break;
439 case HW_MMU_COARSE_PAGE_SIZE:
440 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
441 MMU_SECTION_ADDR_MASK);
442 pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
443 break;
445 default:
446 return RET_FAIL;
447 }
449 while (--num_entries >= 0)
450 ((ULONG*)pte_addr)[num_entries] = pte_val;
453 return status;
454 }
456 hw_status hw_mmu_pte_clear(const UInt32 pg_tbl_va,
457 UInt32 virtual_addr,
458 UInt32 pg_size)
459 {
460 hw_status status = RET_OK;
461 UInt32 pte_addr;
462 long int num_entries = 1;
464 switch (pg_size) {
465 case HW_PAGE_SIZE_4KB:
466 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
467 virtual_addr & MMU_SMALL_PAGE_MASK);
468 break;
470 case HW_PAGE_SIZE_64KB:
471 num_entries = 16;
472 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
473 virtual_addr & MMU_LARGE_PAGE_MASK);
474 break;
476 case HW_PAGE_SIZE_1MB:
477 case HW_MMU_COARSE_PAGE_SIZE:
478 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
479 virtual_addr & MMU_SECTION_ADDR_MASK);
480 break;
482 case HW_PAGE_SIZE_16MB:
483 num_entries = 16;
484 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
485 virtual_addr & MMU_SSECTION_ADDR_MASK);
486 break;
488 default:
489 return RET_FAIL;
490 }
492 while (--num_entries >= 0)
493 ((UInt32 *)pte_addr)[num_entries] = 0;
495 return status;
496 }
498 /*
499 * function: mmu_flsh_entry
500 */
501 static hw_status mmu_flsh_entry(const UInt32 base_address)
502 {
503 hw_status status = RET_OK;
504 UInt32 flushEntryData = 0x1;
506 /*Check the input Parameters*/
507 CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
508 RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
510 /* write values to register */
511 MMUMMU_FLUSH_ENTRYWriteRegister32(base_address, flushEntryData);
513 return status;
514 }
515 /*
516 * function : mme_set_cam_entry
517 */
518 static hw_status mme_set_cam_entry(const UInt32 base_address,
519 const UInt32 page_size,
520 const UInt32 preserve_bit,
521 const UInt32 valid_bit,
522 const UInt32 virt_addr_tag)
523 {
524 hw_status status = RET_OK;
525 UInt32 mmuCamReg;
527 /*Check the input Parameters*/
528 CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
529 RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
531 mmuCamReg = (virt_addr_tag << 12);
532 mmuCamReg = (mmuCamReg) | (page_size) | (valid_bit << 2)
533 | (preserve_bit << 3);
535 /* write values to register */
536 MMUMMU_CAMWriteRegister32(base_address, mmuCamReg);
538 return status;
539 }
540 /*
541 * function: mmu_set_ram_entry
542 */
543 static hw_status mmu_set_ram_entry(const UInt32 base_address,
544 const UInt32 physical_addr,
545 enum hw_endianism_t endianism,
546 enum hw_elemnt_siz_t element_size,
547 enum hw_mmu_mixed_size_t mixedSize)
548 {
549 hw_status status = RET_OK;
550 UInt32 mmuRamReg;
552 /*Check the input Parameters*/
553 CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
554 RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
555 CHECK_INPUT_RANGE_MIN0(element_size, MMU_ELEMENTSIZE_MAX,
556 RET_PARAM_OUT_OF_RANGE,
557 RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
560 mmuRamReg = (physical_addr & MMU_ADDR_MASK);
561 mmuRamReg = (mmuRamReg) | ((endianism << 9) | (element_size << 7)
562 | (mixedSize << 6));
564 /* write values to register */
565 MMUMMU_RAMWriteRegister32(base_address, mmuRamReg);
567 return status;
569 }
571 UInt32 hw_mmu_pte_phyaddr(UInt32 pte_val, UInt32 pte_size)
572 {
573 UInt32 ret_val = 0;
575 switch (pte_size) {
577 case HW_PAGE_SIZE_4KB:
578 ret_val = pte_val & MMU_SMALL_PAGE_MASK;
579 break;
580 case HW_PAGE_SIZE_64KB:
581 ret_val = pte_val & MMU_LARGE_PAGE_MASK;
582 break;
584 case HW_PAGE_SIZE_1MB:
585 ret_val = pte_val & MMU_SECTION_ADDR_MASK;
586 break;
587 case HW_PAGE_SIZE_16MB:
588 ret_val = pte_val & MMU_SSECTION_ADDR_MASK;
589 break;
590 default:
591 /* Invalid */
592 break;
594 }
596 return ret_val;
597 }