/*
 *  @file  omap5430BenelliEnabler.c
 *
 *  @brief  MMU programming module
 *
 *
 *  ============================================================================
 *
 *  Copyright (c) 2010-2013, Texas Instruments Incorporated
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  *  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *  *  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  *  Neither the name of Texas Instruments Incorporated nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 *  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 *  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 *  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 *  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 *  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 *  OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 *  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 *  OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 *  EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *  Contact information for paper mail:
 *  Texas Instruments
 *  Post Office Box 655303
 *  Dallas, Texas 75265
 *  Contact information:
 *  http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
 *  DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
 *  ============================================================================
 *
 */
#include <errno.h>
#include <unistd.h>
#include <ti/syslink/Std.h>

/* OSAL and utils headers */
#include <ti/syslink/utils/List.h>
#include <ti/syslink/utils/Trace.h>
#include <ti/syslink/utils/OsalPrint.h>

/* Module level headers */
#include <OsalDrv.h>
#include <_ProcDefs.h>
#include <Processor.h>
#include <hw/inout.h>
#include <sys/mman.h>

#include <hw_defs.h>
#include <hw_mmu.h>
#include <OMAP5430BenelliHal.h>
#include <OMAP5430BenelliHalMmu.h>
#include <OMAP5430BenelliPhyShmem.h>
#include <OMAP5430BenelliEnabler.h>
#include <stdbool.h>
#include <stdint.h>
#define MMU_REGS_SIZE (sizeof(OMAP5430Benelli_MMURegs) / sizeof(UInt32))
static UInt32 mmu_regs[MMU_REGS_SIZE];

#define PAGE_SIZE 0x1000
static UInt32 nr_tlbs = 0;
#define NR_TLBS_MAX 32
struct iotlb_entry ipu_tlbs[NR_TLBS_MAX];
//struct iotlb_entry dsp_tlbs[NR_TLBS_MAX];
/* Attributes of L2 page tables for DSP MMU. */
struct page_info {
    /* Number of valid PTEs in the L2 PT */
    UInt32 num_entries;
};
/* Attributes used to manage the DSP MMU page tables */
struct pg_table_attrs {
    struct sync_cs_object *hcs_object; /* Critical section object handle */
    UInt32 l1_base_pa;      /* Physical address of the L1 PT */
    UInt32 l1_base_va;      /* Virtual  address of the L1 PT */
    UInt32 l1_size;         /* Size of the L1 PT */
    UInt32 l1_tbl_alloc_pa;
    /* Physical address of allocated mem for L1 table. May not be aligned */
    UInt32 l1_tbl_alloc_va;
    /* Virtual address of allocated mem for L1 table. May not be aligned */
    UInt32 l1_tbl_alloc_sz;
    /* Size of consistent memory allocated for L1 table.
     * May not be aligned */
    UInt32 l2_base_pa;      /* Physical address of the L2 PT */
    UInt32 l2_base_va;      /* Virtual  address of the L2 PT */
    UInt32 l2_size;         /* Size of the L2 PT */
    UInt32 l2_tbl_alloc_pa;
    /* Physical address of allocated mem for L2 table. May not be aligned */
    UInt32 l2_tbl_alloc_va;
    /* Virtual address of allocated mem for L2 table. May not be aligned */
    UInt32 ls_tbl_alloc_sz;
    /* Size of consistent memory allocated for L2 table.
     * May not be aligned */
    UInt32 l2_num_pages;    /* Number of allocated L2 PT */
    struct page_info *pg_info;
};
static struct pg_table_attrs *p_pt_attrs = NULL;
static struct pg_table_attrs *tesla_p_pt_attrs = NULL;

enum pagetype {
    SECTION = 0,
    LARGE_PAGE = 1,
    SMALL_PAGE = 2,
    SUPER_SECTION = 3
};

static UInt32 shm_phys_addr;
static UInt32 shm_phys_addr_dsp;

#define INREG32(x) in32((uintptr_t)x)
#define OUTREG32(x, y) out32((uintptr_t)x, y)
#define SIZE 0x4
static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf);
static Int load_iotlb_entry (OMAP5430BENELLI_HalObject * halObject,
                             struct iotlb_entry *e);
static Int iotlb_cr_valid (struct cr_regs *cr);

static Int benelli_mem_map (OMAP5430BENELLI_HalObject * halObject,
                            UInt32 mpu_addr, UInt32 ul_virt_addr,
                            UInt32 num_bytes, UInt32 map_attr);
static Int benelli_mem_unmap (OMAP5430BENELLI_HalObject * halObject, UInt32 da,
                              UInt32 num_bytes);
static Void iotlb_cr_to_e (struct cr_regs *cr, struct iotlb_entry *e)
{
    e->da       = cr->cam & MMU_CAM_VATAG_MASK;
    e->pa       = cr->ram & MMU_RAM_PADDR_MASK;
    e->valid    = cr->cam & MMU_CAM_V;
    e->prsvd    = cr->cam & MMU_CAM_P;
    e->pgsz     = cr->cam & MMU_CAM_PGSZ_MASK;
    e->endian   = cr->ram & MMU_RAM_ENDIAN_MASK;
    e->elsz     = cr->ram & MMU_RAM_ELSZ_MASK;
    e->mixed    = cr->ram & MMU_RAM_MIXED;
}
static Void iotlb_getLock (OMAP5430BENELLI_HalObject * halObject,
                           struct iotlb_lock *l)
{
    ULONG reg;
    OMAP5430Benelli_MMURegs * mmuRegs =
                                  (OMAP5430Benelli_MMURegs *)halObject->mmuBase;

    reg = INREG32(&mmuRegs->LOCK);
    l->base = MMU_LOCK_BASE(reg);
    l->vict = MMU_LOCK_VICT(reg);
}
static Void iotlb_setLock (OMAP5430BENELLI_HalObject * halObject,
                           struct iotlb_lock *l)
{
    ULONG reg;
    OMAP5430Benelli_MMURegs * mmuRegs =
                                  (OMAP5430Benelli_MMURegs *)halObject->mmuBase;

    reg = (l->base << MMU_LOCK_BASE_SHIFT);
    reg |= (l->vict << MMU_LOCK_VICT_SHIFT);
    OUTREG32(&mmuRegs->LOCK, reg);
}
static void omap5_tlb_read_cr (OMAP5430BENELLI_HalObject * halObject,
                               struct cr_regs *cr)
{
    OMAP5430Benelli_MMURegs * mmuRegs =
                                  (OMAP5430Benelli_MMURegs *)halObject->mmuBase;

    cr->cam = INREG32(&mmuRegs->READ_CAM);
    cr->ram = INREG32(&mmuRegs->READ_RAM);
}
/* only used for iotlb iteration in for-loop */
static struct cr_regs __iotlb_read_cr (OMAP5430BENELLI_HalObject * halObject,
                                       int n)
{
    struct cr_regs cr;
    struct iotlb_lock l;
    iotlb_getLock(halObject, &l);
    l.vict = n;
    iotlb_setLock(halObject, &l);
    omap5_tlb_read_cr(halObject, &cr);
    return cr;
}
#define for_each_iotlb_cr(n, __i, cr)                                  \
    for (__i = 0;                                                      \
         (__i < (n)) && (cr = __iotlb_read_cr(halObject, __i), TRUE);  \
         __i++)
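
/* Illustrative usage sketch (compiled out): walk the first n TLB entries
 * with the iterator above. The macro references a `halObject` variable
 * implicitly, so one must be in scope; the function name and the printf
 * formatting here are assumptions for demonstration only. */
#if 0
static Void example_dump_iotlb (OMAP5430BENELLI_HalObject * halObject, int n)
{
    int i;
    struct cr_regs cr;
    for_each_iotlb_cr(n, i, cr) {
        printf("TLB[%d]: cam=0x%08x ram=0x%08x\n",
               i, (unsigned int)cr.cam, (unsigned int)cr.ram);
    }
}
#endif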
static Int save_tlbs (OMAP5430BENELLI_HalObject * halObject, UInt32 procId)
{
    Int i = 0;
    struct cr_regs cr_tmp;
    struct iotlb_lock l;

    iotlb_getLock(halObject, &l);

    nr_tlbs = l.base;
#ifdef SYSLINK_SYSBIOS_SMP
    if (procId == PROCTYPE_IPU0)
#else
    if (procId == PROCTYPE_IPU0 || procId == PROCTYPE_IPU1)
#endif
    {
        for_each_iotlb_cr(nr_tlbs, i, cr_tmp) {
            iotlb_cr_to_e(&cr_tmp, &ipu_tlbs[i]);
        }
    }
    //else if (procId == PROCTYPE_DSP) {
            //TODO: Add along with the DSP support.
    //}
    else {
        GT_setFailureReason(curTrace, GT_2CLASS, "save_tlbs",
                            EINVAL, "Invalid Processor Id");
        return -EINVAL;
    }

    return 0;
}
static Int restore_tlbs (OMAP5430BENELLI_HalObject * halObject, UInt32 procId)
{
    Int i = 0;
    Int status = -1;
    struct iotlb_lock save;

    /* Reset the base and victim values */
    save.base = 0;
    save.vict = 0;
    iotlb_setLock(halObject, &save);

#ifdef SYSLINK_SYSBIOS_SMP
    if (procId == PROCTYPE_IPU0)
#else
    if (procId == PROCTYPE_IPU0 || procId == PROCTYPE_IPU1)
#endif
    {
        for (i = 0; i < nr_tlbs; i++) {
            status = load_iotlb_entry(halObject, &ipu_tlbs[i]);
            if (status < 0) {
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "restore_tlbs",
                                     status,
                                     "Error restoring the tlbs");
                goto err;
            }
        }
    }
    //else if (procId == PROCTYPE_DSP) {
        //TODO: Add along with the DSP support.
    //}
    else {
        status = -EINVAL;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "restore_tlbs",
                             status,
                             "Invalid ProcId");
        goto err;
    }

    return 0;

err:
    return status;
}
static Int save_mmu_regs (OMAP5430BENELLI_HalObject * halObject, UInt32 procId)
{
    UInt32 i = 0;

    if (halObject == NULL) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "save_mmu_regs",
                             -ENOMEM,
                             "halObject is NULL");
        return -ENOMEM;
    }

    if (halObject->mmuBase == 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "save_mmu_regs",
                             -ENOMEM,
                             "halObject->mmuBase is 0");
        return -ENOMEM;
    }

    for (i = 0; i < MMU_REGS_SIZE; i++) {
        mmu_regs[i] = INREG32(halObject->mmuBase + (i * 4));
    }

    return 0;
}
static Int restore_mmu_regs (OMAP5430BENELLI_HalObject * halObject,
                             UInt32 procId)
{
    UInt32 i = 0;

    if (halObject == NULL) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "restore_mmu_regs",
                             -ENOMEM,
                             "halObject is NULL");
        return -ENOMEM;
    }

    if (halObject->mmuBase == 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "restore_mmu_regs",
                             -ENOMEM,
                             "halObject->mmuBase is 0");
        return -ENOMEM;
    }

    for (i = 0; i < MMU_REGS_SIZE; i++) {
        OUTREG32(halObject->mmuBase + (i * 4), mmu_regs[i]);
    }

    return 0;
}
Int save_mmu_ctxt (OMAP5430BENELLI_HalObject * halObject, UInt32 procId)
{
    Int status = -1;

    status = save_mmu_regs(halObject, procId);
    if (status < 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "save_mmu_ctxt",
                             status,
                             "Unable to save MMU Regs");
        return status;
    }

    status = save_tlbs(halObject, procId);
    if (status < 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "save_mmu_ctxt",
                             status,
                             "Unable to save TLBs");
        return status;
    }
    return status;
}
Int restore_mmu_ctxt (OMAP5430BENELLI_HalObject * halObject, UInt32 procId)
{
    Int status = -1;

    status = restore_mmu_regs(halObject, procId);
    if (status < 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "restore_mmu_ctxt",
                             status,
                             "Unable to restore MMU Regs");
        return status;
    }

    status = restore_tlbs(halObject, procId);
    if (status < 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "restore_mmu_ctxt",
                             status,
                             "Unable to restore TLBs");
        return status;
    }

    return status;
}
/*=========================================
 * Decides a TLB entry size
 *
 */
static Int get_mmu_entry_size (UInt32 pa, UInt32 size, enum pagetype *size_tlb,
                               UInt32 *entry_size)
{
    Int     status = 0;
    Bool    page_align_4kb  = false;
    Bool    page_align_64kb = false;
    Bool    page_align_1mb  = false;
    Bool    page_align_16mb = false;
    UInt32  phys_addr = pa;

    /* First check the page alignment */
    if ((phys_addr % PAGE_SIZE_4KB)  == 0)
        page_align_4kb  = true;
    if ((phys_addr % PAGE_SIZE_64KB) == 0)
        page_align_64kb = true;
    if ((phys_addr % PAGE_SIZE_1MB)  == 0)
        page_align_1mb  = true;
    if ((phys_addr % PAGE_SIZE_16MB) == 0)
        page_align_16mb = true;

    if ((!page_align_64kb) && (!page_align_1mb) && (!page_align_4kb)) {
        status = -EINVAL;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "get_mmu_entry_size",
                             status,
                             "phys_addr is not properly aligned");
        goto error_exit;
    }

    /* Now decide the entry size */
    if (size >= PAGE_SIZE_16MB) {
        if (page_align_16mb) {
            *size_tlb   = SUPER_SECTION;
            *entry_size = PAGE_SIZE_16MB;
        } else if (page_align_1mb) {
            *size_tlb   = SECTION;
            *entry_size = PAGE_SIZE_1MB;
        } else if (page_align_64kb) {
            *size_tlb   = LARGE_PAGE;
            *entry_size = PAGE_SIZE_64KB;
        } else if (page_align_4kb) {
            *size_tlb   = SMALL_PAGE;
            *entry_size = PAGE_SIZE_4KB;
        } else {
            status = -EINVAL;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "get_mmu_entry_size",
                                 status,
                                 "size and alignment are invalid");
            goto error_exit;
        }
    } else if (size >= PAGE_SIZE_1MB && size < PAGE_SIZE_16MB) {
        if (page_align_1mb) {
            *size_tlb   = SECTION;
            *entry_size = PAGE_SIZE_1MB;
        } else if (page_align_64kb) {
            *size_tlb   = LARGE_PAGE;
            *entry_size = PAGE_SIZE_64KB;
        } else if (page_align_4kb) {
            *size_tlb   = SMALL_PAGE;
            *entry_size = PAGE_SIZE_4KB;
        } else {
            status = -EINVAL;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "get_mmu_entry_size",
                                 status,
                                 "size and alignment are invalid");
            goto error_exit;
        }
    } else if (size > PAGE_SIZE_4KB && size < PAGE_SIZE_1MB) {
        if (page_align_64kb) {
            *size_tlb   = LARGE_PAGE;
            *entry_size = PAGE_SIZE_64KB;
        } else if (page_align_4kb) {
            *size_tlb   = SMALL_PAGE;
            *entry_size = PAGE_SIZE_4KB;
        } else {
            status = -EINVAL;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "get_mmu_entry_size",
                                 status,
                                 "size and alignment are invalid");
            goto error_exit;
        }
    } else if (size == PAGE_SIZE_4KB) {
        if (page_align_4kb) {
            *size_tlb   = SMALL_PAGE;
            *entry_size = PAGE_SIZE_4KB;
        } else {
            status = -EINVAL;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "get_mmu_entry_size",
                                 status,
                                 "size and alignment are invalid");
            goto error_exit;
        }
    } else {
        status = -EINVAL;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "get_mmu_entry_size",
                             status,
                             "size is invalid");
        goto error_exit;
    }
    return 0;

error_exit:
    return status;
}
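
/*
 * Worked example (values assumed for illustration): for pa = 0x9CF00000
 * and size = 0x200000, pa is 1 MB aligned but not 16 MB aligned and
 * size < 16 MB, so the function returns *size_tlb = SECTION and
 * *entry_size = PAGE_SIZE_1MB; the caller maps 1 MB and calls again
 * for the remainder.
 */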
#if 0
/*=========================================
 * Add DSP MMU entries corresponding to given MPU-Physical address
 * and DSP-virtual address
 */
static Int add_dsp_mmu_entry (OMAP5430BENELLI_HalObject * halObject,
                              UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
{
    UInt32 mapped_size = 0;
    enum pagetype size_tlb = SECTION;
    UInt32 entry_size = 0;
    int status = 0;
    struct iotlb_entry tlb_entry;
    int retval = 0;

    while ((mapped_size < size) && (status == 0)) {
        status = get_mmu_entry_size(*phys_addr, (size - mapped_size),
                                    &size_tlb, &entry_size);
        if (status < 0) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "add_dsp_mmu_entry",
                                 status,
                                 "get_mmu_entry_size failed");
            goto error_exit;
        }

        if (size_tlb == SUPER_SECTION)
            tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
        else if (size_tlb == SECTION)
            tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
        else if (size_tlb == LARGE_PAGE)
            tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
        else if (size_tlb == SMALL_PAGE)
            tlb_entry.pgsz = MMU_CAM_PGSZ_4K;

        tlb_entry.elsz = MMU_RAM_ELSZ_16;
        tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
        tlb_entry.mixed = MMU_RAM_MIXED;
        tlb_entry.prsvd = MMU_CAM_P;
        tlb_entry.valid = MMU_CAM_V;

        tlb_entry.da = *dsp_addr;
        tlb_entry.pa = *phys_addr;
        retval = load_iotlb_entry(halObject, &tlb_entry);
        if (retval < 0) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "add_dsp_mmu_entry",
                                 retval,
                                 "load_iotlb_entry failed");
            goto error_exit;
        }
        mapped_size += entry_size;
        *phys_addr  += entry_size;
        *dsp_addr   += entry_size;
    }

    return 0;

error_exit:
    printf("pte set failure retval = 0x%x, status = 0x%x \n",
           retval, status);
    return retval;
}
#endif
static Int add_entry_ext (OMAP5430BENELLI_HalObject * halObject,
                          UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
{
    UInt32 mapped_size = 0;
    enum pagetype size_tlb = SECTION;
    UInt32 entry_size = 0;
    Int status = 0;
    UInt32 page_size = HW_PAGE_SIZE_1MB;
    UInt32 flags = 0;

    flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
             DSP_MAPPHYSICALADDR);
    while ((mapped_size < size) && (status == 0)) {

        /* get_mmu_entry_size fills size_tlb and entry_size based on the
         * alignment of the address and the amount of memory left to map */
        status = get_mmu_entry_size (*phys_addr,
                                     (size - mapped_size),
                                     &size_tlb,
                                     &entry_size);
        if (status < 0) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "add_entry_ext",
                                 status,
                                 "get_mmu_entry_size failed");
            break;
        }
        else {
            if (size_tlb == SUPER_SECTION)
                page_size = HW_PAGE_SIZE_16MB;
            else if (size_tlb == SECTION)
                page_size = HW_PAGE_SIZE_1MB;
            else if (size_tlb == LARGE_PAGE)
                page_size = HW_PAGE_SIZE_64KB;
            else if (size_tlb == SMALL_PAGE)
                page_size = HW_PAGE_SIZE_4KB;

            if (status == 0) {
                status = benelli_mem_map (halObject,
                                          *phys_addr,
                                          *dsp_addr,
                                          page_size,
                                          flags);
                if (status < 0) {
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "add_entry_ext",
                                         status,
                                         "benelli_mem_map failed");
                    break;
                }
                mapped_size += entry_size;
                *phys_addr  += entry_size;
                *dsp_addr   += entry_size;
            }
        }
    }
    return status;
}
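
/*
 * Walk-through (values assumed for illustration): mapping 0x300000 bytes
 * from a 1 MB-aligned *phys_addr proceeds as three SECTION entries; each
 * iteration maps 1 MB via benelli_mem_map() and advances *phys_addr and
 * *dsp_addr by entry_size before re-evaluating the remaining size.
 */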
static Int __dump_tlb_entries (OMAP5430BENELLI_HalObject * halObject,
                               struct cr_regs *crs, int num)
{
    int i;
    struct iotlb_lock saved;
    struct cr_regs tmp;
    struct cr_regs *p = crs;

    iotlb_getLock(halObject, &saved);
    for_each_iotlb_cr(num, i, tmp) {
        if (!iotlb_cr_valid(&tmp))
            continue;
        *p++ = tmp;
    }
    iotlb_setLock(halObject, &saved);
    return p - crs;
}
UInt32 get_BenelliVirtAdd(OMAP5430BENELLI_HalObject * halObject, UInt32 physAdd)
{
    int i, num;
    struct cr_regs *cr;
    struct cr_regs *p = NULL;
    //DWORD dwPhys;
    UInt32 lRetVal = 0;
    num = 32;
    if ((halObject->procId != PROCTYPE_DSP) && (shm_phys_addr == 0))
        return 0;
    if ((halObject->procId == PROCTYPE_DSP) && (shm_phys_addr_dsp == 0))
        return 0;
    cr = mmap(NULL,
              sizeof(struct cr_regs) * num,
              PROT_NOCACHE | PROT_READ | PROT_WRITE,
              MAP_ANON | MAP_PHYS | MAP_PRIVATE,
              NOFD,
              0);
    if (cr == MAP_FAILED)
    {
        return 0;
    }

    memset(cr, 0, sizeof(struct cr_regs) * num);

    num = __dump_tlb_entries(halObject, cr, num);
    for (i = 0; i < num; i++)
    {
        p = cr + i;
        if (physAdd >= (p->ram & 0xFFFFF000) &&
            physAdd < ((p + 1)->ram & 0xFFFFF000))
        {
            lRetVal = ((p->cam & 0xFFFFF000) +
                       (physAdd - (p->ram & 0xFFFFF000)));
        }
    }
    munmap(cr, sizeof(struct cr_regs) * num);

    return lRetVal;
}
/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @halObject:  target iommu
 * @buf:        output buffer
 **/
static UInt32 dump_tlb_entries (OMAP5430BENELLI_HalObject * halObject,
                                char *buf, UInt32 bytes)
{
    Int i, num;
    struct cr_regs *cr;
    Char *p = buf;

    num = bytes / sizeof(*cr);
    num = min(32, num);
    cr = mmap(NULL,
              sizeof(struct cr_regs) * num,
              PROT_NOCACHE | PROT_READ | PROT_WRITE,
              MAP_ANON | MAP_PHYS | MAP_PRIVATE,
              NOFD,
              0);
    if (cr == MAP_FAILED)
    {
        return 0;
    }
    memset(cr, 0, sizeof(struct cr_regs) * num);

    num = __dump_tlb_entries(halObject, cr, num);
    for (i = 0; i < num; i++)
        p += iotlb_dump_cr(cr + i, p);
    munmap(cr, sizeof(struct cr_regs) * num);
    return p - buf;
}
static Void benelli_tlb_dump (OMAP5430BENELLI_HalObject * halObject)
{
    Char *p;

    p = mmap(NULL,
             1000,
             PROT_NOCACHE | PROT_READ | PROT_WRITE,
             MAP_ANON | MAP_PHYS | MAP_PRIVATE,
             NOFD,
             0);
    if (MAP_FAILED != p)
    {
        dump_tlb_entries(halObject, p, 1000);
        munmap(p, 1000);
    }

    return;
}
/*================================
 * Initialize the Benelli MMU.
 *===============================*/
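
/* Note: this relies on init_mmu_page_attribs() having populated p_pt_attrs
 * beforehand, since the TTB register is programmed below from
 * p_pt_attrs->l1_base_pa. */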
static Int benelli_mmu_init (OMAP5430BENELLI_HalObject * halObject,
                             ProcMgr_AddrInfo * memEntries,
                             UInt32 numMemEntries)
{
    Int ret_val = 0;
    UInt32 phys_addr = 0;
    UInt32 i = 0;
    UInt32 virt_addr = 0;
    OMAP5430Benelli_MMURegs * mmuRegs = NULL;

    if (halObject == NULL) {
        ret_val = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "benelli_mmu_init",
                             ret_val,
                             "halObject is NULL");
        goto error_exit;
    }

    if (halObject->mmuBase == 0) {
        ret_val = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "benelli_mmu_init",
                             ret_val,
                             "halObject->mmuBase is 0");
        goto error_exit;
    }
    mmuRegs = (OMAP5430Benelli_MMURegs *)halObject->mmuBase;

    /* Disable the MMU & TWL */
    hw_mmu_disable(halObject->mmuBase);
    hw_mmu_twl_disable(halObject->mmuBase);

    printf("  Programming Benelli memory regions\n");
    printf("=========================================\n");

    for (i = 0; i < numMemEntries; i++) {
        phys_addr = memEntries[i].addr[ProcMgr_AddrType_MasterPhys];
        if (phys_addr == (UInt32)(-1) || phys_addr == 0) {
            ret_val = -EINVAL;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "benelli_mmu_init",
                                 ret_val,
                                 "Processor_translateAddr failed");
            goto error_exit;
        }
        printf("VA = [0x%x] of size [0x%x] at PA = [0x%x]\n",
               memEntries[i].addr[ProcMgr_AddrType_SlaveVirt],
               memEntries[i].size,
               (unsigned int)phys_addr);

        /* OMAP5430 SDC code */
        /* Adjust below logic if using cacheable shared memory */
        shm_phys_addr = 1;
        virt_addr = memEntries[i].addr[ProcMgr_AddrType_SlaveVirt];

        ret_val = add_entry_ext(halObject, &phys_addr, &virt_addr,
                                (memEntries[i].size));
        if (ret_val < 0) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "benelli_mmu_init",
                                 ret_val,
                                 "add_entry_ext failed");
            goto error_exit;
        }
    }

    /* Set the TTB to point to the L1 page table's physical address */
    OUTREG32(&mmuRegs->TTB, p_pt_attrs->l1_base_pa);

    /* Enable the TWL */
    hw_mmu_twl_enable(halObject->mmuBase);

    hw_mmu_enable(halObject->mmuBase);

    benelli_tlb_dump(halObject);
    return 0;
error_exit:
    return ret_val;
}
/****************************************************
 *  Function which sets the TWL of the Tesla
 *****************************************************/
static Int tesla_set_twl (OMAP5430BENELLI_HalObject * halObject, Bool on)
{
    Int status = 0;
    OMAP5430Benelli_MMURegs * mmuRegs = NULL;
    ULONG reg;

    if (halObject == NULL) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "tesla_set_twl",
                             status,
                             "halObject is NULL");
    }
    else if (halObject->mmuBase == 0) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "tesla_set_twl",
                             status,
                             "halObject->mmuBase is NULL");
    }
    else {
        mmuRegs = (OMAP5430Benelli_MMURegs *)halObject->mmuBase;

        /* Setting MMU to Smart Idle Mode */
        reg = INREG32(&mmuRegs->SYSCONFIG);
        reg &= ~MMU_SYS_IDLE_MASK;
        reg |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
        OUTREG32(&mmuRegs->SYSCONFIG, reg);

        /* Enabling MMU */
        reg = INREG32(&mmuRegs->CNTL);

        if (on)
            OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TWL_MASK);
        else
            OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TLB_MISS_MASK);

        reg &= ~MMU_CNTL_MASK;
        if (on)
            reg |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
        else
            reg |= (MMU_CNTL_MMU_EN);

        OUTREG32(&mmuRegs->CNTL, reg);
    }

    return status;
}
static Int tesla_mmu_init (OMAP5430BENELLI_HalObject * halObject,
                           ProcMgr_AddrInfo * memEntries,
                           UInt32 numMemEntries)
{
    Int ret_val = 0;
    UInt32 phys_addr = 0;
    UInt32 i = 0;
    UInt32 virt_addr = 0;
    UInt32 reg;
    OMAP5430Benelli_MMURegs * mmuRegs = NULL;

    if (halObject == NULL) {
        ret_val = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "tesla_mmu_init",
                             ret_val,
                             "halObject is NULL");
        goto error_exit;
    }
    if (halObject->mmuBase == 0) {
        ret_val = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "tesla_mmu_init",
                             ret_val,
                             "halObject->mmuBase is 0");
        goto error_exit;
    }
    mmuRegs = (OMAP5430Benelli_MMURegs *)halObject->mmuBase;

    /* Disable the MMU & TWL */
    hw_mmu_disable(halObject->mmuBase);
    hw_mmu_twl_disable(halObject->mmuBase);

    printf("  Programming Tesla memory regions\n");
    printf("=========================================\n");

    for (i = 0; i < numMemEntries; i++) {
        phys_addr = memEntries[i].addr[ProcMgr_AddrType_MasterPhys];
        printf("VA = [0x%x] of size [0x%x] at PA = [0x%x]\n",
               (unsigned int)memEntries[i].addr[ProcMgr_AddrType_SlaveVirt],
               (unsigned int)memEntries[i].size,
               (unsigned int)phys_addr);

        shm_phys_addr_dsp = 1;

        virt_addr = memEntries[i].addr[ProcMgr_AddrType_SlaveVirt];
        ret_val = add_entry_ext(halObject, &phys_addr, &virt_addr,
                                (memEntries[i].size));

        if (ret_val < 0)
            goto error_exit;
    }

    /* Set the TTB to point to the L1 page table's physical address */
    OUTREG32(&mmuRegs->TTB, tesla_p_pt_attrs->l1_base_pa);
    /* Enable the TWL */
    hw_mmu_twl_enable(halObject->mmuBase);
    hw_mmu_enable(halObject->mmuBase);
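
    /* Assumption on the raw register write below: offset 0x10 appears to be
     * MMU_SYSCONFIG, and 0x11 apparently selects smart-idle with AUTOIDLE,
     * matching the MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE value written in
     * tesla_set_twl() above. */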
    //Set the SYSCONFIG
    reg = INREG32(halObject->mmuBase + 0x10);
    reg &= 0xFFFFFFEF;
    reg |= 0x11;
    OUTREG32(halObject->mmuBase + 0x10, reg);

    benelli_tlb_dump(halObject);
    return 0;
error_exit:
    return ret_val;
}
static Int init_tesla_page_attributes (Void)
{
    UInt32 pg_tbl_pa = 0;
    off64_t offset = 0;
    UInt32 pg_tbl_va = 0;
    UInt32 align_size = 0;
    UInt32 len = 0;
    UInt32 l1_size = 0x10000;
    UInt32 ls_num_of_pages = 128;
    int status = 0;

    tesla_p_pt_attrs = Memory_alloc (NULL, sizeof(struct pg_table_attrs),
                                     0, NULL);
    if (tesla_p_pt_attrs)
        Memory_set (tesla_p_pt_attrs, 0, sizeof(struct pg_table_attrs));
    else {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "init_tesla_page_attributes",
                             status,
                             "Memory_alloc failed");
        goto error_exit;
    }

    tesla_p_pt_attrs->l1_size = l1_size;
    align_size = tesla_p_pt_attrs->l1_size;
    tesla_p_pt_attrs->l1_tbl_alloc_sz = 0x100000;
    /* Align sizes are expected to be power of 2 */
    /* we like to get aligned on L1 table size */
    pg_tbl_va = (UInt32) mmap64 (NULL,
                                 tesla_p_pt_attrs->l1_tbl_alloc_sz,
                                 PROT_NOCACHE | PROT_READ | PROT_WRITE,
                                 MAP_ANON | MAP_PHYS | MAP_PRIVATE,
                                 NOFD,
                                 0x0);
    if (pg_tbl_va == (UInt32)MAP_FAILED) {
        pg_tbl_va = 0;
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "init_tesla_page_attributes",
                             status,
                             "mmap64 failed");
        goto error_exit;
    }
    else {
        /* Make sure the memory is contiguous */
        status = mem_offset64 ((void *)pg_tbl_va, NOFD,
                               tesla_p_pt_attrs->l1_tbl_alloc_sz,
                               &offset, &len);
        pg_tbl_pa = (UInt32)offset;
        if (len != tesla_p_pt_attrs->l1_tbl_alloc_sz) {
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "init_tesla_page_attributes",
                                 status,
                                 "phys mem is not contiguous");
        }
        if (status != 0) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "init_tesla_page_attributes",
                                 status,
                                 "mem_offset64 failed");
            goto error_exit;
        }
    }
    /* Check if the PA is aligned for us */
    if ((pg_tbl_pa) & (align_size - 1)) {
        /* PA not aligned to page table size, */
        /* try with more allocation and align */
        munmap((void *)pg_tbl_va, tesla_p_pt_attrs->l1_tbl_alloc_sz);
        tesla_p_pt_attrs->l1_tbl_alloc_sz =
                                       tesla_p_pt_attrs->l1_tbl_alloc_sz * 2;
        /* we like to get aligned on L1 table size */
        pg_tbl_va = (UInt32) mmap64 (NULL,
                                     tesla_p_pt_attrs->l1_tbl_alloc_sz,
                                     PROT_NOCACHE | PROT_READ | PROT_WRITE,
                                     MAP_ANON | MAP_PHYS | MAP_PRIVATE,
                                     NOFD,
                                     0);
        if (pg_tbl_va == (UInt32)MAP_FAILED) {
            pg_tbl_va = 0;
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "init_tesla_page_attributes",
                                 status,
                                 "mmap64 failed");
            goto error_exit;
        }
        else {
            /* Make sure the memory is contiguous */
            status = mem_offset64 ((void *)pg_tbl_va, NOFD,
                                   tesla_p_pt_attrs->l1_tbl_alloc_sz,
                                   &offset, &len);
            pg_tbl_pa = (UInt32)offset;
            if (len != tesla_p_pt_attrs->l1_tbl_alloc_sz) {
                status = -ENOMEM;
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "init_tesla_page_attributes",
                                     status,
                                     "phys mem is not contiguous");
            }
            if (status != 0) {
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "init_tesla_page_attributes",
                                     status,
                                     "mem_offset64 failed");
                goto error_exit;
            }
        }
        /* We should be able to get aligned table now */
        tesla_p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
        tesla_p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
        /* Align the PA to the next 'align' boundary */
        tesla_p_pt_attrs->l1_base_pa = ((pg_tbl_pa) + (align_size - 1)) &
                                       (~(align_size - 1));
        tesla_p_pt_attrs->l1_base_va = pg_tbl_va +
                               (tesla_p_pt_attrs->l1_base_pa - pg_tbl_pa);
    } else {
        /* We got aligned PA, cool */
        tesla_p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
        tesla_p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
        tesla_p_pt_attrs->l1_base_pa = pg_tbl_pa;
        tesla_p_pt_attrs->l1_base_va = pg_tbl_va;
    }

    if (tesla_p_pt_attrs->l1_base_va)
        memset((UInt8 *)tesla_p_pt_attrs->l1_base_va, 0x00,
               tesla_p_pt_attrs->l1_size);
    tesla_p_pt_attrs->l2_num_pages = ls_num_of_pages;
    tesla_p_pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
                                tesla_p_pt_attrs->l2_num_pages;
    align_size = 4; /* Make it UInt32 aligned */
    /* we like to get aligned on L1 table size */
    pg_tbl_va = tesla_p_pt_attrs->l1_base_va + 0x80000;
    pg_tbl_pa = tesla_p_pt_attrs->l1_base_pa + 0x80000;
    tesla_p_pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
    tesla_p_pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
    tesla_p_pt_attrs->ls_tbl_alloc_sz = tesla_p_pt_attrs->l2_size;
    tesla_p_pt_attrs->l2_base_pa = pg_tbl_pa;
    tesla_p_pt_attrs->l2_base_va = pg_tbl_va;
    if (tesla_p_pt_attrs->l2_base_va)
        memset((UInt8 *)tesla_p_pt_attrs->l2_base_va, 0x00,
               tesla_p_pt_attrs->l2_size);

    /* One page_info entry is needed per L2 table for the free-table
     * search in pte_set() */
    tesla_p_pt_attrs->pg_info = Memory_alloc(NULL,
                                     sizeof(struct page_info) *
                                     tesla_p_pt_attrs->l2_num_pages, 0, NULL);
    if (tesla_p_pt_attrs->pg_info)
        Memory_set (tesla_p_pt_attrs->pg_info, 0,
                    sizeof(struct page_info) *
                    tesla_p_pt_attrs->l2_num_pages);
    else {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "init_tesla_page_attributes",
                             status,
                             "Memory_alloc failed");
        goto error_exit;
    }

    return 0;

error_exit:
    if (tesla_p_pt_attrs) {
        if (tesla_p_pt_attrs->pg_info)
            Memory_free (NULL, tesla_p_pt_attrs->pg_info,
                         sizeof(struct page_info) *
                         tesla_p_pt_attrs->l2_num_pages);
        if (tesla_p_pt_attrs->l1_tbl_alloc_va) {
            munmap ((void *)tesla_p_pt_attrs->l1_tbl_alloc_va,
                    tesla_p_pt_attrs->l1_tbl_alloc_sz);
        }
        Memory_free (NULL, tesla_p_pt_attrs, sizeof(struct pg_table_attrs));
        tesla_p_pt_attrs = NULL;
    }

    return status;
}
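
/*
 * Alignment arithmetic, illustrated with an assumed return value: if
 * mmap64() hands back a block at PA 0x8A204000, the 64 KB check
 * (align_size = 0x10000) fails, the allocation is doubled to 0x200000,
 * and the L1 base is rounded up to the next 64 KB boundary:
 * (0x8A204000 + 0xFFFF) & ~0xFFFF = 0x8A210000.
 */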
/****************************************************
 *  Function which sets the TWL of the Benelli
 *****************************************************/
static Int benelli_set_twl (OMAP5430BENELLI_HalObject * halObject, Bool on)
{
    Int status = 0;
    OMAP5430Benelli_MMURegs * mmuRegs = NULL;
    ULONG reg;

    if (halObject == NULL) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "benelli_set_twl",
                             status,
                             "halObject is NULL");
    }
    else if (halObject->mmuBase == 0) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "benelli_set_twl",
                             status,
                             "halObject->mmuBase is NULL");
    }
    else {
        mmuRegs = (OMAP5430Benelli_MMURegs *)halObject->mmuBase;

        /* Setting MMU to Smart Idle Mode */
        reg = INREG32(&mmuRegs->SYSCONFIG);
        reg &= ~MMU_SYS_IDLE_MASK;
        reg |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
        OUTREG32(&mmuRegs->SYSCONFIG, reg);

        /* Enabling MMU */
        reg = INREG32(&mmuRegs->CNTL);

        if (on)
            OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TWL_MASK);
        else
            OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TLB_MISS_MASK);

        reg &= ~MMU_CNTL_MASK;
        if (on)
            reg |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
        else
            reg |= (MMU_CNTL_MMU_EN);

        OUTREG32(&mmuRegs->CNTL, reg);
    }

    return status;
}
/*========================================
 * This sets up the Benelli processor MMU page tables
 *
 */
static Int init_mmu_page_attribs (UInt32 l1_size,
                                  UInt32 l1_allign,
                                  UInt32 ls_num_of_pages)
{
    UInt32 pg_tbl_pa = 0;
    off64_t offset = 0;
    UInt32 pg_tbl_va = 0;
    UInt32 align_size = 0;
    UInt32 len = 0;
    int status = 0;

    p_pt_attrs = Memory_alloc (NULL, sizeof(struct pg_table_attrs), 0, NULL);
    if (p_pt_attrs)
        Memory_set (p_pt_attrs, 0, sizeof(struct pg_table_attrs));
    else {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "init_mmu_page_attribs",
                             status,
                             "Memory_alloc failed");
        goto error_exit;
    }

    p_pt_attrs->l1_size = l1_size;
    align_size = p_pt_attrs->l1_size;
    p_pt_attrs->l1_tbl_alloc_sz = 0x100000;
    /* Align sizes are expected to be power of 2 */
    /* we like to get aligned on L1 table size */
    pg_tbl_va = (UInt32) mmap64 (NULL,
                                 p_pt_attrs->l1_tbl_alloc_sz,
                                 PROT_NOCACHE | PROT_READ | PROT_WRITE,
                                 MAP_ANON | MAP_PHYS | MAP_PRIVATE,
                                 NOFD,
                                 0x0);
    if (pg_tbl_va == (UInt32)MAP_FAILED) {
        pg_tbl_va = 0;
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "init_mmu_page_attribs",
                             status,
                             "mmap64 failed");
        goto error_exit;
    }
    else {
        /* Make sure the memory is contiguous */
        status = mem_offset64 ((void *)pg_tbl_va, NOFD,
                               p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
        pg_tbl_pa = (UInt32)offset;
        if (len != p_pt_attrs->l1_tbl_alloc_sz) {
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "init_mmu_page_attribs",
                                 status,
                                 "phys mem is not contiguous");
        }
        if (status != 0) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "init_mmu_page_attribs",
                                 status,
                                 "mem_offset64 failed");
            goto error_exit;
        }
    }
    /* Check if the PA is aligned for us */
    if ((pg_tbl_pa) & (align_size - 1)) {
        /* PA not aligned to page table size, */
        /* try with more allocation and align */
        munmap((void *)pg_tbl_va, p_pt_attrs->l1_tbl_alloc_sz);
        p_pt_attrs->l1_tbl_alloc_sz = p_pt_attrs->l1_tbl_alloc_sz * 2;
        /* we like to get aligned on L1 table size */
        pg_tbl_va = (UInt32) mmap64 (NULL,
                                     p_pt_attrs->l1_tbl_alloc_sz,
                                     PROT_NOCACHE | PROT_READ | PROT_WRITE,
                                     MAP_ANON | MAP_PHYS | MAP_PRIVATE,
                                     NOFD,
                                     0);
        if (pg_tbl_va == (UInt32)MAP_FAILED) {
            pg_tbl_va = 0;
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "init_mmu_page_attribs",
                                 status,
                                 "mmap64 failed");
            goto error_exit;
        }
        else {
            /* Make sure the memory is contiguous */
            status = mem_offset64 ((void *)pg_tbl_va, NOFD,
                                   p_pt_attrs->l1_tbl_alloc_sz,
                                   &offset, &len);
            pg_tbl_pa = (UInt32)offset;
            if (len != p_pt_attrs->l1_tbl_alloc_sz) {
                status = -ENOMEM;
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "init_mmu_page_attribs",
                                     status,
                                     "phys mem is not contiguous");
            }
            if (status != 0) {
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "init_mmu_page_attribs",
                                     status,
                                     "mem_offset64 failed");
                goto error_exit;
            }
        }
        /* We should be able to get aligned table now */
        p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
        p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
        /* Align the PA to the next 'align' boundary */
        p_pt_attrs->l1_base_pa = ((pg_tbl_pa) + (align_size - 1)) &
                                 (~(align_size - 1));
        p_pt_attrs->l1_base_va = pg_tbl_va + (p_pt_attrs->l1_base_pa -
                                              pg_tbl_pa);
    } else {
        /* We got aligned PA, cool */
        p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
        p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
        p_pt_attrs->l1_base_pa = pg_tbl_pa;
        p_pt_attrs->l1_base_va = pg_tbl_va;
    }

    if (p_pt_attrs->l1_base_va)
        memset((UInt8 *)p_pt_attrs->l1_base_va, 0x00, p_pt_attrs->l1_size);
    p_pt_attrs->l2_num_pages = ls_num_of_pages;
    p_pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * p_pt_attrs->l2_num_pages;
    align_size = 4; /* Make it UInt32 aligned */
    /* we like to get aligned on L1 table size */
    pg_tbl_va = p_pt_attrs->l1_base_va + 0x80000;
    pg_tbl_pa = p_pt_attrs->l1_base_pa + 0x80000;
    p_pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
    p_pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
    p_pt_attrs->ls_tbl_alloc_sz = p_pt_attrs->l2_size;
    p_pt_attrs->l2_base_pa = pg_tbl_pa;
    p_pt_attrs->l2_base_va = pg_tbl_va;
    if (p_pt_attrs->l2_base_va)
        memset((UInt8 *)p_pt_attrs->l2_base_va, 0x00, p_pt_attrs->l2_size);

    p_pt_attrs->pg_info = Memory_alloc(NULL,
                                       sizeof(struct page_info) *
                                       p_pt_attrs->l2_num_pages, 0, NULL);
    if (p_pt_attrs->pg_info)
        Memory_set (p_pt_attrs->pg_info, 0,
                    sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
    else {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "init_mmu_page_attribs",
                             status,
                             "Memory_alloc failed");
        goto error_exit;
    }
    return 0;

error_exit:
    if (p_pt_attrs) {
        if (p_pt_attrs->pg_info)
            Memory_free (NULL, p_pt_attrs->pg_info,
                         sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
        if (p_pt_attrs->l1_tbl_alloc_va) {
            munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
                    p_pt_attrs->l1_tbl_alloc_sz);
        }
        Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
        p_pt_attrs = NULL;
    }

    return status;
}
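
/*
 * Layout sketch (illustrative): with l1_size = 0x10000 and ls_num_of_pages
 * = 128, the L2 coarse tables are carved out of the same allocation at
 * l1_base + 0x80000. Assuming HW_MMU_COARSE_PAGE_SIZE is the 1 KB ARM
 * coarse-table size, that gives l2_size = 128 * 0x400 = 0x20000 bytes.
 */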
/*========================================
 * This destroys the Benelli processor MMU page tables
 *
 */
static Void deinit_mmu_page_attribs (Void)
{
    if (p_pt_attrs) {
        if (p_pt_attrs->pg_info)
            Memory_free (NULL, p_pt_attrs->pg_info,
                         sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
        if (p_pt_attrs->l1_tbl_alloc_va) {
            munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
                    p_pt_attrs->l1_tbl_alloc_sz);
        }
        Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
        p_pt_attrs = NULL;
    }
}
/*========================================
 * This destroys the DSP MMU page tables
 *
 */
static Void deinit_dsp_mmu_page_attribs (Void)
{
    if (tesla_p_pt_attrs) {
        if (tesla_p_pt_attrs->pg_info)
            Memory_free (NULL, tesla_p_pt_attrs->pg_info,
                         sizeof(struct page_info) *
                         tesla_p_pt_attrs->l2_num_pages);
        if (tesla_p_pt_attrs->l1_tbl_alloc_va) {
            munmap ((void *)tesla_p_pt_attrs->l1_tbl_alloc_va,
                    tesla_p_pt_attrs->l1_tbl_alloc_sz);
        }
        Memory_free (NULL, tesla_p_pt_attrs, sizeof(struct pg_table_attrs));
        tesla_p_pt_attrs = NULL;
    }
}
1444 /*============================================
1445  * This function calculates PTE address (MPU virtual) to be updated
1446  *  It also manages the L2 page tables
1447  */
1448 static Int pte_set (UInt32 pa, UInt32 va, UInt32 size,
1449                     struct hw_mmu_map_attrs_t *attrs, struct pg_table_attrs *pt_Table)
1451     UInt32 i;
1452     UInt32 pte_val;
1453     UInt32 pte_addr_l1;
1454     UInt32 pte_size;
1455     UInt32 pg_tbl_va; /* Base address of the PT that will be updated */
1456     UInt32 l1_base_va;
1457      /* Compiler warns that the next three variables might be used
1458      * uninitialized in this function. Doesn't seem so. Working around,
1459      * anyways.  */
1460     UInt32 l2_base_va = 0;
1461     UInt32 l2_base_pa = 0;
1462     UInt32 l2_page_num = 0;
1463     struct pg_table_attrs *pt = pt_Table;
1464     struct iotlb_entry    *mapAttrs;
1465     int status = 0;
1466     OMAP5430BENELLI_HalMmuEntryInfo setPteInfo;
1467     mapAttrs = Memory_alloc(0, sizeof(struct iotlb_entry), 0, NULL);
1469     l1_base_va = pt->l1_base_va;
1470     pg_tbl_va = l1_base_va;
1471     if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) {
1472         /* Find whether the L1 PTE points to a valid L2 PT */
1473         pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1474         if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1475             pte_val = *(UInt32 *)pte_addr_l1;
1476             pte_size = hw_mmu_pte_sizel1(pte_val);
1477         } else {
1478             return -EINVAL;
1479         }
1480         /* FIX ME */
1481         /* TODO: ADD synchronication element*/
1482         /*        sync_enter_cs(pt->hcs_object);*/
1483         if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1484             /* Get the L2 PA from the L1 PTE, and find
1485              * corresponding L2 VA */
1486             l2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1487             l2_base_va = l2_base_pa - pt->l2_base_pa +
1488             pt->l2_base_va;
1489             l2_page_num = (l2_base_pa - pt->l2_base_pa) /
1490                     HW_MMU_COARSE_PAGE_SIZE;
1491         } else if (pte_size == 0) {
1492             /* L1 PTE is invalid. Allocate a L2 PT and
1493              * point the L1 PTE to it */
1494             /* Find a free L2 PT. */
1495             for (i = 0; (i < pt->l2_num_pages) &&
1496                 (pt->pg_info[i].num_entries != 0); i++)
1497                 ;;
1498             if (i < pt->l2_num_pages) {
1499                 l2_page_num = i;
1500                 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1501                        HW_MMU_COARSE_PAGE_SIZE);
1502                 l2_base_va = pt->l2_base_va + (l2_page_num *
1503                        HW_MMU_COARSE_PAGE_SIZE);
1504                 /* Endianness attributes are ignored for
1505                  * HW_MMU_COARSE_PAGE_SIZE */
1506                 mapAttrs->endian = attrs->endianism;
1507                 mapAttrs->mixed = attrs->mixedSize;
1508                 mapAttrs->elsz= attrs->element_size;
1509                 mapAttrs->da = va;
1510                 mapAttrs->pa = pa;
1511                 status = hw_mmu_pte_set(pg_tbl_va, l2_base_pa, va,
1512                                         HW_MMU_COARSE_PAGE_SIZE, attrs);
1513             } else {
1514                 status = -ENOMEM;
1515             }
1516         } else {
1517             /* Found valid L1 PTE of another size.
1518              * Should not overwrite it. */
1519             status = -EINVAL;
1520         }
1521         if (status == 0) {
1522             pg_tbl_va = l2_base_va;
1523             if (size == HW_PAGE_SIZE_64KB)
1524                 pt->pg_info[l2_page_num].num_entries += 16;
1525             else
1526                 pt->pg_info[l2_page_num].num_entries++;
1527         }
1528     }
1529     if (status == 0) {
1530         mapAttrs->endian = attrs->endianism;
1531         mapAttrs->mixed = attrs->mixedSize;
1532         mapAttrs->elsz = attrs->element_size;
1533         mapAttrs->da = va;
1534         mapAttrs->pa = pa;
1535         mapAttrs->pgsz = MMU_CAM_PGSZ_16M;
1536         setPteInfo.elementSize = attrs->element_size;
1537         setPteInfo.endianism = attrs->endianism;
1538         setPteInfo.masterPhyAddr = pa;
1539         setPteInfo.mixedSize = attrs->mixedSize;
1540         setPteInfo.size = size;
1541         setPteInfo.slaveVirtAddr = va;
1543         status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1544         if (status == RET_OK)
1545             status = 0;
1546     }
1547     Memory_free(0, mapAttrs, sizeof(struct iotlb_entry));
1548     return status;
1549 }
1552 /*=============================================
1553  * This function splits the region into the largest page-aligned chunks
1554  * the hardware supports. Caller must pass page-aligned addresses and size.
1555  */
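/*
 * Worked example (illustrative values): for pa = va = 0x90000000 and
 * size = 0x01401000, the loop below emits, in order:
 *
 *     1 x 16 MB entry   at 0x90000000   (0x01000000 bytes)
 *     4 x  1 MB entries at 0x91000000   (0x00400000 bytes)
 *     1 x  4 KB entry   at 0x91400000   (0x00001000 bytes)
 *
 * Each pass picks the largest page size that both fits in the remaining
 * byte count and matches the current PA/VA alignment.
 */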
1556 static Int pte_update (UInt32 pa, UInt32 va, UInt32 size,
1557                        struct hw_mmu_map_attrs_t *map_attrs, struct pg_table_attrs *pt_Table)
1558 {
1559     UInt32 i;
1560     UInt32 all_bits;
1561     UInt32 pa_curr = pa;
1562     UInt32 va_curr = va;
1563     UInt32 num_bytes = size;
1564     Int status = 0;
1565     UInt32 pg_size[] = {HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB,
1566                HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB};
1567     while (num_bytes && (status == 0)) {
1568         /* Find the largest page size with which both PA and VA are
1569          * aligned */
1570         all_bits = pa_curr | va_curr;
1571         for (i = 0; i < 4; i++) {
1572             if ((num_bytes >= pg_size[i]) && ((all_bits &
1573                (pg_size[i] - 1)) == 0)) {
1574                 status = pte_set(pa_curr,
1575                     va_curr, pg_size[i], map_attrs, pt_Table);
1576                 pa_curr += pg_size[i];
1577                 va_curr += pg_size[i];
1578                 num_bytes -= pg_size[i];
1579                 /* Don't try smaller sizes; restart the scan from the
1580                  * largest page size, since the advanced address may now
1581                  * be aligned to a bigger page */
1582                 break;
1583             }
1584         }
1585     }
1586     return status;
1587 }
1590 /*============================================
1591  * This function maps an MPU buffer into the slave (DSP/IPU) address space.
1592  * It performs linear-to-physical address translation if required, and it
1593  * translates each page, since linear addresses can be physically
1594  * non-contiguous. All address and size arguments are assumed to be page
1595  * aligned (enforced in proc.c).
1596  */
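/*
 * Example (illustrative): a call with map_attr = DSP_MAPPHYSICALADDR decodes
 * below to attrs = DSP_MAPPHYSICALADDR | DSP_MAPELEMSIZE32, i.e.
 * hw_attrs = { HW_LITTLE_ENDIAN, HW_ELEM_SIZE_32BIT, mixedSize = 0 }, and
 * the already-physical address is handed straight to pte_update().
 */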
1597 static Int benelli_mem_map (OMAP5430BENELLI_HalObject * halObject,
1598                             UInt32 mpu_addr, UInt32 ul_virt_addr,
1599                             UInt32 num_bytes, UInt32 map_attr)
1600 {
1601     UInt32 attrs;
1602     Int status = 0;
1603     struct hw_mmu_map_attrs_t hw_attrs;
1604     Int pg_i = 0;
1606     if (halObject == NULL) {
1607         status = -ENOMEM;
1608         GT_setFailureReason (curTrace,
1609                              GT_4CLASS,
1610                              "benelli_mem_map",
1611                              status,
1612                              "halObject is NULL");
1613     }
1614     else if (halObject->mmuBase == 0) {
1615         status = -ENOMEM;
1616         GT_setFailureReason (curTrace,
1617                              GT_4CLASS,
1618                              "benelli_mem_map",
1619                              status,
1620                              "halObject->mmuBase is 0");
1621     }
1622     else if (num_bytes == 0) {
1623         status = -EINVAL;
1624         GT_setFailureReason (curTrace,
1625                              GT_4CLASS,
1626                              "benelli_mem_map",
1627                              status,
1628                              "num_bytes is 0");
1629     }
1630     else {
1631         if (map_attr != 0) {
1632             attrs = map_attr;
1633             attrs |= DSP_MAPELEMSIZE32;
1634         } else {
1635             /* Assign default attributes */
1636             attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE32;
1637         }
1638         /* Take mapping properties */
1639         if (attrs & DSP_MAPBIGENDIAN)
1640             hw_attrs.endianism = HW_BIG_ENDIAN;
1641         else
1642             hw_attrs.endianism = HW_LITTLE_ENDIAN;
1644         hw_attrs.mixedSize = (enum hw_mmu_mixed_size_t)
1645                      ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1646         /* Ignore element_size if mixedSize is enabled */
1647         if (hw_attrs.mixedSize == 0) {
1648             if (attrs & DSP_MAPELEMSIZE8) {
1649                 /* Size is 8 bit */
1650                 hw_attrs.element_size = HW_ELEM_SIZE_8BIT;
1651             } else if (attrs & DSP_MAPELEMSIZE16) {
1652                 /* Size is 16 bit */
1653                 hw_attrs.element_size = HW_ELEM_SIZE_16BIT;
1654             } else if (attrs & DSP_MAPELEMSIZE32) {
1655                 /* Size is 32 bit */
1656                 hw_attrs.element_size = HW_ELEM_SIZE_32BIT;
1657             } else if (attrs & DSP_MAPELEMSIZE64) {
1658                 /* Size is 64 bit */
1659                 hw_attrs.element_size = HW_ELEM_SIZE_64BIT;
1660             } else {
1661                 /* Mixedsize isn't enabled, so size can't be
1662                  * zero here */
1663                 status = -EINVAL;
1664                 GT_setFailureReason (curTrace,
1665                                      GT_4CLASS,
1666                                      "benelli_mem_map",
1667                                      status,
1668                                      "MMU element size is zero");
1669             }
1670         }
1672         if (status >= 0) {
1673             /*
1674              * Do OS-specific user-va to pa translation.
1675              * Combine physically contiguous regions to reduce TLBs.
1676              * Pass the translated pa to PteUpdate.
1677              */
1678             if (attrs & DSP_MAPPHYSICALADDR) {
1679                 status = pte_update(mpu_addr, ul_virt_addr, num_bytes, &hw_attrs,
1680                                     (halObject->procId == MultiProc_getId("DSP")) ? tesla_p_pt_attrs : p_pt_attrs);
1681             }
1683             /* Don't propagate Linux or HW status to upper layers */
1684             if (status < 0) {
1685                 /*
1686                  * Roll back the pages mapped so far in case the mapping
1687                  * failed midway
1688                  */
1689                 if (pg_i)
1690                     benelli_mem_unmap(halObject, ul_virt_addr,
1691                                       (pg_i * PAGE_SIZE));
1692             }
1694             /* In any case, flush the TLB.
1695              * This is done here instead of in pte_update to avoid
1696              * unnecessary repetition while mapping non-contiguous
1697              * physical regions of a virtual region */
1698             hw_mmu_tlb_flushAll(halObject->mmuBase);
1699         }
1700     }
1701     return status;
1702 }
1706 /*
1707  *  ======== benelli_mem_unmap ========
1708  *      Invalidate the PTEs for the slave VA block to be unmapped.
1709  *
1710  *      The PTEs of a mapped memory block are contiguous within a page
1711  *      table, so instead of looking up the PTE address for every 4 KB
1712  *      block we clear consecutive PTEs until all the bytes are unmapped.
1713  */
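/*
 * Worked example (illustrative, assuming the 1 KB, 256-entry coarse tables
 * used by the hw_mmu layer): unmapping from an L2 PTE at byte offset 0x300
 * within its table gives
 *
 *     pte_count = (0x400 - 0x300) / sizeof(UInt32) = 64 entries,
 *
 * i.e. at most 64 x 4 KB = 256 KB of VA can be cleared on that L2 page
 * before the walk moves on to the next L1 entry; rem_bytes caps this
 * further when the request ends sooner.
 */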
1714 static Int benelli_mem_unmap (OMAP5430BENELLI_HalObject * halObject,
1715                               UInt32 da, UInt32 num_bytes)
1716 {
1717     UInt32 L1_base_va;
1718     UInt32 L2_base_va;
1719     UInt32 L2_base_pa;
1720     UInt32 L2_page_num;
1721     UInt32 pte_val;
1722     UInt32 pte_size;
1723     UInt32 pte_count;
1724     UInt32 pte_addr_l1;
1725     UInt32 pte_addr_l2 = 0;
1726     UInt32 rem_bytes;
1727     UInt32 rem_bytes_l2;
1728     UInt32 vaCurr;
1729     Int status = 0;
1730     UInt32 temp;
1731     UInt32 pAddr;
1732     UInt32 numof4Kpages = 0;
1734     if (halObject == NULL) {
1735         status = -ENOMEM;
1736         GT_setFailureReason (curTrace,
1737                              GT_4CLASS,
1738                              "benelli_mem_unmap",
1739                              status,
1740                              "halObject is NULL");
1741     }
1742     else if (halObject->mmuBase == 0) {
1743         status = -ENOMEM;
1744         GT_setFailureReason (curTrace,
1745                              GT_4CLASS,
1746                              "benelli_mem_unmap",
1747                              status,
1748                              "halObject->mmuBase is 0");
1749     }
1750     else {
1751         vaCurr = da;
1752         rem_bytes = num_bytes;
1753         rem_bytes_l2 = 0;
1754         L1_base_va = p_pt_attrs->l1_base_va;
1755         pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1756         while (rem_bytes) {
1757             UInt32 vaCurrOrig = vaCurr;
1758             /* Find whether the L1 PTE points to a valid L2 PT */
1759             pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1760             pte_val = *(UInt32 *)pte_addr_l1;
1761             pte_size = hw_mmu_pte_sizel1(pte_val);
1762             if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1763                 /*
1764                  * Get the L2 PA from the L1 PTE, and find
1765                  * corresponding L2 VA
1766                  */
1767                 L2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1768                 L2_base_va = L2_base_pa - p_pt_attrs->l2_base_pa
1769                             + p_pt_attrs->l2_base_va;
1770                 L2_page_num = (L2_base_pa - p_pt_attrs->l2_base_pa) /
1771                         HW_MMU_COARSE_PAGE_SIZE;
1772                 /*
1773                  * Find the L2 PTE address from which we will start
1774                  * clearing, the number of PTEs to be cleared on this
1775                  * page, and the size of VA space that needs to be
1776                  * cleared on this L2 page
1777                  */
1778                 pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
1779                 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1780                 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) /
1781                         sizeof(UInt32);
1782                 if (rem_bytes < (pte_count * PAGE_SIZE))
1783                     pte_count = rem_bytes / PAGE_SIZE;
1785                 rem_bytes_l2 = pte_count * PAGE_SIZE;
1786                 /*
1787                  * Unmap the VA space on this L2 PT. A quicker way
1788                  * would be to clear pte_count entries starting from
1789                  * pte_addr_l2. However, the code below checks that we
1790                  * don't clear invalid entries, or clear less than
1791                  * 64 KB for a 64 KB entry. Similar checking is done
1792                  * for L1 PTEs below.
1793                  */
1794                 while (rem_bytes_l2) {
1795                     pte_val = *(UInt32 *)pte_addr_l2;
1796                     pte_size = hw_mmu_pte_sizel2(pte_val);
1797                     /* vaCurr aligned to pte_size? */
1798                     if ((pte_size != 0) && (rem_bytes_l2
1799                         >= pte_size) &&
1800                         !(vaCurr & (pte_size - 1))) {
1801                         /* Collect Physical addresses from VA */
1802                         pAddr = (pte_val & ~(pte_size - 1));
1803                         if (pte_size == HW_PAGE_SIZE_64KB)
1804                             numof4Kpages = 16;
1805                         else
1806                             numof4Kpages = 1;
1807                         temp = 0;
1809                         if (hw_mmu_pte_clear(pte_addr_l2,
1810                             vaCurr, pte_size) == RET_OK) {
1811                             rem_bytes_l2 -= pte_size;
1812                             vaCurr += pte_size;
1813                             pte_addr_l2 += (pte_size >> 12)
1814                                 * sizeof(UInt32);
1815                         } else {
1816                             status = -EFAULT;
1817                             goto EXIT_LOOP;
1818                         }
1819                     } else {
1820                         status = -EFAULT;
                            /* nothing was cleared; bail out instead of
                             * looping forever on the same PTE */
                            goto EXIT_LOOP;
                        }
1821                 }
1822                 if (rem_bytes_l2 != 0) {
1823                     status = -EFAULT;
1824                     goto EXIT_LOOP;
1825                 }
1826                 p_pt_attrs->pg_info[L2_page_num].num_entries -=
1827                             pte_count;
1828                 if (p_pt_attrs->pg_info[L2_page_num].num_entries
1829                                     == 0) {
1830                     /*
1831                      * Clear the L1 PTE pointing to the
1832                      * L2 PT
1833                      */
1834                     if (RET_OK != hw_mmu_pte_clear(L1_base_va,
1835                         vaCurrOrig, HW_MMU_COARSE_PAGE_SIZE)) {
1836                         status = -EFAULT;
1837                         goto EXIT_LOOP;
1838                     }
1839                 }
1840                 rem_bytes -= pte_count * PAGE_SIZE;
1841             } else
1842                 /* vaCurr aligned to pte_size? */
1843                 /* pte_size = 1 MB or 16 MB */
1844                 if ((pte_size != 0) && (rem_bytes >= pte_size) &&
1845                    !(vaCurr & (pte_size - 1))) {
1846                     if (pte_size == HW_PAGE_SIZE_1MB)
1847                         numof4Kpages = 256;
1848                     else
1849                         numof4Kpages = 4096;
1850                     temp = 0;
1851                     /* Collect Physical addresses from VA */
1852                     pAddr = (pte_val & ~(pte_size - 1));
1853                     if (hw_mmu_pte_clear(L1_base_va, vaCurr,
1854                             pte_size) == RET_OK) {
1855                         rem_bytes -= pte_size;
1856                         vaCurr += pte_size;
1857                     } else {
1858                         status = -EFAULT;
1859                         goto EXIT_LOOP;
1860                     }
1861             } else {
1862                 status = -EFAULT;
                    /* unmapped or misaligned VA: bail out instead of
                     * looping forever on the same address */
                    goto EXIT_LOOP;
1863             }
1864         }
1865     }
1866     /*
1867      * Flush the TLB on every exit path so that any stale entries for the
1868      * unmapped region are discarded
1869      */
1870 EXIT_LOOP:
1871     hw_mmu_tlb_flushAll(halObject->mmuBase);
1872     return status;
1873 }
1876 /*========================================
1877  * This function sets up the MMU for the Benelli (IPU) processor
1878  *
1879  */
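/*
 * The sequence below follows a quiesce/program/enable pattern: hardware
 * table walking (TWL) is disabled while benelli_mmu_init() programs the
 * page tables and TLB, then re-enabled so that later TLB misses can be
 * resolved from the page tables.
 */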
1880 Int ipu_setup (OMAP5430BENELLI_HalObject * halObject,
1881                ProcMgr_AddrInfo * memEntries,
1882                UInt32 numMemEntries)
1883 {
1884     Int ret_val = 0;
1886     if (halObject->procId == PROCTYPE_IPU0) {
1887         ret_val = init_mmu_page_attribs(0x10000, 14, 128);
1888         if (ret_val < 0) {
1889             GT_setFailureReason (curTrace,
1890                                  GT_4CLASS,
1891                                  "ipu_setup",
1892                                  ret_val,
1893                                  "init_mmu_page_attribs failed");
1894         }
1895         else {
1896             /* Disable TWL  */
1897             ret_val = benelli_set_twl(halObject, FALSE);
1898             if (ret_val < 0) {
1899                 GT_setFailureReason (curTrace,
1900                                      GT_4CLASS,
1901                                      "ipu_setup",
1902                                      ret_val,
1903                                      "benelli_set_twl to FALSE failed");
1904             }
1905             else {
1906                 ret_val = benelli_mmu_init (halObject, memEntries,
1907                                             numMemEntries);
1908                 if (ret_val < 0) {
1909                     GT_setFailureReason (curTrace,
1910                                          GT_4CLASS,
1911                                          "ipu_setup",
1912                                          ret_val,
1913                                          "benelli_mmu_init failed");
1914                 }
1915                 else {
1916                     ret_val = benelli_set_twl(halObject, TRUE);
1917                     if (ret_val < 0) {
1918                         GT_setFailureReason (curTrace,
1919                                              GT_4CLASS,
1920                                              "ipu_setup",
1921                                              ret_val,
1922                                              "benelli_set_twl to TRUE failed");
1923                     }
1924                 }
1925             }
1926         }
1927     }
1928     return ret_val;
1929 }
1933 Void ipu_destroy(OMAP5430BENELLI_HalObject * halObject)
1934 {
1935     if (halObject->procId == PROCTYPE_IPU0) {
1936         shm_phys_addr = 0;
1937         deinit_mmu_page_attribs();
1938     }
1939     else if (halObject->procId == PROCTYPE_DSP) {
1940         shm_phys_addr_dsp = 0;
1941         deinit_dsp_mmu_page_attribs();
1942     }
1943 }
1945 int tesla_setup (OMAP5430BENELLI_HalObject * halObject,
1946                  ProcMgr_AddrInfo * memEntries,
1947                  UInt32 numMemEntries)
1948 {
1949     int ret_val = 0;
1951     /* Set up the Tesla (DSP) page attributes */
1952     ret_val = init_tesla_page_attributes();
1953     if (ret_val < 0) {
1954         ret_val = -ENOMEM;
1955         GT_setFailureReason (curTrace,
1956                              GT_4CLASS,
1957                              "tesla_setup",
1958                              ret_val,
1959                              "init_tesla_page_attributes failed");
1960     }
1961     else {
1962         ret_val = tesla_set_twl(halObject, FALSE);    /* Disable TWL */
1963         if (ret_val < 0) {
1964             ret_val = -ENOMEM;
1965             GT_setFailureReason (curTrace,
1966                                  GT_4CLASS,
1967                                  "tesla_setup",
1968                                  ret_val,
1969                                  "tesla_set_twl to FALSE failed");
1970         }
1971         else {
1972             ret_val = tesla_mmu_init(halObject, memEntries, numMemEntries);
1973             if (ret_val < 0) {
1974                 ret_val = -ENOMEM;
1975                 GT_setFailureReason (curTrace,
1976                                      GT_4CLASS,
1977                                      "tesla_setup",
1978                                      ret_val,
1979                                      "tesla_mmu_init failed");
1980             }
1981             else {
1982                 ret_val = tesla_set_twl(halObject, TRUE);
1983                 if (ret_val < 0) {
1984                     ret_val = -ENOMEM;
1985                     GT_setFailureReason (curTrace,
1986                                          GT_4CLASS,
1987                                          "tesla_setup",
1988                                          ret_val,
1989                                          "tesla_set_twl to TRUE failed");
1990                 }
1991             }
1992         }
1993     }
1995     return ret_val;
1996 }
1998 static Void iotlb_load_cr (OMAP5430BENELLI_HalObject * halObject,
1999                            struct cr_regs *cr)
2000 {
2001     ULONG reg;
2002     OMAP5430Benelli_MMURegs * mmuRegs =
2003                                   (OMAP5430Benelli_MMURegs *)halObject->mmuBase;
2005     reg = cr->cam | MMU_CAM_V;
2006     OUTREG32(&mmuRegs->CAM, reg);           /* VA tag, page size, valid bit */
2008     reg = cr->ram;
2009     OUTREG32(&mmuRegs->RAM, reg);           /* PA plus memory attributes */
2011     reg = 1;
2012     OUTREG32(&mmuRegs->FLUSH_ENTRY, reg);   /* flush any entry matching CAM */
2014     reg = 1;
2015     OUTREG32(&mmuRegs->LD_TLB, reg);        /* load CAM/RAM into the victim slot */
2016 }
2019 /**
2020  * iotlb_dump_cr - Dump an iommu tlb entry into buf
2021  * @cr:     contents of the cam and ram registers
2022  * @buf:    output buffer
2023  *
2024  **/
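/*
 * Each call appends one line of the form "<cam> <ram> <preserved>", for
 * example "78200005 9d000042 0" (values illustrative only), and returns
 * the number of characters written.
 */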
2025 static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf)
2026 {
2027     Char *p = buf;
2029     if(!cr || !buf)
2030         return 0;
2032     /* FIXME: Need more detail analysis of cam/ram */
2033     p += sprintf(p, "%08x %08x %01x\n", (unsigned int)cr->cam,
2034                     (unsigned int)cr->ram,
2035                     (cr->cam & MMU_CAM_P) ? 1 : 0);
2036     return (p - buf);
2037 }
2041 static Int iotlb_cr_valid (struct cr_regs *cr)
2042 {
2043     if (!cr)
2044         return -EINVAL;
2046     return (cr->cam & MMU_CAM_V);
2047 }
2051 static struct cr_regs *omap5_alloc_cr (struct iotlb_entry *e)
2052 {
2053     struct cr_regs *cr;
2055     if (e->da & ~(get_cam_va_mask(e->pgsz))) {
2056         GT_setFailureReason (curTrace,
2057                              GT_4CLASS,
2058                              "omap5_alloc_cr",
2059                              -EINVAL,
2060                              "failed mask check");
2061         return NULL;
2062     }
2063     /* On QNX, MAP_ANON | MAP_PHYS allocates physically contiguous memory */
2064     cr = mmap(NULL,
2065               sizeof(struct cr_regs),
2066               PROT_NOCACHE | PROT_READ | PROT_WRITE,
2067               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
2068               NOFD,
2069               0);
2071     if (MAP_FAILED == cr)
2072     {
2073         GT_setFailureReason (curTrace,
2074                              GT_4CLASS,
2075                              "omap5_alloc_cr",
2076                              -EINVAL,
2077                              "mmap failed");
2078         return NULL;
2079     }
2081     cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
2082     cr->ram = e->pa | e->endian | e->elsz | e->mixed;
2083     return cr;
2084 }
2088 static struct cr_regs *iotlb_alloc_cr (struct iotlb_entry *e)
2089 {
2090     if (!e) {
2091         GT_setFailureReason (curTrace,
2092                              GT_4CLASS,
2093                              "iotlb_alloc_cr",
2094                              -EINVAL,
2095                              "e is NULL");
2096         return NULL;
2097     }
2099     return omap5_alloc_cr(e);
2100 }
2104 /**
2105  * load_iotlb_entry - Set an iommu tlb entry
2106  * @halObject:  target iommu HAL object
2107  * @e:          an iommu tlb entry info
2108  **/
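/*
 * Usage sketch (illustrative values; the MMU_CAM_x and MMU_RAM_x macro
 * names below follow the OMAP iommu conventions used elsewhere in this
 * file and are assumed to come from the hw_mmu headers):
 *
 *     struct iotlb_entry e;
 *     e.da     = 0x60000000;             // slave virtual address
 *     e.pa     = 0x9d000000;             // physical address
 *     e.pgsz   = MMU_CAM_PGSZ_1M;
 *     e.prsvd  = MMU_CAM_P;              // preserved: survives victim rotation
 *     e.valid  = MMU_CAM_V;
 *     e.endian = MMU_RAM_ENDIAN_LITTLE;
 *     e.elsz   = MMU_RAM_ELSZ_32;
 *     e.mixed  = 0;
 *     if (load_iotlb_entry(halObject, &e) < 0)
 *         ;  // fall back to page-table-only translation
 */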
2109 static Int load_iotlb_entry (OMAP5430BENELLI_HalObject * halObject,
2110                              struct iotlb_entry *e)
2111 {
2112     Int err = 0;
2113     struct iotlb_lock l;
2114     struct cr_regs *cr;
2116     if (halObject == NULL) {
2117         err = -EINVAL;
2118         GT_setFailureReason (curTrace,
2119                              GT_4CLASS,
2120                              "load_iotlb_entry",
2121                              err,
2122                              "halObject is NULL");
2123         goto out;
2124     }
2126     if (halObject->mmuBase == NULL) {
2127         err = -EINVAL;
2128         GT_setFailureReason (curTrace,
2129                              GT_4CLASS,
2130                              "load_iotlb_entry",
2131                              err,
2132                              "halObject->mmuBase is NULL");
2133         goto out;
2134     }
2136     if (!e) {
2137         err = -EINVAL;
2138         GT_setFailureReason (curTrace,
2139                              GT_4CLASS,
2140                              "load_iotlb_entry",
2141                              err,
2142                              "e is NULL");
2143         goto out;
2144     }
2146     iotlb_getLock(halObject, &l);
2148     if (l.base == 32) {   /* every TLB entry is preserved; no slot to replace */
2149         err = -EBUSY;
2150         GT_setFailureReason (curTrace,
2151                              GT_4CLASS,
2152                              "load_iotlb_entry",
2153                              err,
2154                              "l.base is full");
2155         goto out;
2156     }
2157     if (!e->prsvd) {   /* non-preserved entry: look for a free (invalid) slot */
2158         int i;
2159         struct cr_regs tmp;
2161         for_each_iotlb_cr(32, i, tmp)
2162             if (!iotlb_cr_valid(&tmp))
2163                 break;
2165         if (i == 32) {
2166             err = -EBUSY;
2167             GT_setFailureReason (curTrace,
2168                                  GT_4CLASS,
2169                                  "load_iotlb_entry",
2170                                  err,
2171                                  "i == 32");
2172             goto out;
2173         }
2175         iotlb_getLock(halObject, &l);
2176     } else {
2177         l.vict = l.base;
2178         iotlb_setLock(halObject, &l);
2179     }
2181     cr = iotlb_alloc_cr(e);
2182     if (!cr){
2183         err = -ENOMEM;
2184         GT_setFailureReason (curTrace,
2185                              GT_4CLASS,
2186                              "load_iotlb_entry",
2187                              err,
2188                              "iotlb_alloc_cr failed");
2189         goto out;
2190     }
2192     iotlb_load_cr(halObject, cr);
2193     munmap(cr, sizeof(struct cr_regs));
2195     if (e->prsvd)
2196         l.base++;
2197     /* increment victim for next tlb load */
2198     if (++l.vict == 32)
2199         l.vict = l.base;
2200     iotlb_setLock(halObject, &l);
2202 out:
2203     return err;
2204 }