[ipc/ipcdev.git] / qnx / src / ipc3x_dev / ti / syslink / family / vayu / vayuipu / VAYUIpuEnabler.c
1 /*
2  *  @file  VAYUIpuEnabler.c
3  *
4  *  @brief  MMU programming module
5  *
6  *
7  *  ============================================================================
8  *
9  *  Copyright (c) 2013-2015, Texas Instruments Incorporated
10  *
11  *  Redistribution and use in source and binary forms, with or without
12  *  modification, are permitted provided that the following conditions
13  *  are met:
14  *
15  *  *  Redistributions of source code must retain the above copyright
16  *     notice, this list of conditions and the following disclaimer.
17  *
18  *  *  Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *
22  *  *  Neither the name of Texas Instruments Incorporated nor the names of
23  *     its contributors may be used to endorse or promote products derived
24  *     from this software without specific prior written permission.
25  *
26  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
27  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  *  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  *  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  *  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  *  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  *  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
33  *  OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
34  *  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
35  *  OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
36  *  EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  *  Contact information for paper mail:
38  *  Texas Instruments
39  *  Post Office Box 655303
40  *  Dallas, Texas 75265
41  *  Contact information:
42  *  http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
43  *  DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
44  *  ============================================================================
45  *
46  */
48 #include <errno.h>
49 #include <unistd.h>
50 #include <ti/syslink/Std.h>
52 /* OSAL and utils headers */
53 #include <ti/syslink/utils/List.h>
54 #include <ti/syslink/utils/Trace.h>
55 #include <ti/syslink/utils/OsalPrint.h>
57 /* Module level headers */
58 #include <OsalDrv.h>
59 #include <_ProcDefs.h>
60 #include <Processor.h>
61 #include <hw/inout.h>
62 #include <sys/mman.h>
64 #include <hw_defs.h>
65 #include <hw_mmu.h>
66 #include <VAYUIpuHal.h>
67 #include <VAYUIpuHalMmu.h>
68 #include <VAYUIpuEnabler.h>
69 #include <stdbool.h>
70 #include <stdint.h>
73 #define PAGE_SIZE 0x1000
75 /* Size of L1 translation table */
76 #define TRANSLATION_TABLE_SIZE 0x4000
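/* Note: 0x4000 (16KB) corresponds to 4096 first-level descriptors of 4 bytes
 * each, one per 1MB section of the 32-bit slave address space. */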
78 /* Attributes of L2 page tables for DSP MMU.*/
79 struct page_info {
80     /* Number of valid PTEs in the L2 PT*/
81     UInt32 num_entries;
82 };
85 /* Attributes used to manage the DSP MMU page tables */
86 struct pg_table_attrs {
87     struct sync_cs_object *hcs_object;/* Critical section object handle */
88     UInt32 l1_base_pa; /* Physical address of the L1 PT */
89     UInt32 l1_base_va; /* Virtual  address of the L1 PT */
90     UInt32 l1_size; /* Size of the L1 PT */
91     UInt32 l1_tbl_alloc_pa;
92     /* Physical address of Allocated mem for L1 table. May not be aligned */
93     UInt32 l1_tbl_alloc_va;
94     /* Virtual address of Allocated mem for L1 table. May not be aligned */
95     UInt32 l1_tbl_alloc_sz;
96     /* Size of consistent memory allocated for L1 table.
97      * May not be aligned */
98     UInt32 l2_base_pa;        /* Physical address of the L2 PT */
99     UInt32 l2_base_va;        /* Virtual  address of the L2 PT */
100     UInt32 l2_size;        /* Size of the L2 PT */
101     UInt32 l2_tbl_alloc_pa;
102     /* Physical address of Allocated mem for L2 table. May not be aligned */
103     UInt32 l2_tbl_alloc_va;
104     /* Virtual address of Allocated mem for L2 table. May not be aligned */
105     UInt32 ls_tbl_alloc_sz;
106     /* Size of consistent memory allocated for L2 table.
107      * May not be aligned */
108     UInt32 l2_num_pages;    /* Number of allocated L2 PT */
109     struct page_info *pg_info;
110 };
113 enum pagetype {
114     SECTION = 0,
115     LARGE_PAGE = 1,
116     SMALL_PAGE = 2,
117     SUPER_SECTION  = 3
118 };
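/* For reference, get_mmu_entry_size() below pairs these entry types with the
 * following mapping sizes: SECTION -> 1MB, LARGE_PAGE -> 64KB,
 * SMALL_PAGE -> 4KB, SUPER_SECTION -> 16MB. */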
120 static UInt32 shm_phys_addr;
122 #define INREG32(x) in32((uintptr_t)x)
123 #define OUTREG32(x, y) out32((uintptr_t)x, y)
124 #define SIZE 0x4
126 static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf);
127 static Int load_iotlb_entry (VAYUIPU_HalObject * halObject,
128                              struct iotlb_entry *e);
129 static Int iotlb_cr_valid (struct cr_regs *cr);
131 static Int rproc_mem_map (VAYUIPU_HalObject * halObject,
132                           UInt32 mpu_addr, UInt32 ul_virt_addr,
133                           UInt32 num_bytes, UInt32 map_attr);
134 static Int rproc_mem_unmap (VAYUIPU_HalObject * halObject, UInt32 da,
135                             UInt32 num_bytes);
138 static Void iotlb_cr_to_e (struct cr_regs *cr, struct iotlb_entry *e)
140     e->da       = cr->cam & MMU_CAM_VATAG_MASK;
141     e->pa       = cr->ram & MMU_RAM_PADDR_MASK;
142     e->valid    = cr->cam & MMU_CAM_V;
143     e->prsvd    = cr->cam & MMU_CAM_P;
144     e->pgsz     = cr->cam & MMU_CAM_PGSZ_MASK;
145     e->endian   = cr->ram & MMU_RAM_ENDIAN_MASK;
146     e->elsz     = cr->ram & MMU_RAM_ELSZ_MASK;
147     e->mixed    = cr->ram & MMU_RAM_MIXED;
150 static Void iotlb_getLock (VAYUIPU_HalObject * halObject,
151                            struct iotlb_lock *l)
153     ULONG reg;
154     VAYUIpu_MMURegs * mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
156     reg = INREG32(&mmuRegs->LOCK);
157     l->base = MMU_LOCK_BASE(reg);
158     l->vict = MMU_LOCK_VICT(reg);
161 static Void iotlb_setLock (VAYUIPU_HalObject * halObject,
162                            struct iotlb_lock *l)
164     ULONG reg;
165     VAYUIpu_MMURegs * mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
167     reg = (l->base << MMU_LOCK_BASE_SHIFT);
168     reg |= (l->vict << MMU_LOCK_VICT_SHIFT);
169     OUTREG32(&mmuRegs->LOCK, reg);
172 static void omap5_tlb_read_cr (VAYUIPU_HalObject * halObject,
173                                struct cr_regs *cr)
175     VAYUIpu_MMURegs * mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
177     cr->cam = INREG32(&mmuRegs->READ_CAM);
178     cr->ram = INREG32(&mmuRegs->READ_RAM);
181 /* only used for iotlb iteration in for-loop */
182 static struct cr_regs __iotlb_read_cr (VAYUIPU_HalObject * halObject,
183                                        int n)
185      struct cr_regs cr;
186      struct iotlb_lock l;
187      iotlb_getLock(halObject, &l);
188      l.vict = n;
189      iotlb_setLock(halObject, &l);
190      omap5_tlb_read_cr(halObject, &cr);
191      return cr;
194 #define for_each_iotlb_cr(n, __i, cr)                \
195     for (__i = 0;                            \
196          (__i < (n)) && (cr = __iotlb_read_cr(halObject, __i), TRUE);    \
197          __i++)
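/* for_each_iotlb_cr() walks TLB entries 0..n-1 by programming the victim
 * index and reading back the CAM/RAM registers for each slot. It expects a
 * variable named 'halObject' to be in scope at the call site. */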
199 static Int save_tlbs (VAYUIPU_HalObject * halObject, UINT32 procId)
201     Int i =0;
202     struct cr_regs cr_tmp;
203     struct iotlb_lock l;
205     iotlb_getLock(halObject, &l);
207     halObject->mmuObj.nrTlbs = l.base;
208     for_each_iotlb_cr(halObject->mmuObj.nrTlbs, i, cr_tmp) {
209         iotlb_cr_to_e(&cr_tmp, &halObject->mmuObj.tlbs[i]);
210     }
212     return 0;
216 static Int restore_tlbs (VAYUIPU_HalObject * halObject, UInt32 procId)
218     Int i = 0;
219     Int status = -1;
220     struct iotlb_lock save;
222     /* Reset the base and victim values */
223     save.base = 0;
224     save.vict = 0;
225     iotlb_setLock(halObject, &save);
227     for (i = 0; i < halObject->mmuObj.nrTlbs; i++) {
228         status = load_iotlb_entry(halObject, &halObject->mmuObj.tlbs[i]);
229         if (status < 0) {
230             GT_setFailureReason (curTrace,
231                                  GT_4CLASS,
232                                  "restore_tlbs",
233                                  status,
234                                  "Error restoring the tlbs");
235             goto err;
236         }
237     }
239     return 0;
241 err:
242     return status;
245 static Int save_mmu_regs (VAYUIPU_HalObject * halObject, UInt32 procId)
247     UInt32 i = 0;
249     if (halObject == NULL) {
250         GT_setFailureReason (curTrace,
251                              GT_4CLASS,
252                              "save_mmu_regs",
253                              -ENOMEM,
254                              "halObject is NULL");
255         return -ENOMEM;
256     }
258     if (halObject->mmuBase == 0) {
259         GT_setFailureReason (curTrace,
260                              GT_4CLASS,
261                              "save_mmu_regs",
262                              -ENOMEM,
263                              "halObject->mmuBase is 0");
264         return -ENOMEM;
265     }
267     for (i = 0; i < MMU_REGS_SIZE; i++) {
268         halObject->mmuObj.mmuRegs[i] = INREG32(halObject->mmuBase + (i * 4));
269     }
271     return 0;
274 static Int restore_mmu_regs (VAYUIPU_HalObject * halObject,
275                              UInt32 procId)
277     UInt32 i = 0;
279     if (halObject == NULL) {
280         GT_setFailureReason (curTrace,
281                              GT_4CLASS,
282                              "restore_mmu_regs",
283                              -ENOMEM,
284                              "halObject is NULL");
285         return -ENOMEM;
286     }
288     if (halObject->mmuBase == 0) {
289         GT_setFailureReason (curTrace,
290                              GT_4CLASS,
291                              "restore_mmu_regs",
292                              -ENOMEM,
293                              "halObject->mmuBase is 0");
294         return -ENOMEM;
295     }
297     for (i = 0; i < MMU_REGS_SIZE; i++) {
298         OUTREG32(halObject->mmuBase + (i * 4), halObject->mmuObj.mmuRegs[i]);
299     }
301     return 0;
304 Int save_ipucore0_mmu_ctxt (VAYUIPU_HalObject * halObject, UInt32 procId)
306     Int status = -1;
308     status = save_mmu_regs(halObject, procId);
309     if (status < 0) {
310         GT_setFailureReason (curTrace,
311                              GT_4CLASS,
312                              "save_ipucore0_mmu_ctxt",
313                              status,
314                              "Unable to save MMU Regs");
315         return status;
316     }
318     status = save_tlbs(halObject, procId);
319     if (status < 0) {
320         GT_setFailureReason (curTrace,
321                              GT_4CLASS,
322                              "save_ipucore0_mmu_ctxt",
323                              status,
324                              "Unable to save TLBs");
325         return status;
326     }
327     return status;
331 Int restore_ipucore0_mmu_ctxt (VAYUIPU_HalObject * halObject,
332                                UInt32 procId)
334     Int status = -1;
336     status = restore_mmu_regs(halObject, procId);
337     if (status < 0) {
338         GT_setFailureReason (curTrace,
339                              GT_4CLASS,
340                              "restore_ipucore0_mmu_ctxt",
341                              status,
342                              "Unable to restore MMU Regs");
343         return status;
344     }
346     status = restore_tlbs(halObject, procId);
347     if (status < 0) {
348         GT_setFailureReason (curTrace,
349                              GT_4CLASS,
350                              "restore_ipucore0_mmu_ctxt",
351                              status,
352                              "Unable to restore TLBs");
353         return status;
354     }
356     return status;
360  /*=========================================
361  * Decides a TLB entry size
362  *
363  */
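/* Illustrative example (not part of the original source): for pa = 0x9D100000
 * and size = 0x00300000, the address is 1MB-aligned but not 16MB-aligned and
 * the size falls between 1MB and 16MB, so the function returns
 * *size_tlb = SECTION and *entry_size = PAGE_SIZE_1MB. */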
364 static Int get_mmu_entry_size (UInt32 pa, UInt32 size, enum pagetype *size_tlb,
365                                UInt32 *entry_size)
367     Int     status = 0;
368     Bool    page_align_4kb  = false;
369     Bool    page_align_64kb = false;
370     Bool    page_align_1mb = false;
371     Bool    page_align_16mb = false;
372     UInt32  phys_addr = pa;
375     /*  First check the page alignment*/
376     if ((phys_addr % PAGE_SIZE_4KB)  == 0)
377         page_align_4kb  = true;
378     if ((phys_addr % PAGE_SIZE_64KB) == 0)
379         page_align_64kb = true;
380     if ((phys_addr % PAGE_SIZE_1MB)  == 0)
381         page_align_1mb  = true;
382     if ((phys_addr % PAGE_SIZE_16MB)  == 0)
383         page_align_16mb  = true;
385     if ((!page_align_64kb) && (!page_align_1mb)  && (!page_align_4kb)) {
386         status = -EINVAL;
387         GT_setFailureReason (curTrace,
388                              GT_4CLASS,
389                              "get_mmu_entry_size",
390                              status,
391                              "phys_addr is not properly aligned");
392         goto error_exit;
393     }
395     /*  Now decide the entry size */
396     if (size >= PAGE_SIZE_16MB) {
397         if (page_align_16mb) {
398             *size_tlb   = SUPER_SECTION;
399             *entry_size = PAGE_SIZE_16MB;
400         } else if (page_align_1mb) {
401             *size_tlb   = SECTION;
402             *entry_size = PAGE_SIZE_1MB;
403         } else if (page_align_64kb) {
404             *size_tlb   = LARGE_PAGE;
405             *entry_size = PAGE_SIZE_64KB;
406         } else if (page_align_4kb) {
407             *size_tlb   = SMALL_PAGE;
408             *entry_size = PAGE_SIZE_4KB;
409         } else {
410             status = -EINVAL;
411             GT_setFailureReason (curTrace,
412                                  GT_4CLASS,
413                                  "get_mmu_entry_size",
414                                  status,
415                                  "size and alignment are invalid");
416             goto error_exit;
417         }
418     } else if (size >= PAGE_SIZE_1MB && size < PAGE_SIZE_16MB) {
419         if (page_align_1mb) {
420             *size_tlb   = SECTION;
421             *entry_size = PAGE_SIZE_1MB;
422         } else if (page_align_64kb) {
423             *size_tlb   = LARGE_PAGE;
424             *entry_size = PAGE_SIZE_64KB;
425         } else if (page_align_4kb) {
426             *size_tlb   = SMALL_PAGE;
427             *entry_size = PAGE_SIZE_4KB;
428         } else {
429             status = -EINVAL;
430             GT_setFailureReason (curTrace,
431                                  GT_4CLASS,
432                                  "get_mmu_entry_size",
433                                  status,
434                                  "size and alignment are invalid");
435             goto error_exit;
436         }
437     } else if (size > PAGE_SIZE_4KB && size < PAGE_SIZE_1MB) {
438         if (page_align_64kb) {
439             *size_tlb   = LARGE_PAGE;
440             *entry_size = PAGE_SIZE_64KB;
441         } else if (page_align_4kb) {
442             *size_tlb   = SMALL_PAGE;
443             *entry_size = PAGE_SIZE_4KB;
444         } else {
445             status = -EINVAL;
446             GT_setFailureReason (curTrace,
447                                  GT_4CLASS,
448                                  "get_mmu_entry_size",
449                                  status,
450                                  "size and alignment are invalid");
451             goto error_exit;
452         }
453     } else if (size == PAGE_SIZE_4KB) {
454         if (page_align_4kb) {
455             *size_tlb   = SMALL_PAGE;
456             *entry_size = PAGE_SIZE_4KB;
457         } else {
458             status = -EINVAL;
459             GT_setFailureReason (curTrace,
460                                  GT_4CLASS,
461                                  "get_mmu_entry_size",
462                                  status,
463                                  "size and alignment are invalid");
464             goto error_exit;
465         }
466     } else {
467         status = -EINVAL;
468         GT_setFailureReason (curTrace,
469                              GT_4CLASS,
470                              "get_mmu_entry_size",
471                              status,
472                              "size is invalid");
473         goto error_exit;
474     }
475     return 0;
477 error_exit:
478     return status;
481 /*
482  * Note: add_dsp_mmu_entry is left here, commented out, in case static TLB
483  * entries ever need to be added outside of the translation table for
484  * faster access in the future.
485  */
486 #if 0
487 /*=========================================
488  * Add DSP MMU entries corresponding to given MPU-Physical address
489  * and DSP-virtual address
490  */
491 static Int add_dsp_mmu_entry (VAYUIPU_HalObject * halObject,
492                               UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
494     UInt32 mapped_size = 0;
495     enum pagetype size_tlb = SECTION;
496     UInt32 entry_size = 0;
497     int status = 0;
498     struct iotlb_entry tlb_entry;
499     int retval = 0;
501     while ((mapped_size < size) && (status == 0)) {
502         status = get_mmu_entry_size(*phys_addr, (size - mapped_size),
503                                     &size_tlb, &entry_size);
504         if (status < 0) {
505             GT_setFailureReason (curTrace,
506                                  GT_4CLASS,
507                                  "add_dsp_mmu_entry",
508                                  status,
509                                  "get_mmu_entry_size failed");
510             goto error_exit;
511         }
513         if (size_tlb == SUPER_SECTION)
514             tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
516         else if (size_tlb == SECTION)
517             tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
519         else if (size_tlb == LARGE_PAGE)
520             tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
522         else if (size_tlb == SMALL_PAGE)
523             tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
525         tlb_entry.elsz = MMU_RAM_ELSZ_16;
526         tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
527         tlb_entry.mixed = MMU_RAM_MIXED;
528         tlb_entry.prsvd = MMU_CAM_P;
529         tlb_entry.valid = MMU_CAM_V;
531         tlb_entry.da = *dsp_addr;
532         tlb_entry.pa = *phys_addr;
533         retval = load_iotlb_entry(halObject, &tlb_entry);
534         if (retval < 0) {
535             GT_setFailureReason (curTrace,
536                                  GT_4CLASS,
537                                  "add_dsp_mmu_entry",
538                                  retval,
539                                  "load_iotlb_entry failed");
540             goto error_exit;
541         }
542         mapped_size  += entry_size;
543         *phys_addr   += entry_size;
544         *dsp_addr   += entry_size;
545     }
547     return 0;
549 error_exit:
550     printf("pte set failure retval = 0x%x, status = 0x%x \n",
551                             retval, status);
553     return retval;
555 #endif
557 static Int add_entry_ext (VAYUIPU_HalObject * halObject,
558                           UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
560     UInt32 mapped_size = 0;
561     enum pagetype     size_tlb = SECTION;
562     UInt32 entry_size = 0;
563     Int status = 0;
564     UInt32 page_size = HW_PAGE_SIZE_1MB;
565     UInt32 flags = 0;
567     flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
568                     DSP_MAPPHYSICALADDR);
569     while ((mapped_size < size) && (status == 0)) {
571         /* get_mmu_entry_size fills size_tlb and entry_size based on the
572          * alignment of phys_addr and the amount of memory still left to
573          * map to the slave (size - mapped_size) */
574         status = get_mmu_entry_size (*phys_addr,
575                                      (size - mapped_size),
576                                      &size_tlb,
577                                      &entry_size);
578         if (status < 0) {
579             GT_setFailureReason (curTrace,
580                                  GT_4CLASS,
581                                  "add_entry_ext",
582                                  status,
583                                  "get_mmu_entry_size failed");
584             break;
585         }
586         else {
587             if (size_tlb == SUPER_SECTION)
588                 page_size = HW_PAGE_SIZE_16MB;
589             else if (size_tlb == SECTION)
590                 page_size = HW_PAGE_SIZE_1MB;
591             else if (size_tlb == LARGE_PAGE)
592                 page_size = HW_PAGE_SIZE_64KB;
593             else if (size_tlb == SMALL_PAGE)
594                 page_size = HW_PAGE_SIZE_4KB;
596             if (status == 0) {
597                 status = rproc_mem_map (halObject,
598                                         *phys_addr,
599                                         *dsp_addr,
600                                         page_size,
601                                         flags);
602                 if (status < 0) {
603                     GT_setFailureReason (curTrace,
604                                          GT_4CLASS,
605                                          "add_entry_ext",
606                                          status,
607                                          "rproc_mem_map failed");
608                     break;
609                 }
610                 mapped_size  += entry_size;
611                 *phys_addr   += entry_size;
612                 *dsp_addr   += entry_size;
613             }
614         }
615     }
616     return status;
619 static Int __dump_tlb_entries (VAYUIPU_HalObject * halObject,
620                                struct cr_regs *crs, int num)
622     int i;
623     struct iotlb_lock saved;
624     struct cr_regs tmp;
625     struct cr_regs *p = crs;
627     iotlb_getLock(halObject, &saved);
628     for_each_iotlb_cr(num, i, tmp) {
629         if (!iotlb_cr_valid(&tmp))
630             continue;
631         *p++ = tmp;
632     }
633     iotlb_setLock(halObject, &saved);
634     return  p - crs;
637 UInt32 get_IpuCore0VirtAdd(VAYUIPU_HalObject * halObject, UInt32 physAdd)
639     int i, num;
640     struct cr_regs *cr;
641     struct cr_regs *p = NULL;
642     //DWORD dwPhys;
643     UInt32 lRetVal = 0;
644     num = 32;
645     if(shm_phys_addr == 0)
646         return 0;
647     cr = mmap(NULL,
648               sizeof(struct cr_regs) * num,
649               PROT_NOCACHE | PROT_READ | PROT_WRITE,
650               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
651               NOFD,
652               0);
653     if (cr == MAP_FAILED)
654     {
655         return 0;
656     }
658     memset(cr, 0, sizeof(struct cr_regs) * num);
660     num = __dump_tlb_entries(halObject, cr, num);
661     for (i = 0; i < num; i++)
662     {
663         p = cr + i;
664         if(physAdd >= (p->ram & 0xFFFFF000) &&  physAdd < ((p + 1)->ram & 0xFFFFF000))
665         {
666             lRetVal = ((p->cam & 0xFFFFF000) + (physAdd - (p->ram & 0xFFFFF000)));
667         }
668     }
669     munmap(cr, sizeof(struct cr_regs) * num);
671     return lRetVal;
675 /**
676  * dump_tlb_entries - dump cr arrays to given buffer
677  * @halObject:  HAL object for the target IOMMU
678  * @buf:        output buffer, at least @bytes bytes long
679  **/
680 static UInt32 dump_tlb_entries (VAYUIPU_HalObject * halObject,
681                                 char *buf, UInt32 bytes)
683     Int i, num;
684     struct cr_regs *cr;
685     Char *p = buf;
687     num = bytes / sizeof(*cr);
688     num = min(32, num);
689     cr = mmap(NULL,
690             sizeof(struct cr_regs) * num,
691               PROT_NOCACHE | PROT_READ | PROT_WRITE,
692               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
693               NOFD,
694               0);
695     if (cr == MAP_FAILED)
696     {
697         return 0;
699     }
700     memset(cr, 0, sizeof(struct cr_regs) * num);
702     num = __dump_tlb_entries(halObject, cr, num);
703     for (i = 0; i < num; i++)
704         p += iotlb_dump_cr(cr + i, p);
705     munmap(cr, sizeof(struct cr_regs) * num);
706     return p - buf;
710 static Void rproc_tlb_dump (VAYUIPU_HalObject * halObject)
712     Char *p;
714     p = mmap(NULL,
715              1000,
716              PROT_NOCACHE | PROT_READ | PROT_WRITE,
717              MAP_ANON | MAP_PHYS | MAP_PRIVATE,
718              NOFD,
719              0);
720     if (MAP_FAILED != p)
721     {
722         dump_tlb_entries(halObject, p, 1000);
723         munmap(p, 1000);
724     }
726     return;
730 /*================================
731  * Initialize the IPU MMU.
732  *===============================*/
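/* Sequence: disable the MMU and its table-walking logic, program a page-table
 * mapping for every entry in memEntries[], point TTB at the physical address
 * of the L1 table, then re-enable the TWL and finally the MMU itself. */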
734 static Int rproc_mmu_init (VAYUIPU_HalObject * halObject,
735                            ProcMgr_AddrInfo * memEntries,
736                            UInt32 numMemEntries)
738     Int ret_val = 0;
739     UInt32 phys_addr = 0;
740     UInt32 i = 0;
741     UInt32 virt_addr = 0;
742     VAYUIpu_MMURegs * mmuRegs = NULL;
744     if (halObject == NULL) {
745         ret_val = -ENOMEM;
746         GT_setFailureReason (curTrace,
747                              GT_4CLASS,
748                              "rproc_mmu_init",
749                              ret_val,
750                              "halObject is NULL");
751         goto error_exit;
752     }
754     if (halObject->mmuBase == 0) {
755         ret_val = -ENOMEM;
756         GT_setFailureReason (curTrace,
757                              GT_4CLASS,
758                              "rproc_mmu_init",
759                              ret_val,
760                              "halObject->mmuBase is 0");
761         goto error_exit;
762     }
763     mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
765     /*  Disable the MMU & TWL */
766     hw_mmu_disable(halObject->mmuBase);
767     hw_mmu_twl_disable(halObject->mmuBase);
769     printf("  Programming IPU memory regions\n");
770     printf("=========================================\n");
772     for (i = 0; i < numMemEntries; i++) {
773         phys_addr = memEntries[i].addr[ProcMgr_AddrType_MasterPhys];
774         if (phys_addr == (UInt32)(-1) || phys_addr == 0) {
775             GT_setFailureReason (curTrace,
776                                  GT_4CLASS,
777                                  "rproc_mmu_init",
778                                  ret_val,
779                                  "phys_addr is invalid");
780             goto error_exit;
781         }
782         printf( "VA = [0x%x] of size [0x%x] at PA = [0x%x]\n",
783                 memEntries[i].addr[ProcMgr_AddrType_SlaveVirt],
784                 memEntries[i].size,
785                 (unsigned int)phys_addr);
787         /* VAYU SDC code */
788         /* Adjust below logic if using cacheable shared memory */
789         shm_phys_addr = 1;
790         virt_addr = memEntries[i].addr[ProcMgr_AddrType_SlaveVirt];
792         ret_val = add_entry_ext(halObject, &phys_addr, &virt_addr,
793                                     (memEntries[i].size));
794         if (ret_val < 0) {
795             GT_setFailureReason (curTrace,
796                                  GT_4CLASS,
797                                  "rproc_mmu_init",
798                                  ret_val,
799                                  "add_entry_ext failed");
800             goto error_exit;
801         }
802     }
804     /* Set the TTB to point to the L1 page table's physical address */
805     OUTREG32(&mmuRegs->TTB,
806            ((struct pg_table_attrs *)(halObject->mmuObj.pPtAttrs))->l1_base_pa);
808     /* Enable the TWL */
809     hw_mmu_twl_enable(halObject->mmuBase);
811     hw_mmu_enable(halObject->mmuBase);
813     rproc_tlb_dump(halObject);
815     return 0;
816 error_exit:
817     return ret_val;
820 /****************************************************
821 * Function to enable interrupt for MMU faults
822 *****************************************************/
823 Int rproc_enable_fault_interrupt(VAYUIPU_HalObject * halObject)
825     Int status = 0;
826     UInt32 reg;
827     VAYUIpu_MMURegs * mmuRegs = NULL;
829     if (halObject == NULL) {
830         status = -ENOMEM;
831         GT_setFailureReason (curTrace,
832                              GT_4CLASS,
833                              "rproc_enable_fault_interrupt",
834                              status,
835                              "halObject is NULL");
836     }
837     else if (halObject->mmuBase == 0) {
838         status = -ENOMEM;
839         GT_setFailureReason (curTrace,
840                              GT_4CLASS,
841                              "rproc_enable_fault_interrupt",
842                              status,
843                              "halObject->mmuBase is NULL");
844     }
845     else {
846         mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
847         /*
848          * Enable generation of interrupt on fault.
849          * This also ensures the slave core stays halted in its
850          * fault state upon generating an MMU fault.
851          */
852         reg = INREG32(&mmuRegs->GP_REG);
853         reg &= ~MMU_FAULT_INTR_DIS_MASK;
854         OUTREG32(&mmuRegs->GP_REG, reg);
856         OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TLB_MISS_MASK);
857    }
859    return status;
862 /****************************************************
863 * Function to disable interrupt for MMU faults
864 *****************************************************/
865 Int rproc_disable_fault_interrupt(VAYUIPU_HalObject * halObject)
867     Int status = 0;
868     VAYUIpu_MMURegs * mmuRegs = NULL;
870     if (halObject == NULL) {
871         status = -ENOMEM;
872         GT_setFailureReason (curTrace,
873                              GT_4CLASS,
874                              "rproc_disable_fault_interrupt",
875                              status,
876                              "halObject is NULL");
877     }
878     else if (halObject->mmuBase == 0) {
879         status = -ENOMEM;
880         GT_setFailureReason (curTrace,
881                              GT_4CLASS,
882                              "rproc_disable_fault_interrupt",
883                              status,
884                              "halObject->mmuBase is NULL");
885     }
886     else {
887         mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
888         OUTREG32(&mmuRegs->IRQENABLE, 0);
889    }
891    return status;
894 /****************************************************
896 *  Function which sets the TWL of the remote core
899 *****************************************************/
901 static Int rproc_set_twl (VAYUIPU_HalObject * halObject, Bool on)
903     Int status = 0;
904     VAYUIpu_MMURegs * mmuRegs = NULL;
905     ULONG reg;
907     if (halObject == NULL) {
908         status = -ENOMEM;
909         GT_setFailureReason (curTrace,
910                              GT_4CLASS,
911                              "rproc_set_twl",
912                              status,
913                              "halObject is NULL");
914     }
915     else if (halObject->mmuBase == 0) {
916         status = -ENOMEM;
917         GT_setFailureReason (curTrace,
918                              GT_4CLASS,
919                              "rproc_set_twl",
920                              status,
921                              "halObject->mmuBase is NULL");
922     }
923     else {
924         mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
926         /* Setting MMU to Smart Idle Mode */
927         reg = INREG32(&mmuRegs->SYSCONFIG);
928         reg &= ~MMU_SYS_IDLE_MASK;
929         reg |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
930         OUTREG32(&mmuRegs->SYSCONFIG, reg);
932         /* Enabling MMU */
933         reg =  INREG32(&mmuRegs->CNTL);
935         if (on)
936             OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TWL_MASK);
937         else
938             OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TLB_MISS_MASK);
940         reg &= ~MMU_CNTL_MASK;
941         if (on)
942             reg |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
943         else
944             reg |= (MMU_CNTL_MMU_EN);
946         OUTREG32(&mmuRegs->CNTL, reg);
947     }
949     return status;
953 /*========================================
954  * This sets up the IPU processor MMU Page tables
955  *
956  */
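/* Layout note (derived from the code below): the L1 and L2 tables are carved
 * out of a single physically contiguous MAP_PHYS allocation (1MB by default,
 * doubled once if the returned physical address is not aligned to l1_size).
 * The L1 table starts at the first l1_size-aligned address in the block and
 * the L2 coarse tables start at a fixed 0x80000 offset from the L1 base. */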
957 static struct pg_table_attrs * init_mmu_page_attribs (UInt32 l1_size,
958                                                       UInt32 l1_allign,
959                                                       UInt32 ls_num_of_pages)
961     struct pg_table_attrs * p_pt_attrs = NULL;
962     UInt32 pg_tbl_pa = 0;
963     off64_t offset = 0;
964     UInt32 pg_tbl_va = 0;
965     UInt32 align_size = 0;
966     UInt32 len = 0;
967     int status = 0;
969     p_pt_attrs = Memory_alloc (NULL, sizeof(struct pg_table_attrs), 0, NULL);
970     if (p_pt_attrs)
971         Memory_set (p_pt_attrs, 0, sizeof(struct pg_table_attrs));
972     else {
973         status = -ENOMEM;
974         GT_setFailureReason (curTrace,
975                              GT_4CLASS,
976                              "init_mmu_page_attribs",
977                              status,
978                              "Memory_alloc failed");
979         goto error_exit;
980     }
982     p_pt_attrs->l1_size = l1_size;
983     align_size = p_pt_attrs->l1_size;
984     p_pt_attrs->l1_tbl_alloc_sz = 0x100000;
985     /* Alignment sizes are expected to be powers of 2 */
986     /* We want the allocation aligned to the L1 table size */
987     pg_tbl_va = (UInt32) mmap64 (NULL,
988                                  p_pt_attrs->l1_tbl_alloc_sz,
989                                  PROT_NOCACHE | PROT_READ | PROT_WRITE,
990                                  MAP_ANON | MAP_PHYS | MAP_PRIVATE,
991                                  NOFD,
992                                  0x0);
993     if (pg_tbl_va == (UInt32)MAP_FAILED) {
994         pg_tbl_va = 0;
995         status = -ENOMEM;
996         GT_setFailureReason (curTrace,
997                              GT_4CLASS,
998                              "init_mmu_page_attribs",
999                              status,
1000                              "mmap64 failed");
1001         goto error_exit;
1002     }
1003     else {
1004         /* Make sure the memory is contiguous */
1005         status = mem_offset64 ((void *)pg_tbl_va, NOFD,
1006                                p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
1007         pg_tbl_pa = (UInt32)offset;
1008         if (len != p_pt_attrs->l1_tbl_alloc_sz) {
1009             status = -ENOMEM;
1010             GT_setFailureReason (curTrace,
1011                                  GT_4CLASS,
1012                                  "init_mmu_page_attribs",
1013                                  status,
1014                                  "phys mem is not contiguous");
1015         }
1016         if (status != 0) {
1017             GT_setFailureReason (curTrace,
1018                                  GT_4CLASS,
1019                                  "init_mmu_page_attribs",
1020                                  status,
1021                                  "mem_offset64 failed");
1022             goto error_exit;
1023         }
1024     }
1025     /* Check if the PA is aligned for us */
1026     if ((pg_tbl_pa) & (align_size-1)) {
1027         /* PA not aligned to the page table size, */
1028         /* so retry with a larger allocation and align manually */
1029         munmap((void *)pg_tbl_va, p_pt_attrs->l1_tbl_alloc_sz);
1030         p_pt_attrs->l1_tbl_alloc_sz = p_pt_attrs->l1_tbl_alloc_sz*2;
1031         /* We want the allocation aligned to the L1 table size */
1032         pg_tbl_va = (UInt32) mmap64 (NULL,
1033                                      p_pt_attrs->l1_tbl_alloc_sz,
1034                                      PROT_NOCACHE | PROT_READ | PROT_WRITE,
1035                                      MAP_ANON | MAP_PHYS | MAP_PRIVATE,
1036                                      NOFD,
1037                                      0);
1038         if (pg_tbl_va == (UInt32)MAP_FAILED) {
1039             pg_tbl_va = 0;
1040             status = -ENOMEM;
1041             GT_setFailureReason (curTrace,
1042                                  GT_4CLASS,
1043                                  "init_mmu_page_attribs",
1044                                  status,
1045                                  "mmap64 failed");
1046             goto error_exit;
1047         }
1048         else {
1049             /* Make sure the memory is contiguous */
1050             status = mem_offset64 ((void *)pg_tbl_va, NOFD,
1051                                    p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
1052             pg_tbl_pa = (UInt32)offset;
1053             if (len != p_pt_attrs->l1_tbl_alloc_sz) {
1054                 status = -ENOMEM;
1055                 GT_setFailureReason (curTrace,
1056                                      GT_4CLASS,
1057                                      "init_mmu_page_attribs",
1058                                      status,
1059                                      "phys mem is not contiguous");
1060             }
1061             if (status != 0) {
1062                 GT_setFailureReason (curTrace,
1063                                      GT_4CLASS,
1064                                      "init_mmu_page_attribs",
1065                                      status,
1066                                      "mem_offset64 failed");
1067                 goto error_exit;
1068             }
1069         }
1070         /* We should be able to get aligned table now */
1071         p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
1072         p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
1073         /* Align the PA to the next 'align'  boundary */
1074         p_pt_attrs->l1_base_pa = ((pg_tbl_pa) + (align_size-1)) &
1075                             (~(align_size-1));
1076         p_pt_attrs->l1_base_va = pg_tbl_va + (p_pt_attrs->l1_base_pa -
1077                                 pg_tbl_pa);
1078     } else {
1079         /* We got aligned PA, cool */
1080         p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
1081         p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
1082         p_pt_attrs->l1_base_pa = pg_tbl_pa;
1083         p_pt_attrs->l1_base_va = pg_tbl_va;
1084     }
1086     if (p_pt_attrs->l1_base_va)
1087         memset((UInt8*)p_pt_attrs->l1_base_va, 0x00, p_pt_attrs->l1_size);
1088     p_pt_attrs->l2_num_pages = ls_num_of_pages;
1089     p_pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * p_pt_attrs->l2_num_pages;
1090     align_size = 4; /* Make it UInt32 aligned  */
1091     /* L2 tables are carved out of the same block, 0x80000 past the L1 base */
1092     pg_tbl_va = p_pt_attrs->l1_base_va + 0x80000;
1093     pg_tbl_pa = p_pt_attrs->l1_base_pa + 0x80000;
1094     p_pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
1095     p_pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
1096     p_pt_attrs->ls_tbl_alloc_sz = p_pt_attrs->l2_size;
1097     p_pt_attrs->l2_base_pa = pg_tbl_pa;
1098     p_pt_attrs->l2_base_va = pg_tbl_va;
1099     if (p_pt_attrs->l2_base_va)
1100         memset((UInt8*)p_pt_attrs->l2_base_va, 0x00, p_pt_attrs->l2_size);
1102     p_pt_attrs->pg_info = Memory_alloc(NULL, sizeof(struct page_info) * p_pt_attrs->l2_num_pages, 0, NULL);
1103     if (p_pt_attrs->pg_info)
1104         Memory_set (p_pt_attrs->pg_info, 0, sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
1105     else {
1106         status = -ENOMEM;
1107         GT_setFailureReason (curTrace,
1108                              GT_4CLASS,
1109                              "init_mmu_page_attribs",
1110                              status,
1111                              "Memory_alloc failed");
1112         goto error_exit;
1113     }
1114     return p_pt_attrs;
1116 error_exit:
1117     if (p_pt_attrs) {
1118         if (p_pt_attrs->pg_info)
1119             Memory_free (NULL, p_pt_attrs->pg_info, sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
1120         if (p_pt_attrs->l1_tbl_alloc_va) {
1121             munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
1122                     p_pt_attrs->l1_tbl_alloc_sz);
1123         }
1124         Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
1125         p_pt_attrs = NULL;
1126     }
1128     return NULL;
1132 /*========================================
1133  * This destroys the IPU processor MMU Page tables
1134  *
1135  */
1136 static Void deinit_mmu_page_attribs (struct pg_table_attrs * p_pt_attrs)
1138     if (p_pt_attrs) {
1139         if (p_pt_attrs->pg_info)
1140             Memory_free (NULL, p_pt_attrs->pg_info, sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
1141         if (p_pt_attrs->l1_tbl_alloc_va) {
1142             munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
1143                     p_pt_attrs->l1_tbl_alloc_sz);
1144         }
1145         Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
1146         p_pt_attrs = NULL;
1147     }
1151 /*============================================
1152  * This function calculates PTE address (MPU virtual) to be updated
1153  *  It also manages the L2 page tables
1154  */
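/* For 4KB and 64KB mappings the L1 entry must point to a coarse L2 table:
 * either the existing one referenced by the L1 PTE is reused, or a free L2
 * table (pg_info[].num_entries == 0) is claimed and linked in. 1MB and 16MB
 * mappings are written directly into the L1 table. */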
1155 static Int pte_set (UInt32 pa, UInt32 va, UInt32 size,
1156                     struct hw_mmu_map_attrs_t *attrs, struct pg_table_attrs *pt_Table)
1158     UInt32 i;
1159     UInt32 pte_val;
1160     UInt32 pte_addr_l1;
1161     UInt32 pte_size;
1162     UInt32 pg_tbl_va; /* Base address of the PT that will be updated */
1163     UInt32 l1_base_va;
1164     /* The compiler warns that the next three variables might be used
1165      * uninitialized in this function. That does not appear to be the case,
1166      * but initialize them anyway to silence the warning. */
1167     UInt32 l2_base_va = 0;
1168     UInt32 l2_base_pa = 0;
1169     UInt32 l2_page_num = 0;
1170     struct pg_table_attrs *pt = pt_Table;
1171     int status = 0;
1173     l1_base_va = pt->l1_base_va;
1174     pg_tbl_va = l1_base_va;
1176     if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) {
1177         /* Find whether the L1 PTE points to a valid L2 PT */
1178         pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1179         if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1180             pte_val = *(UInt32 *)pte_addr_l1;
1181             pte_size = hw_mmu_pte_sizel1(pte_val);
1182         } else {
1183             return -EINVAL;
1184         }
1185         /* FIX ME */
1186         /* TODO: Add synchronization element */
1187         /*        sync_enter_cs(pt->hcs_object);*/
1188         if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1189             /* Get the L2 PA from the L1 PTE, and find
1190              * corresponding L2 VA */
1191             l2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1192             l2_base_va = l2_base_pa - pt->l2_base_pa +
1193             pt->l2_base_va;
1194             l2_page_num = (l2_base_pa - pt->l2_base_pa) /
1195                     HW_MMU_COARSE_PAGE_SIZE;
1196         } else if (pte_size == 0) {
1197             /* L1 PTE is invalid. Allocate a L2 PT and
1198              * point the L1 PTE to it */
1199             /* Find a free L2 PT. */
1200             for (i = 0; (i < pt->l2_num_pages) &&
1201                 (pt->pg_info[i].num_entries != 0); i++)
1202                 ;
1203             if (i < pt->l2_num_pages) {
1204                 l2_page_num = i;
1205                 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1206                        HW_MMU_COARSE_PAGE_SIZE);
1207                 l2_base_va = pt->l2_base_va + (l2_page_num *
1208                        HW_MMU_COARSE_PAGE_SIZE);
1209                 /* Endianness attributes are ignored for
1210                  * HW_MMU_COARSE_PAGE_SIZE */
1211                 status = hw_mmu_pte_set(pg_tbl_va, l2_base_pa, va,
1212                                         HW_MMU_COARSE_PAGE_SIZE, attrs);
1213             } else {
1214                 status = -ENOMEM;
1215             }
1216         } else {
1217             /* Found valid L1 PTE of another size.
1218              * Should not overwrite it. */
1219             status = -EINVAL;
1220         }
1221         if (status == 0) {
1222             pg_tbl_va = l2_base_va;
1223             if (size == HW_PAGE_SIZE_64KB)
1224                 pt->pg_info[l2_page_num].num_entries += 16;
1225             else
1226                 pt->pg_info[l2_page_num].num_entries++;
1227         }
1228     }
1229     if (status == 0) {
1230         status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1231         if (status == RET_OK)
1232             status = 0;
1233     }
1234     return status;
1238 /*=============================================
1239  * This function calculates the optimum page-aligned addresses and sizes
1240  * Caller must pass page-aligned values
1241  */
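/* Illustrative example (not part of the original source): mapping
 * pa = va = 0x99000000 with size = 0x01110000 results in three pte_set()
 * calls: one 16MB super-section, one 1MB section and one 64KB large page,
 * since each iteration picks the largest page size to which both addresses
 * are aligned and that still fits in the remaining byte count. */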
1242 static Int pte_update (UInt32 pa, UInt32 va, UInt32 size,
1243                        struct hw_mmu_map_attrs_t *map_attrs, struct pg_table_attrs *pt_Table)
1245     UInt32 i;
1246     UInt32 all_bits;
1247     UInt32 pa_curr = pa;
1248     UInt32 va_curr = va;
1249     UInt32 num_bytes = size;
1250     Int status = 0;
1251     UInt32 pg_size[] = {HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB,
1252                HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB};
1253     while (num_bytes && (status == 0)) {
1254         /* To find the max. page size with which both PA & VA are
1255          * aligned */
1256         all_bits = pa_curr | va_curr;
1257         for (i = 0; i < 4; i++) {
1258             if ((num_bytes >= pg_size[i]) && ((all_bits &
1259                (pg_size[i] - 1)) == 0)) {
1260                 status = pte_set(pa_curr,
1261                     va_curr, pg_size[i], map_attrs, pt_Table);
1262                 pa_curr += pg_size[i];
1263                 va_curr += pg_size[i];
1264                 num_bytes -= pg_size[i];
1265                  /* Don't try smaller sizes. Hopefully we have
1266                  * reached an address aligned to a bigger page
1267                  * size */
1268                 break;
1269             }
1270         }
1271     }
1272     return status;
1276 /*============================================
1277  * This function maps MPU buffer to the DSP address space. It performs
1278 * linear to physical address translation if required. It translates each
1279 * page since linear addresses can be physically non-contiguous
1280 * All address & size arguments are assumed to be page aligned (in proc.c)
1281  *
1282  */
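/* Usage within this file: add_entry_ext() calls this one page at a time with
 * map_attr = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR),
 * so the DSP_MAPPHYSICALADDR branch below feeds the already-physical address
 * straight into pte_update() and no user-va translation is performed. */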
1283 static Int rproc_mem_map (VAYUIPU_HalObject * halObject,
1284                           UInt32 mpu_addr, UInt32 ul_virt_addr,
1285                           UInt32 num_bytes, UInt32 map_attr)
1287     UInt32 attrs;
1288     Int status = 0;
1289     struct hw_mmu_map_attrs_t hw_attrs;
1290     Int pg_i = 0;
1292     if (halObject == NULL) {
1293         status = -ENOMEM;
1294         GT_setFailureReason (curTrace,
1295                              GT_4CLASS,
1296                              "rproc_mem_map",
1297                              status,
1298                              "halObject is NULL");
1299     }
1300     else if (halObject->mmuBase == 0) {
1301         status = -ENOMEM;
1302         GT_setFailureReason (curTrace,
1303                              GT_4CLASS,
1304                              "rproc_mem_map",
1305                              status,
1306                              "halObject->mmuBase is 0");
1307     }
1308     else if (num_bytes == 0) {
1309         status = -EINVAL;
1310         GT_setFailureReason (curTrace,
1311                              GT_4CLASS,
1312                              "rproc_mem_map",
1313                              status,
1314                              "num_bytes is 0");
1315     }
1316     else {
1317         if (map_attr != 0) {
1318             attrs = map_attr;
1319             attrs |= DSP_MAPELEMSIZE32;
1320         } else {
1321             /* Assign default attributes */
1322             attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE32;
1323         }
1324         /* Take mapping properties */
1325         if (attrs & DSP_MAPBIGENDIAN)
1326             hw_attrs.endianism = HW_BIG_ENDIAN;
1327         else
1328             hw_attrs.endianism = HW_LITTLE_ENDIAN;
1330         hw_attrs.mixedSize = (enum hw_mmu_mixed_size_t)
1331                      ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1332         /* Ignore element_size if mixedSize is enabled */
1333         if (hw_attrs.mixedSize == 0) {
1334             if (attrs & DSP_MAPELEMSIZE8) {
1335                 /* Size is 8 bit */
1336                 hw_attrs.element_size = HW_ELEM_SIZE_8BIT;
1337             } else if (attrs & DSP_MAPELEMSIZE16) {
1338                 /* Size is 16 bit */
1339                 hw_attrs.element_size = HW_ELEM_SIZE_16BIT;
1340             } else if (attrs & DSP_MAPELEMSIZE32) {
1341                 /* Size is 32 bit */
1342                 hw_attrs.element_size = HW_ELEM_SIZE_32BIT;
1343             } else if (attrs & DSP_MAPELEMSIZE64) {
1344                 /* Size is 64 bit */
1345                 hw_attrs.element_size = HW_ELEM_SIZE_64BIT;
1346             } else {
1347                 /* Mixedsize isn't enabled, so size can't be
1348                  * zero here */
1349                 status = -EINVAL;
1350                 GT_setFailureReason (curTrace,
1351                                      GT_4CLASS,
1352                                      "rproc_mem_map",
1353                                      status,
1354                                      "MMU element size is zero");
1355             }
1356         }
1358         if (status >= 0) {
1359             /*
1360              * Do OS-specific user-va to pa translation.
1361              * Combine physically contiguous regions to reduce TLBs.
1362              * Pass the translated pa to PteUpdate.
1363              */
1364             if ((attrs & DSP_MAPPHYSICALADDR)) {
1365                 status = pte_update(mpu_addr, ul_virt_addr, num_bytes,
1366                            &hw_attrs,
1367                            (struct pg_table_attrs *)halObject->mmuObj.pPtAttrs);
1368             }
1370             /* Don't propagate Linux or HW status to upper layers */
1371             if (status < 0) {
1372                 /*
1373                  * Roll back the pages mapped so far in case the mapping
1374                  * failed partway through
1375                  */
1376                 if (pg_i)
1377                     rproc_mem_unmap(halObject, ul_virt_addr,
1378                                     (pg_i * PAGE_SIZE));
1379             }
1381             /* In any case, flush the TLB
1382              * This is called from here instead from pte_update to avoid
1383              * unnecessary repetition while mapping non-contiguous physical
1384              * regions of a virtual region */
1385             hw_mmu_tlb_flushAll(halObject->mmuBase);
1386         }
1387     }
1388     return status;
1391 /*
1392  *  ======== rproc_mem_lookup ========
1393  *  Look up the physical address of a virtual address based on PTEs
1394  */
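/* The walk mirrors what the hardware table-walker does: read the TTB
 * register, temporarily mmap() the 16KB L1 table and, if the L1 entry is a
 * coarse descriptor, the referenced L2 table as well; the physical address is
 * then the PTE base plus the offset of 'da' within that page. */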
1395 Int rproc_mem_lookup(VAYUIPU_HalObject * halObject,
1396     UInt32 da, UInt32 * pAddr)
1398     UInt32 L1_base_va = 0;
1399     UInt32 L2_base_va = 0;
1400     UInt32 L2_base_pa;
1401     UInt32 pte_val;
1402     UInt32 pte_size;
1403     UInt32 pte_addr_l1;
1404     UInt32 pte_addr_l2 = 0;
1405     UInt32 vaCurr;
1406     Int status = 0;
1407     VAYUIpu_MMURegs * mmuRegs;
1408     UInt32 tableBaseAddr = 0;
1410     if (halObject == NULL) {
1411         status = -ENOMEM;
1412         GT_setFailureReason (curTrace,
1413                              GT_4CLASS,
1414                              "rproc_mem_lookup",
1415                              status,
1416                              "halObject is NULL");
1417     }
1418     else if (halObject->mmuBase == 0) {
1419         status = -ENOMEM;
1420         GT_setFailureReason (curTrace,
1421                              GT_4CLASS,
1422                              "rproc_mem_lookup",
1423                              status,
1424                              "halObject->mmuBase is 0");
1425     }
1426     else {
1427         /* Retrieve the L1 page table's physical address from TTB */
1428         mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
1429         tableBaseAddr = INREG32(&mmuRegs->TTB);
1430         vaCurr = da;
1432         /* Temporarily map to virtual address space */
1433         L1_base_va = (UInt32) mmap(NULL,
1434                     TRANSLATION_TABLE_SIZE,
1435                     PROT_NOCACHE | PROT_READ | PROT_WRITE,
1436                     MAP_PHYS | MAP_PRIVATE,
1437                     NOFD,
1438                     (off_t)tableBaseAddr);
1439         if (L1_base_va == (UInt32)MAP_FAILED) {
1440             status = -ENOMEM;
1441             GT_setFailureReason (curTrace,
1442                 GT_4CLASS,
1443                 "rproc_mem_lookup",
1444                 status,
1445                 "Memory map failed.");
1446                 goto EXIT_LOOP;
1447         }
1449         /* Lookup entry in L1 page table */
1450         pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1451         pte_val = *(UInt32 *)pte_addr_l1;
1452         pte_size = hw_mmu_pte_sizel1(pte_val);
1454         if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1455             /*
1456              * Get the L2 PA from the L1 PTE, and find
1457              * corresponding L2 VA
1458              */
1459             L2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1461             /* Temporarily map to virtual address space */
1462             L2_base_va = (UInt32)mmap(NULL, HW_MMU_COARSE_PAGE_SIZE,
1463                 PROT_NOCACHE | PROT_READ | PROT_WRITE,
1464                 MAP_PHYS | MAP_PRIVATE,
1465                 NOFD,
1466                 (off_t)L2_base_pa);
1467             if (L2_base_va == (UInt32)MAP_FAILED) {
1468                 status = -ENOMEM;
1469                 GT_setFailureReason (curTrace,
1470                          GT_4CLASS,
1471                          "rproc_mem_lookup",
1472                          status,
1473                          "Memory map failed.");
1474                 goto EXIT_LOOP;
1475             }
1477             /*
1478              * Find the L2 PTE address from which we will start
1479              * clearing, the number of PTEs to be cleared on this
1480              * page, and the size of VA space that needs to be
1481              * cleared on this L2 page
1482              */
1483             pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
1484             /*
1485              * Unmap the VA space on this L2 PT. A quicker way
1486              * would be to clear pte_count entries starting from
1487              * pte_addr_l2. However, below code checks that we don't
1488              * clear invalid entries or less than 64KB for a 64KB
1489              * entry. Similar checking is done for L1 PTEs too
1490              * below
1491              */
1492             pte_val = *(UInt32 *)pte_addr_l2;
1493             pte_size = hw_mmu_pte_sizel2(pte_val);
1494             /* vaCurr aligned to pte_size? */
1495             if (pte_size != 0) {
1496                 /* Obtain Physical address from VA */
1497                 *pAddr = (pte_val & ~(pte_size - 1));
1498                 *pAddr += (vaCurr & (pte_size - 1));
1499             }
1500             else {
1501                 /* Error. Not found */
1502                 *pAddr = 0;
1503                 status = -EFAULT;
1504             }
1505         }
1506         else if (pte_size != 0) {
1507             /* pte_size = 1 MB or 16 MB */
1508             /* entry is in L1 page table */
1509             *pAddr = (pte_val & ~(pte_size - 1));
1510             *pAddr += (vaCurr & (pte_size - 1));
1511         }
1512         else {
1513             /* Not found */
1514             *pAddr = 0;
1515             status = -EFAULT;
1516         }
1517     }
1519 EXIT_LOOP:
1521     if ((L2_base_va != 0) && (L2_base_va != (UInt32)MAP_FAILED)) {
1522         munmap((void *)L2_base_va, HW_MMU_COARSE_PAGE_SIZE);
1523     }
1525     if ((L1_base_va != 0) && (L1_base_va != (UInt32)MAP_FAILED)) {
1526         munmap((void *)L1_base_va, TRANSLATION_TABLE_SIZE);
1527     }
1529     return status;
1533 /*
1534  *  ======== rproc_mem_unmap ========
1535  *      Invalidate the PTEs for the DSP VA block to be unmapped.
1536  *
1537  *      PTEs of a mapped memory block are contiguous in any page table
1538  *      So, instead of looking up the PTE address for every 4K block,
1539  *      we clear consecutive PTEs until we unmap all the bytes
1540  */
static Int rproc_mem_unmap (VAYUIPU_HalObject * halObject,
                            UInt32 da, UInt32 num_bytes)
{
    UInt32 L1_base_va;
    UInt32 L2_base_va;
    UInt32 L2_base_pa;
    UInt32 L2_page_num;
    UInt32 pte_val;
    UInt32 pte_size;
    UInt32 pte_count;
    UInt32 pte_addr_l1;
    UInt32 pte_addr_l2 = 0;
    UInt32 rem_bytes;
    UInt32 rem_bytes_l2;
    UInt32 vaCurr;
    Int status = 0;
    struct pg_table_attrs * p_pt_attrs = NULL;

    if (halObject == NULL) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "rproc_mem_unmap",
                             status,
                             "halObject is NULL");
    }
    else if (halObject->mmuBase == 0) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "rproc_mem_unmap",
                             status,
                             "halObject->mmuBase is 0");
    }
    else if (halObject->mmuObj.pPtAttrs == NULL) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "rproc_mem_unmap",
                             status,
                             "halObject->mmuObj.pPtAttrs is 0");
    }
    else {
        p_pt_attrs = (struct pg_table_attrs *)halObject->mmuObj.pPtAttrs;
        vaCurr = da;
        rem_bytes = num_bytes;
        rem_bytes_l2 = 0;
        L1_base_va = p_pt_attrs->l1_base_va;
        pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
        while (rem_bytes) {
            UInt32 vaCurrOrig = vaCurr;
            /* Find whether the L1 PTE points to a valid L2 PT */
            pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
            pte_val = *(UInt32 *)pte_addr_l1;
            pte_size = hw_mmu_pte_sizel1(pte_val);
            if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
                /*
                 * Get the L2 PA from the L1 PTE, and find
                 * corresponding L2 VA
                 */
                L2_base_pa = hw_mmu_pte_coarsel1(pte_val);
                L2_base_va = L2_base_pa - p_pt_attrs->l2_base_pa
                            + p_pt_attrs->l2_base_va;
                L2_page_num = (L2_base_pa - p_pt_attrs->l2_base_pa) /
                        HW_MMU_COARSE_PAGE_SIZE;
                /*
                 * Find the L2 PTE address from which we will start
                 * clearing, the number of PTEs to be cleared on this
                 * page, and the size of VA space that needs to be
                 * cleared on this L2 page
                 */
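                /*
                 * The offset of pte_addr_l2 within the coarse table
                 * (pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1)) gives the
                 * byte position of the first PTE to clear; the count of
                 * remaining PTEs up to the end of the table is then capped
                 * by the number of 4 KB pages still left to unmap.
                 */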
                pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
                pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
                pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) /
                        sizeof(UInt32);
                if (rem_bytes < (pte_count * PAGE_SIZE))
                    pte_count = rem_bytes / PAGE_SIZE;

                rem_bytes_l2 = pte_count * PAGE_SIZE;
                /*
                 * Unmap the VA space on this L2 PT. A quicker way
                 * would be to clear pte_count entries starting from
                 * pte_addr_l2. However, the code below checks that we
                 * don't clear invalid entries, or less than 64KB for a
                 * 64KB entry. Similar checking is done for the L1 PTEs
                 * further below.
                 */
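                /*
                 * Note: in this page-table format a 64 KB large page
                 * occupies 16 consecutive L2 entries, so after clearing
                 * a mapping the cursor advances by (pte_size >> 12)
                 * entries, i.e. one entry per 4 KB page covered.
                 */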
                while (rem_bytes_l2) {
                    pte_val = *(UInt32 *)pte_addr_l2;
                    pte_size = hw_mmu_pte_sizel2(pte_val);
                    /* Valid entry, and vaCurr aligned to pte_size? */
                    if ((pte_size != 0) && (rem_bytes_l2
                        >= pte_size) &&
                        !(vaCurr & (pte_size - 1))) {
                        /* Clear the L2 PTE(s) for this VA */
                        if (hw_mmu_pte_clear(pte_addr_l2,
                            vaCurr, pte_size) == RET_OK) {
                            rem_bytes_l2 -= pte_size;
                            vaCurr += pte_size;
                            pte_addr_l2 += (pte_size >> 12)
                                * sizeof(UInt32);
                        } else {
                            status = -EFAULT;
                            goto EXIT_LOOP;
                        }
                    } else {
                        /*
                         * Invalid or misaligned entry: bail out rather
                         * than spin without making progress
                         */
                        status = -EFAULT;
                        goto EXIT_LOOP;
                    }
                }
                if (rem_bytes_l2 != 0) {
                    status = -EFAULT;
                    goto EXIT_LOOP;
                }
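                /*
                 * Each L2 table's valid-PTE count is decremented below;
                 * once it reaches zero, the L1 coarse descriptor that
                 * points at the (now empty) table is cleared as well.
                 */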
                p_pt_attrs->pg_info[L2_page_num].num_entries -=
                            pte_count;
                if (p_pt_attrs->pg_info[L2_page_num].num_entries
                                    == 0) {
                    /*
                     * Clear the L1 PTE pointing to the
                     * L2 PT
                     */
                    if (RET_OK != hw_mmu_pte_clear(L1_base_va,
                        vaCurrOrig, HW_MMU_COARSE_PAGE_SIZE)) {
                        status = -EFAULT;
                        goto EXIT_LOOP;
                    }
                }
                rem_bytes -= pte_count * PAGE_SIZE;
            } else {
                /* pte_size = 1 MB or 16 MB: section mapped in the L1 PT */
                /* Valid entry, and vaCurr aligned to pte_size? */
                if ((pte_size != 0) && (rem_bytes >= pte_size) &&
                    !(vaCurr & (pte_size - 1))) {
                    /* Clear the L1 section PTE for this VA */
                    if (hw_mmu_pte_clear(L1_base_va, vaCurr,
                            pte_size) == RET_OK) {
                        rem_bytes -= pte_size;
                        vaCurr += pte_size;
                    } else {
                        status = -EFAULT;
                        goto EXIT_LOOP;
                    }
                } else {
                    /*
                     * Invalid or misaligned entry: bail out rather than
                     * spin without making progress
                     */
                    status = -EFAULT;
                    goto EXIT_LOOP;
                }
            }
        }
    }
    /*
     * Flush the TLB so that any stale entries for the unmapped range
     * are removed
     */
EXIT_LOOP:
    if ((halObject != NULL) && (halObject->mmuBase != 0)) {
        hw_mmu_tlb_flushAll(halObject->mmuBase);
    }
    return status;
}

/*========================================
 * This sets up the IPU processor's MMU
 *
 */
Int rproc_ipu_setup (VAYUIPU_HalObject * halObject,
                     ProcMgr_AddrInfo * memEntries,
                     UInt32 numMemEntries)
{
    Int ret_val = 0;
    struct pg_table_attrs * p_pt_attrs = NULL;

    p_pt_attrs = init_mmu_page_attribs(0x10000, 14, 128);
    if (!p_pt_attrs) {
        /* Report the failure and make sure an error is returned */
        ret_val = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "rproc_ipu_setup",
                             ret_val,
                             "init_mmu_page_attribs failed");
    }
    else {
        halObject->mmuObj.pPtAttrs = p_pt_attrs;
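        /*
         * Hardware table-walking (TWL) is turned off first; the MMU is
         * then programmed from the supplied memory entries via
         * rproc_mmu_init(). The commented-out block below would turn
         * TWL back on afterwards, but is intentionally left disabled.
         */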
        /* Disable TWL  */
        ret_val = rproc_set_twl(halObject, FALSE);
        if (ret_val < 0) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "rproc_ipu_setup",
                                 ret_val,
                                 "rproc_set_twl to FALSE failed");
        }
        else {
            ret_val = rproc_mmu_init (halObject, memEntries,
                                      numMemEntries);
            if (ret_val < 0) {
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "rproc_ipu_setup",
                                     ret_val,
                                     "rproc_mmu_init failed");
            }
            else {
    #if 0
                ret_val = rproc_set_twl(halObject, TRUE);
                if (ret_val < 0) {
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "rproc_ipu_setup",
                                         ret_val,
                                         "rproc_set_twl to TRUE failed");
                }
    #endif
            }
        }
    }

    if (ret_val < 0) {
        /* p_pt_attrs may be NULL if init_mmu_page_attribs failed */
        if (p_pt_attrs != NULL) {
            deinit_mmu_page_attribs(p_pt_attrs);
        }
        halObject->mmuObj.pPtAttrs = NULL;
    }

    return ret_val;
}
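
/*
 * Tear down the MMU page-table attributes set up by rproc_ipu_setup
 */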
Void rproc_ipu_destroy(VAYUIPU_HalObject * halObject)
{
    shm_phys_addr = 0;

    if (halObject->mmuObj.pPtAttrs) {
        deinit_mmu_page_attribs(halObject->mmuObj.pPtAttrs);
        halObject->mmuObj.pPtAttrs = NULL;
    }
}
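
/**
 * iotlb_load_cr - Load a cam/ram pair into the TLB
 * @halObject:  HAL object for the target iommu
 * @cr:         contents of cam and ram register
 *
 * Writes the CAM register (with the valid bit set) and the RAM register,
 * then issues FLUSH_ENTRY and LD_TLB so the entry is loaded at the MMU's
 * current victim index.
 **/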
static Void iotlb_load_cr (VAYUIPU_HalObject * halObject,
                           struct cr_regs *cr)
{
    ULONG reg;
    VAYUIpu_MMURegs * mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;

    reg = cr->cam | MMU_CAM_V;
    OUTREG32(&mmuRegs->CAM, reg);

    reg = cr->ram;
    OUTREG32(&mmuRegs->RAM, reg);

    reg = 1;
    OUTREG32(&mmuRegs->FLUSH_ENTRY, reg);

    reg = 1;
    OUTREG32(&mmuRegs->LD_TLB, reg);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @cr:     contents of cam and ram register
 * @buf:    output buffer
 **/
static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf)
{
    Char *p = buf;

    if (!cr || !buf)
        return 0;

    /* FIXME: Needs a more detailed analysis of cam/ram */
    p += sprintf(p, "%08x %08x %01x\n", (unsigned int)cr->cam,
                    (unsigned int)cr->ram,
                    (cr->cam & MMU_CAM_P) ? 1 : 0);
    return (p - buf);
}
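
/*
 * Return the MMU_CAM_V (valid) bit of the entry, or -EINVAL if cr is NULL
 */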
static Int iotlb_cr_valid (struct cr_regs *cr)
{
    if (!cr)
        return -EINVAL;

    return (cr->cam & MMU_CAM_V);
}
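
/*
 * Allocate a cr_regs pair for the given iotlb_entry. The device address
 * must fit the CAM VA mask for e->pgsz (i.e. be aligned to that page
 * size); the pair itself is allocated with mmap(MAP_ANON | MAP_PHYS)
 * and is expected to be released by the caller with munmap(), as
 * load_iotlb_entry below does.
 */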
static struct cr_regs *omap5_alloc_cr (struct iotlb_entry *e)
{
    struct cr_regs *cr;

    if (e->da & ~(get_cam_va_mask(e->pgsz))) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "omap5_alloc_cr",
                             -EINVAL,
                             "failed mask check");
        return NULL;
    }

    cr = mmap(NULL,
              sizeof(struct cr_regs),
              PROT_NOCACHE | PROT_READ | PROT_WRITE,
              MAP_ANON | MAP_PHYS | MAP_PRIVATE,
              NOFD,
              0);

    if (MAP_FAILED == cr) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "omap5_alloc_cr",
                             -EINVAL,
                             "mmap failed");
        return NULL;
    }
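
    /*
     * CAM holds the VA tag plus the preserved, page-size and valid bits;
     * RAM holds the PA plus the endianness, element-size and mixed-page
     * attributes.
     */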
    cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
    cr->ram = e->pa | e->endian | e->elsz | e->mixed;
    return cr;
}

static struct cr_regs *iotlb_alloc_cr (struct iotlb_entry *e)
{
    if (!e) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "iotlb_alloc_cr",
                             -EINVAL,
                             "e is NULL");
        return NULL;
    }

    return omap5_alloc_cr(e);
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @halObject:  HAL object for the target iommu
 * @e:          an iommu tlb entry info
 **/
static Int load_iotlb_entry (VAYUIPU_HalObject * halObject,
                             struct iotlb_entry *e)
{
    Int err = 0;
    struct iotlb_lock l;
    struct cr_regs *cr;

    if (halObject == NULL) {
        err = -EINVAL;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "load_iotlb_entry",
                             err,
                             "halObject is NULL");
        goto out;
    }

    if (halObject->mmuBase == NULL) {
        err = -EINVAL;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "load_iotlb_entry",
                             err,
                             "halObject->mmuBase is NULL");
        goto out;
    }

    if (!e) {
        err = -EINVAL;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "load_iotlb_entry",
                             err,
                             "e is NULL");
        goto out;
    }
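
    /*
     * In the iotlb_lock structure, 'base' is the number of preserved
     * (locked) entries at the bottom of the TLB and 'vict' is the index
     * at which the next entry will be loaded; this MMU has 32 TLB
     * entries, so base == 32 means every entry is locked.
     */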
    iotlb_getLock(halObject, &l);

    if (l.base == 32) {
        err = -EBUSY;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "load_iotlb_entry",
                             err,
                             "l.base is full");
        goto out;
    }
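    /*
     * For a non-preserved entry, scan for the first invalid TLB slot.
     * Assuming for_each_iotlb_cr steps the victim register while reading
     * entries (as in the Linux omap-iommu driver), the iotlb_getLock call
     * after the scan picks up that slot as the victim; preserved entries
     * are instead loaded at the current base index.
     */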
    if (!e->prsvd) {
        int i;
        struct cr_regs tmp;

        for_each_iotlb_cr(32, i, tmp)
            if (!iotlb_cr_valid(&tmp))
                break;

        if (i == 32) {
            err = -EBUSY;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "load_iotlb_entry",
                                 err,
                                 "i == 32");
            goto out;
        }

        iotlb_getLock(halObject, &l);
    } else {
        l.vict = l.base;
        iotlb_setLock(halObject, &l);
    }

    cr = iotlb_alloc_cr(e);
    if (!cr) {
        err = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "load_iotlb_entry",
                             err,
                             "iotlb_alloc_cr failed");
        goto out;
    }

    iotlb_load_cr(halObject, cr);
    munmap(cr, sizeof(struct cr_regs));

    if (e->prsvd)
        l.base++;
    /* Increment the victim index for the next TLB load */
    if (++l.vict == 32)
        l.vict = l.base;
    iotlb_setLock(halObject, &l);

out:
    return err;
}