Add late-attach support for IPUs in QNX
[ipc/ipcdev.git] / qnx / src / ipc3x_dev / ti / syslink / family / vayu / vayuipu / VAYUIpuEnabler.c
1 /*
2  *  @file  VAYUIpuEnabler.c
3  *
4  *  @brief  MMU programming module
5  *
6  *
7  *  ============================================================================
8  *
9  *  Copyright (c) 2013-2014, Texas Instruments Incorporated
10  *
11  *  Redistribution and use in source and binary forms, with or without
12  *  modification, are permitted provided that the following conditions
13  *  are met:
14  *
15  *  *  Redistributions of source code must retain the above copyright
16  *     notice, this list of conditions and the following disclaimer.
17  *
18  *  *  Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *
22  *  *  Neither the name of Texas Instruments Incorporated nor the names of
23  *     its contributors may be used to endorse or promote products derived
24  *     from this software without specific prior written permission.
25  *
26  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
27  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  *  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  *  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  *  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  *  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  *  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
33  *  OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
34  *  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
35  *  OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
36  *  EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  *  Contact information for paper mail:
38  *  Texas Instruments
39  *  Post Office Box 655303
40  *  Dallas, Texas 75265
41  *  Contact information:
42  *  http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
43  *  DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
44  *  ============================================================================
45  *
46  */
48 #include <errno.h>
49 #include <unistd.h>
50 #include <ti/syslink/Std.h>
52 /* OSAL and utils headers */
53 #include <ti/syslink/utils/List.h>
54 #include <ti/syslink/utils/Trace.h>
55 #include <ti/syslink/utils/OsalPrint.h>
57 /* Module level headers */
58 #include <OsalDrv.h>
59 #include <_ProcDefs.h>
60 #include <Processor.h>
61 #include <hw/inout.h>
62 #include <sys/mman.h>
64 #include <hw_defs.h>
65 #include <hw_mmu.h>
66 #include <VAYUIpuHal.h>
67 #include <VAYUIpuHalMmu.h>
68 #include <VAYUIpuEnabler.h>
69 #include <stdbool.h>
70 #include <stdint.h>
73 #define PAGE_SIZE 0x1000
75 /* Size of L1 translation table */
76 #define TRANSLATION_TABLE_SIZE 0x4000
78 /* Attributes of L2 page tables for DSP MMU.*/
79 struct page_info {
80     /* Number of valid PTEs in the L2 PT*/
81     UInt32 num_entries;
82 };
85 /* Attributes used to manage the DSP MMU page tables */
86 struct pg_table_attrs {
87     struct sync_cs_object *hcs_object;/* Critical section object handle */
88     UInt32 l1_base_pa; /* Physical address of the L1 PT */
89     UInt32 l1_base_va; /* Virtual  address of the L1 PT */
90     UInt32 l1_size; /* Size of the L1 PT */
91     UInt32 l1_tbl_alloc_pa;
92     /* Physical address of Allocated mem for L1 table. May not be aligned */
93     UInt32 l1_tbl_alloc_va;
94     /* Virtual address of Allocated mem for L1 table. May not be aligned */
95     UInt32 l1_tbl_alloc_sz;
96     /* Size of consistent memory allocated for L1 table.
97      * May not be aligned */
98     UInt32 l2_base_pa;        /* Physical address of the L2 PT */
99     UInt32 l2_base_va;        /* Virtual  address of the L2 PT */
100     UInt32 l2_size;        /* Size of the L2 PT */
101     UInt32 l2_tbl_alloc_pa;
102     /* Physical address of Allocated mem for L2 table. May not be aligned */
103     UInt32 l2_tbl_alloc_va;
104     /* Virtual address of Allocated mem for L2 table. May not be aligned */
105     UInt32 ls_tbl_alloc_sz;
106     /* Size of consistent memory allocated for L2 table.
107      * May not be aligned */
108     UInt32 l2_num_pages;    /* Number of allocated L2 PT */
109     struct page_info *pg_info;
110 };
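/*
 * MMU entry types and the page sizes they map to in get_mmu_entry_size ():
 * SUPER_SECTION = 16MB, SECTION = 1MB, LARGE_PAGE = 64KB, SMALL_PAGE = 4KB.
 */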
113 enum pagetype {
114     SECTION = 0,
115     LARGE_PAGE = 1,
116     SMALL_PAGE = 2,
117     SUPER_SECTION  = 3
118 };
120 static UInt32 shm_phys_addr;
122 #define INREG32(x) in32((uintptr_t)x)
123 #define OUTREG32(x, y) out32((uintptr_t)x, y)
124 #define SIZE 0x4
126 static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf);
127 static Int load_iotlb_entry (VAYUIPU_HalObject * halObject,
128                              struct iotlb_entry *e);
129 static Int iotlb_cr_valid (struct cr_regs *cr);
131 static Int rproc_mem_map (VAYUIPU_HalObject * halObject,
132                           UInt32 mpu_addr, UInt32 ul_virt_addr,
133                           UInt32 num_bytes, UInt32 map_attr);
134 static Int rproc_mem_unmap (VAYUIPU_HalObject * halObject, UInt32 da,
135                             UInt32 num_bytes);
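/*
 * iotlb_cr_to_e () - unpack a CAM/RAM register pair into an iotlb_entry:
 * device virtual address, physical address, valid/preserved bits, page size,
 * endianness, element size and mixed-region attribute.
 */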
138 static Void iotlb_cr_to_e (struct cr_regs *cr, struct iotlb_entry *e)
140     e->da       = cr->cam & MMU_CAM_VATAG_MASK;
141     e->pa       = cr->ram & MMU_RAM_PADDR_MASK;
142     e->valid    = cr->cam & MMU_CAM_V;
143     e->prsvd    = cr->cam & MMU_CAM_P;
144     e->pgsz     = cr->cam & MMU_CAM_PGSZ_MASK;
145     e->endian   = cr->ram & MMU_RAM_ENDIAN_MASK;
146     e->elsz     = cr->ram & MMU_RAM_ELSZ_MASK;
147     e->mixed    = cr->ram & MMU_RAM_MIXED;
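/*
 * iotlb_getLock ()/iotlb_setLock () - read and write the MMU LOCK register,
 * which holds the number of locked (preserved) TLB entries and the victim
 * index used for the next TLB load.
 */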
150 static Void iotlb_getLock (VAYUIPU_HalObject * halObject,
151                            struct iotlb_lock *l)
153     ULONG reg;
154     VAYUIpu_MMURegs * mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
156     reg = INREG32(&mmuRegs->LOCK);
157     l->base = MMU_LOCK_BASE(reg);
158     l->vict = MMU_LOCK_VICT(reg);
161 static Void iotlb_setLock (VAYUIPU_HalObject * halObject,
162                            struct iotlb_lock *l)
164     ULONG reg;
165     VAYUIpu_MMURegs * mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
167     reg = (l->base << MMU_LOCK_BASE_SHIFT);
168     reg |= (l->vict << MMU_LOCK_VICT_SHIFT);
169     OUTREG32(&mmuRegs->LOCK, reg);
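/*
 * omap5_tlb_read_cr () - read the CAM and RAM registers of the TLB entry
 * currently selected by the LOCK victim index.
 */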
172 static void omap5_tlb_read_cr (VAYUIPU_HalObject * halObject,
173                                struct cr_regs *cr)
175     VAYUIpu_MMURegs * mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
177     cr->cam = INREG32(&mmuRegs->READ_CAM);
178     cr->ram = INREG32(&mmuRegs->READ_RAM);
181 /* only used for iotlb iteration in for-loop */
182 static struct cr_regs __iotlb_read_cr (VAYUIPU_HalObject * halObject,
183                                        int n)
185      struct cr_regs cr;
186      struct iotlb_lock l;
187      iotlb_getLock(halObject, &l);
188      l.vict = n;
189      iotlb_setLock(halObject, &l);
190      omap5_tlb_read_cr(halObject, &cr);
191      return cr;
194 #define for_each_iotlb_cr(n, __i, cr)                \
195     for (__i = 0;                            \
196          (__i < (n)) && (cr = __iotlb_read_cr(halObject, __i), TRUE);    \
197          __i++)
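/*
 * save_tlbs () - snapshot the locked TLB entries (indices below LOCK.base)
 * into halObject->mmuObj.tlbs so they can be reloaded by restore_tlbs ().
 */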
199 static Int save_tlbs (VAYUIPU_HalObject * halObject, UINT32 procId)
201     Int i =0;
202     struct cr_regs cr_tmp;
203     struct iotlb_lock l;
205     iotlb_getLock(halObject, &l);
207     halObject->mmuObj.nrTlbs = l.base;
208     for_each_iotlb_cr(halObject->mmuObj.nrTlbs, i, cr_tmp) {
209         iotlb_cr_to_e(&cr_tmp, &halObject->mmuObj.tlbs[i]);
210     }
212     return 0;
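/*
 * restore_tlbs () - reprogram the TLB entries previously captured by
 * save_tlbs (), starting from entry 0, using load_iotlb_entry ().
 */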
216 static Int restore_tlbs (VAYUIPU_HalObject * halObject, UInt32 procId)
218     Int i = 0;
219     Int status = -1;
220     struct iotlb_lock save;
222     /* Reset the base and victim values */
223     save.base = 0;
224     save.vict = 0;
225     iotlb_setLock(halObject, &save);
227     for (i = 0; i < halObject->mmuObj.nrTlbs; i++) {
228         status = load_iotlb_entry(halObject, &halObject->mmuObj.tlbs[i]);
229         if (status < 0) {
230             GT_setFailureReason (curTrace,
231                                  GT_4CLASS,
232                                  "restore_tlbs",
233                                  status,
234                                  "Error restoring the tlbs");
235             goto err;
236         }
237     }
239     return 0;
241 err:
242     return status;
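/*
 * save_mmu_regs () - copy the first MMU_REGS_SIZE MMU registers into
 * halObject->mmuObj.mmuRegs so that the register context can be restored
 * later by restore_mmu_regs ().
 */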
245 static Int save_mmu_regs (VAYUIPU_HalObject * halObject, UInt32 procId)
247     UInt32 i = 0;
249     if (halObject == NULL) {
250         GT_setFailureReason (curTrace,
251                              GT_4CLASS,
252                              "save_mmu_regs",
253                              -ENOMEM,
254                              "halObject is NULL");
255         return -ENOMEM;
256     }
258     if (halObject->mmuBase == 0) {
259         GT_setFailureReason (curTrace,
260                              GT_4CLASS,
261                              "save_mmu_regs",
262                              -ENOMEM,
263                              "halObject->mmuBase is 0");
264         return -ENOMEM;
265     }
267     for (i = 0; i < MMU_REGS_SIZE; i++) {
268         halObject->mmuObj.mmuRegs[i] = INREG32(halObject->mmuBase + (i * 4));
269     }
271     return 0;
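/*
 * restore_mmu_regs () - write the register values captured by
 * save_mmu_regs () back into the MMU register file.
 */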
274 static Int restore_mmu_regs (VAYUIPU_HalObject * halObject,
275                              UInt32 procId)
277     UInt32 i = 0;
279     if (halObject == NULL) {
280         GT_setFailureReason (curTrace,
281                              GT_4CLASS,
282                              "restore_mmu_regs",
283                              -ENOMEM,
284                              "halObject is NULL");
285         return -ENOMEM;
286     }
288     if (halObject->mmuBase == 0) {
289         GT_setFailureReason (curTrace,
290                              GT_4CLASS,
291                              "restore_mmu_regs",
292                              -ENOMEM,
293                              "halObject->mmuBase is 0");
294         return -ENOMEM;
295     }
297     for (i = 0; i < MMU_REGS_SIZE; i++) {
298         OUTREG32(halObject->mmuBase + (i * 4), halObject->mmuObj.mmuRegs[i]);
299     }
301     return 0;
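/*
 * save_ipucore0_mmu_ctxt ()/restore_ipucore0_mmu_ctxt () - save and restore
 * the complete IPU core0 MMU context (register file plus locked TLB
 * entries), e.g. around a power transition of the remote core.
 */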
304 Int save_ipucore0_mmu_ctxt (VAYUIPU_HalObject * halObject, UInt32 procId)
306     Int status = -1;
308     status = save_mmu_regs(halObject, procId);
309     if (status < 0) {
310         GT_setFailureReason (curTrace,
311                              GT_4CLASS,
312                              "save_ipucore0_mmu_ctxt",
313                              status,
314                              "Unable to save MMU Regs");
315         return status;
316     }
318     status = save_tlbs(halObject, procId);
319     if (status < 0) {
320         GT_setFailureReason (curTrace,
321                              GT_4CLASS,
322                              "save_ipucore0_mmu_ctxt",
323                              status,
324                              "Unable to save TLBs");
325         return status;
326     }
327     return status;
331 Int restore_ipucore0_mmu_ctxt (VAYUIPU_HalObject * halObject,
332                                UInt32 procId)
334     Int status = -1;
336     status = restore_mmu_regs(halObject, procId);
337     if (status < 0) {
338         GT_setFailureReason (curTrace,
339                              GT_4CLASS,
340                              "restore_ipucore0_mmu_ctxt",
341                              status,
342                              "Unable to restore MMU Regs");
343         return status;
344     }
346     status = restore_tlbs(halObject, procId);
347     if (status < 0) {
348         GT_setFailureReason (curTrace,
349                              GT_4CLASS,
350                              "restore_ipucore0_mmu_ctxt",
351                              status,
352                              "Unable to restore TLBS");
353         return status;
354     }
356     return status;
360  /*=========================================
361  * Decides a TLB entry size
362  *
363  */
364 static Int get_mmu_entry_size (UInt32 pa, UInt32 size, enum pagetype *size_tlb,
365                                UInt32 *entry_size)
367     Int     status = 0;
368     Bool    page_align_4kb  = false;
369     Bool    page_align_64kb = false;
370     Bool    page_align_1mb = false;
371     Bool    page_align_16mb = false;
372     UInt32  phys_addr = pa;
375     /*  First check the page alignment*/
376     if ((phys_addr % PAGE_SIZE_4KB)  == 0)
377         page_align_4kb  = true;
378     if ((phys_addr % PAGE_SIZE_64KB) == 0)
379         page_align_64kb = true;
380     if ((phys_addr % PAGE_SIZE_1MB)  == 0)
381         page_align_1mb  = true;
382     if ((phys_addr % PAGE_SIZE_16MB)  == 0)
383         page_align_16mb  = true;
385     if ((!page_align_64kb) && (!page_align_1mb)  && (!page_align_4kb)) {
386         status = -EINVAL;
387         GT_setFailureReason (curTrace,
388                              GT_4CLASS,
389                              "get_mmu_entry_size",
390                              status,
391                              "phys_addr is not properly aligned");
392         goto error_exit;
393     }
395     /*  Now decide the entry size */
396     if (size >= PAGE_SIZE_16MB) {
397         if (page_align_16mb) {
398             *size_tlb   = SUPER_SECTION;
399             *entry_size = PAGE_SIZE_16MB;
400         } else if (page_align_1mb) {
401             *size_tlb   = SECTION;
402             *entry_size = PAGE_SIZE_1MB;
403         } else if (page_align_64kb) {
404             *size_tlb   = LARGE_PAGE;
405             *entry_size = PAGE_SIZE_64KB;
406         } else if (page_align_4kb) {
407             *size_tlb   = SMALL_PAGE;
408             *entry_size = PAGE_SIZE_4KB;
409         } else {
410             status = -EINVAL;
411             GT_setFailureReason (curTrace,
412                                  GT_4CLASS,
413                                  "get_mmu_entry_size",
414                                  status,
415                                  "size and alignment are invalid");
416             goto error_exit;
417         }
418     } else if (size >= PAGE_SIZE_1MB && size < PAGE_SIZE_16MB) {
419         if (page_align_1mb) {
420             *size_tlb   = SECTION;
421             *entry_size = PAGE_SIZE_1MB;
422         } else if (page_align_64kb) {
423             *size_tlb   = LARGE_PAGE;
424             *entry_size = PAGE_SIZE_64KB;
425         } else if (page_align_4kb) {
426             *size_tlb   = SMALL_PAGE;
427             *entry_size = PAGE_SIZE_4KB;
428         } else {
429             status = -EINVAL;
430             GT_setFailureReason (curTrace,
431                                  GT_4CLASS,
432                                  "get_mmu_entry_size",
433                                  status,
434                                  "size and alignment are invalid");
435             goto error_exit;
436         }
437     } else if (size > PAGE_SIZE_4KB && size < PAGE_SIZE_1MB) {
438         if (page_align_64kb) {
439             *size_tlb   = LARGE_PAGE;
440             *entry_size = PAGE_SIZE_64KB;
441         } else if (page_align_4kb) {
442             *size_tlb   = SMALL_PAGE;
443             *entry_size = PAGE_SIZE_4KB;
444         } else {
445             status = -EINVAL;
446             GT_setFailureReason (curTrace,
447                                  GT_4CLASS,
448                                  "get_mmu_entry_size",
449                                  status,
450                                  "size and alignment are invalid");
451             goto error_exit;
452         }
453     } else if (size == PAGE_SIZE_4KB) {
454         if (page_align_4kb) {
455             *size_tlb   = SMALL_PAGE;
456             *entry_size = PAGE_SIZE_4KB;
457         } else {
458             status = -EINVAL;
459             GT_setFailureReason (curTrace,
460                                  GT_4CLASS,
461                                  "get_mmu_entry_size",
462                                  status,
463                                  "size and alignment are invalid");
464             goto error_exit;
465         }
466     } else {
467         status = -EINVAL;
468         GT_setFailureReason (curTrace,
469                              GT_4CLASS,
470                              "get_mmu_entry_size",
471                              status,
472                              "size is invalid");
473         goto error_exit;
474     }
475     return 0;
477 error_exit:
478     return status;
481 /*
482  * Note: add_dsp_mmu_entry is left here, commented out, so that it remains
483  * available in the future in case static TLB entries need to be added
484  * outside of the translation table for faster access.
485  */
486 #if 0
487 /*=========================================
488  * Add DSP MMU entries corresponding to given MPU-Physical address
489  * and DSP-virtual address
490  */
491 static Int add_dsp_mmu_entry (VAYUIPU_HalObject * halObject,
492                               UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
494     UInt32 mapped_size = 0;
495     enum pagetype size_tlb = SECTION;
496     UInt32 entry_size = 0;
497     int status = 0;
498     struct iotlb_entry tlb_entry;
499     int retval = 0;
501     while ((mapped_size < size) && (status == 0)) {
502         status = get_mmu_entry_size(*phys_addr, (size - mapped_size),
503                                     &size_tlb, &entry_size);
504         if (status < 0) {
505             GT_setFailureReason (curTrace,
506                                  GT_4CLASS,
507                                  "add_dsp_mmu_entry",
508                                  status,
509                                  "get_mmu_entry_size failed");
510             goto error_exit;
511         }
513         if (size_tlb == SUPER_SECTION)
514             tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
516         else if (size_tlb == SECTION)
517             tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
519         else if (size_tlb == LARGE_PAGE)
520             tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
522         else if (size_tlb == SMALL_PAGE)
523             tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
525         tlb_entry.elsz = MMU_RAM_ELSZ_16;
526         tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
527         tlb_entry.mixed = MMU_RAM_MIXED;
528         tlb_entry.prsvd = MMU_CAM_P;
529         tlb_entry.valid = MMU_CAM_V;
531         tlb_entry.da = *dsp_addr;
532         tlb_entry.pa = *phys_addr;
533         retval = load_iotlb_entry(halObject, &tlb_entry);
534         if (retval < 0) {
535             GT_setFailureReason (curTrace,
536                                  GT_4CLASS,
537                                  "add_dsp_mmu_entry",
538                                  retval,
539                                  "load_iotlb_entry failed");
540             goto error_exit;
541         }
542         mapped_size  += entry_size;
543         *phys_addr   += entry_size;
544         *dsp_addr   += entry_size;
545     }
547     return 0;
549 error_exit:
550     printf("pte set failure retval = 0x%x, status = 0x%x \n",
551                             retval, status);
553     return retval;
555 #endif
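/*
 * add_entry_ext () - map a physically contiguous region into the IPU MMU by
 * repeatedly selecting the largest page size that the current alignment
 * allows (16MB/1MB/64KB/4KB, via get_mmu_entry_size) and programming each
 * chunk through rproc_mem_map () with physical-address mapping attributes.
 */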
557 static Int add_entry_ext (VAYUIPU_HalObject * halObject,
558                           UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
560     UInt32 mapped_size = 0;
561     enum pagetype     size_tlb = SECTION;
562     UInt32 entry_size = 0;
563     Int status = 0;
564     UInt32 page_size = HW_PAGE_SIZE_1MB;
565     UInt32 flags = 0;
567     flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
568                     DSP_MAPPHYSICALADDR);
569     while ((mapped_size < size) && (status == 0)) {
571         /* get_mmu_entry_size fills size_tlb and entry_size based on the
572          * alignment of phys_addr and the amount of memory remaining
573          * to be mapped */
574         status = get_mmu_entry_size (*phys_addr,
575                                      (size - mapped_size),
576                                      &size_tlb,
577                                      &entry_size);
578         if (status < 0) {
579             GT_setFailureReason (curTrace,
580                                  GT_4CLASS,
581                                  "add_entry_ext",
582                                  status,
583                                  "get_mmu_entry_size failed");
584             break;
585         }
586         else {
587             if (size_tlb == SUPER_SECTION)
588                 page_size = HW_PAGE_SIZE_16MB;
589             else if (size_tlb == SECTION)
590                 page_size = HW_PAGE_SIZE_1MB;
591             else if (size_tlb == LARGE_PAGE)
592                 page_size = HW_PAGE_SIZE_64KB;
593             else if (size_tlb == SMALL_PAGE)
594                 page_size = HW_PAGE_SIZE_4KB;
596             if (status == 0) {
597                 status = rproc_mem_map (halObject,
598                                         *phys_addr,
599                                         *dsp_addr,
600                                         page_size,
601                                         flags);
602                 if (status < 0) {
603                     GT_setFailureReason (curTrace,
604                                          GT_4CLASS,
605                                          "add_entry_ext",
606                                          status,
607                                          "rproc_mem_map failed");
608                     break;
609                 }
610                 mapped_size  += entry_size;
611                 *phys_addr   += entry_size;
612                 *dsp_addr   += entry_size;
613             }
614         }
615     }
616     return status;
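/*
 * __dump_tlb_entries () - walk up to 'num' TLB entries, copy the valid ones
 * into 'crs' and return how many were collected; the lock/victim settings
 * saved at entry are restored before returning.
 */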
619 static Int __dump_tlb_entries (VAYUIPU_HalObject * halObject,
620                                struct cr_regs *crs, int num)
622     int i;
623     struct iotlb_lock saved;
624     struct cr_regs tmp;
625     struct cr_regs *p = crs;
627     iotlb_getLock(halObject, &saved);
628     for_each_iotlb_cr(num, i, tmp) {
629         if (!iotlb_cr_valid(&tmp))
630             continue;
631         *p++ = tmp;
632     }
633     iotlb_setLock(halObject, &saved);
634     return  p - crs;
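/*
 * get_IpuCore0VirtAdd () - reverse lookup: scan the current TLB entries for
 * the one whose physical (RAM) address range covers physAdd and return the
 * corresponding IPU virtual address (CAM base plus offset within the page),
 * or 0 if nothing matches or shared memory has not been initialized.
 */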
637 UInt32 get_IpuCore0VirtAdd(VAYUIPU_HalObject * halObject, UInt32 physAdd)
639     int i, num;
640     struct cr_regs *cr;
641     struct cr_regs *p = NULL;
642     //DWORD dwPhys;
643     UInt32 lRetVal = 0;
644     num = 32;
645     if(shm_phys_addr == 0)
646         return 0;
647     cr = mmap(NULL,
648               sizeof(struct cr_regs) * num,
649               PROT_NOCACHE | PROT_READ | PROT_WRITE,
650               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
651               NOFD,
652               0);
653     if (cr == MAP_FAILED)
654     {
655         return 0;
656     }
658     memset(cr, 0, sizeof(struct cr_regs) * num);
660     num = __dump_tlb_entries(halObject, cr, num);
661     for (i = 0; i < num; i++)
662     {
663         p = cr + i;
664         if(physAdd >= (p->ram & 0xFFFFF000) &&  physAdd < ((p + 1)->ram & 0xFFFFF000))
665         {
666             lRetVal = ((p->cam & 0xFFFFF000) + (physAdd - (p->ram & 0xFFFFF000)));
667         }
668     }
669     munmap(cr, sizeof(struct cr_regs) * num);
671     return lRetVal;
675 /**
676  * dump_tlb_entries - dump cr arrays to given buffer
677  * @obj:    target iommu
678  * @buf:    output buffer
679  **/
680 static UInt32 dump_tlb_entries (VAYUIPU_HalObject * halObject,
681                                 char *buf, UInt32 bytes)
683     Int i, num;
684     struct cr_regs *cr;
685     Char *p = buf;
687     num = bytes / sizeof(*cr);
688     num = min(32, num);
689     cr = mmap(NULL,
690             sizeof(struct cr_regs) * num,
691               PROT_NOCACHE | PROT_READ | PROT_WRITE,
692               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
693               NOFD,
694               0);
695     if (cr == MAP_FAILED)
696     {
697         return 0;
699     }
700     memset(cr, 0, sizeof(struct cr_regs) * num);
702     num = __dump_tlb_entries(halObject, cr, num);
703     for (i = 0; i < num; i++)
704         p += iotlb_dump_cr(cr + i, p);
705     munmap(cr, sizeof(struct cr_regs) * num);
706     return p - buf;
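/*
 * rproc_tlb_dump () - debug helper: formats the current TLB contents into a
 * temporary buffer via dump_tlb_entries () and then releases the buffer.
 */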
710 static Void rproc_tlb_dump (VAYUIPU_HalObject * halObject)
712     Char *p;
714     p = mmap(NULL,
715              1000,
716              PROT_NOCACHE | PROT_READ | PROT_WRITE,
717              MAP_ANON | MAP_PHYS | MAP_PRIVATE,
718              NOFD,
719              0);
720     if (MAP_FAILED != p)
721     {
722         dump_tlb_entries(halObject, p, 1000);
723         munmap(p, 1000);
724     }
726     return;
730 /*================================
731  * Initialize the IPU MMU.
732  *===============================*/
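/*
 * Sequence: disable the MMU and TWL, program a mapping for every entry in
 * memEntries via add_entry_ext (), point the TTB register at the physical
 * address of the L1 page table, then re-enable the TWL and the MMU.
 */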
734 static Int rproc_mmu_init (VAYUIPU_HalObject * halObject,
735                            ProcMgr_AddrInfo * memEntries,
736                            UInt32 numMemEntries)
738     Int ret_val = 0;
739     UInt32 phys_addr = 0;
740     UInt32 i = 0;
741     UInt32 virt_addr = 0;
742     VAYUIpu_MMURegs * mmuRegs = NULL;
744     if (halObject == NULL) {
745         ret_val = -ENOMEM;
746         GT_setFailureReason (curTrace,
747                              GT_4CLASS,
748                              "rproc_mmu_init",
749                              ret_val,
750                              "halObject is NULL");
751         goto error_exit;
752     }
754     if (halObject->mmuBase == 0) {
755         ret_val = -ENOMEM;
756         GT_setFailureReason (curTrace,
757                              GT_4CLASS,
758                              "rproc_mmu_init",
759                              ret_val,
760                              "halObject->mmuBase is 0");
761         goto error_exit;
762     }
763     mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
765     /*  Disable the MMU & TWL */
766     hw_mmu_disable(halObject->mmuBase);
767     hw_mmu_twl_disable(halObject->mmuBase);
769     printf("  Programming IPU memory regions\n");
770     printf("=========================================\n");
772     for (i = 0; i < numMemEntries; i++) {
773         phys_addr = memEntries[i].addr[ProcMgr_AddrType_MasterPhys];
774         if (phys_addr == (UInt32)(-1) || phys_addr == 0) {
775             GT_setFailureReason (curTrace,
776                                  GT_4CLASS,
777                                  "rproc_mmu_init",
778                                  ret_val,
779                                  "phys_addr is invalid");
780             goto error_exit;
781         }
782         printf( "VA = [0x%x] of size [0x%x] at PA = [0x%x]\n",
783                 memEntries[i].addr[ProcMgr_AddrType_SlaveVirt],
784                 memEntries[i].size,
785                 (unsigned int)phys_addr);
787         /* VAYU SDC code */
788         /* Adjust below logic if using cacheable shared memory */
789         shm_phys_addr = 1;
790         virt_addr = memEntries[i].addr[ProcMgr_AddrType_SlaveVirt];
792         ret_val = add_entry_ext(halObject, &phys_addr, &virt_addr,
793                                     (memEntries[i].size));
794         if (ret_val < 0) {
795             GT_setFailureReason (curTrace,
796                                  GT_4CLASS,
797                                  "rproc_mmu_init",
798                                  ret_val,
799                                  "add_entry_ext failed");
800             goto error_exit;
801         }
802     }
804     /* Set the TTB to point to the L1 page table's physical address */
805     OUTREG32(&mmuRegs->TTB,
806            ((struct pg_table_attrs *)(halObject->mmuObj.pPtAttrs))->l1_base_pa);
808     /* Enable the TWL */
809     hw_mmu_twl_enable(halObject->mmuBase);
811     hw_mmu_enable(halObject->mmuBase);
813     rproc_tlb_dump(halObject);
815     return 0;
816 error_exit:
817     return ret_val;
821 /****************************************************
823 *  Function which sets the TWL of the remote core
826 *****************************************************/
828 static Int rproc_set_twl (VAYUIPU_HalObject * halObject, Bool on)
830     Int status = 0;
831     VAYUIpu_MMURegs * mmuRegs = NULL;
832     ULONG reg;
834     if (halObject == NULL) {
835         status = -ENOMEM;
836         GT_setFailureReason (curTrace,
837                              GT_4CLASS,
838                              "rproc_set_twl",
839                              status,
840                              "halObject is NULL");
841     }
842     else if (halObject->mmuBase == 0) {
843         status = -ENOMEM;
844         GT_setFailureReason (curTrace,
845                              GT_4CLASS,
846                              "rproc_set_twl",
847                              status,
848                              "halObject->mmuBase is NULL");
849     }
850     else {
851         mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
853         /* Setting MMU to Smart Idle Mode */
854         reg = INREG32(&mmuRegs->SYSCONFIG);
855         reg &= ~MMU_SYS_IDLE_MASK;
856         reg |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
857         OUTREG32(&mmuRegs->SYSCONFIG, reg);
859         /* Enabling MMU */
860         reg =  INREG32(&mmuRegs->CNTL);
862         if (on)
863             OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TWL_MASK);
864         else
865             OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TLB_MISS_MASK);
867         reg &= ~MMU_CNTL_MASK;
868         if (on)
869             reg |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
870         else
871             reg |= (MMU_CNTL_MMU_EN);
873         OUTREG32(&mmuRegs->CNTL, reg);
874     }
876     return status;
880 /*========================================
881  * This sets up the IPU processor MMU Page tables
882  *
883  */
884 static struct pg_table_attrs * init_mmu_page_attribs (UInt32 l1_size,
885                                                       UInt32 l1_align,
886                                                       UInt32 ls_num_of_pages)
888     struct pg_table_attrs * p_pt_attrs = NULL;
889     UInt32 pg_tbl_pa = 0;
890     off64_t offset = 0;
891     UInt32 pg_tbl_va = 0;
892     UInt32 align_size = 0;
893     UInt32 len = 0;
894     int status = 0;
896     p_pt_attrs = Memory_alloc (NULL, sizeof(struct pg_table_attrs), 0, NULL);
897     if (p_pt_attrs)
898         Memory_set (p_pt_attrs, 0, sizeof(struct pg_table_attrs));
899     else {
900         status = -ENOMEM;
901         GT_setFailureReason (curTrace,
902                              GT_4CLASS,
903                              "init_mmu_page_attribs",
904                              status,
905                              "Memory_alloc failed");
906         goto error_exit;
907     }
909     p_pt_attrs->l1_size = l1_size;
910     align_size = p_pt_attrs->l1_size;
911     p_pt_attrs->l1_tbl_alloc_sz = 0x100000;
912     /* Align sizes are expected to be power of 2 */
913     /* we like to get aligned on L1 table size */
914     pg_tbl_va = (UInt32) mmap64 (NULL,
915                                  p_pt_attrs->l1_tbl_alloc_sz,
916                                  PROT_NOCACHE | PROT_READ | PROT_WRITE,
917                                  MAP_ANON | MAP_PHYS | MAP_PRIVATE,
918                                  NOFD,
919                                  0x0);
920     if (pg_tbl_va == (UInt32)MAP_FAILED) {
921         pg_tbl_va = 0;
922         status = -ENOMEM;
923         GT_setFailureReason (curTrace,
924                              GT_4CLASS,
925                              "init_mmu_page_attribs",
926                              status,
927                              "mmap64 failed");
928         goto error_exit;
929     }
930     else {
931         /* Make sure the memory is contiguous */
932         status = mem_offset64 ((void *)pg_tbl_va, NOFD,
933                                p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
934         pg_tbl_pa = (UInt32)offset;
935         if (len != p_pt_attrs->l1_tbl_alloc_sz) {
936             status = -ENOMEM;
937             GT_setFailureReason (curTrace,
938                                  GT_4CLASS,
939                                  "init_mmu_page_attribs",
940                                  status,
941                                  "phys mem is not contiguous");
942         }
943         if (status != 0) {
944             GT_setFailureReason (curTrace,
945                                  GT_4CLASS,
946                                  "init_mmu_page_attribs",
947                                  status,
948                                  "mem_offset64 failed");
949             goto error_exit;
950         }
951     }
952     /* Check if the PA is aligned for us */
953     if ((pg_tbl_pa) & (align_size-1)) {
954         /* PA not aligned to page table size, */
955         /* try with more allocation and align */
956         munmap((void *)pg_tbl_va, p_pt_attrs->l1_tbl_alloc_sz);
957         p_pt_attrs->l1_tbl_alloc_sz = p_pt_attrs->l1_tbl_alloc_sz*2;
958         /* we like to get aligned on L1 table size */
959         pg_tbl_va = (UInt32) mmap64 (NULL,
960                                      p_pt_attrs->l1_tbl_alloc_sz,
961                                      PROT_NOCACHE | PROT_READ | PROT_WRITE,
962                                      MAP_ANON | MAP_PHYS | MAP_PRIVATE,
963                                      NOFD,
964                                      0);
965         if (pg_tbl_va == (UInt32)MAP_FAILED) {
966             pg_tbl_va = 0;
967             status = -ENOMEM;
968             GT_setFailureReason (curTrace,
969                                  GT_4CLASS,
970                                  "init_mmu_page_attribs",
971                                  status,
972                                  "mmap64 failed");
973             goto error_exit;
974         }
975         else {
976             /* Make sure the memory is contiguous */
977             status = mem_offset64 ((void *)pg_tbl_va, NOFD,
978                                    p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
979             pg_tbl_pa = (UInt32)offset;
980             if (len != p_pt_attrs->l1_tbl_alloc_sz) {
981                 status = -ENOMEM;
982                 GT_setFailureReason (curTrace,
983                                      GT_4CLASS,
984                                      "init_mmu_page_attribs",
985                                      status,
986                                      "phys mem is not contiguous");
987             }
988             if (status != 0) {
989                 GT_setFailureReason (curTrace,
990                                      GT_4CLASS,
991                                      "init_mmu_page_attribs",
992                                      status,
993                                      "mem_offset64 failed");
994                 goto error_exit;
995             }
996         }
997         /* We should be able to get aligned table now */
998         p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
999         p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
1000         /* Align the PA to the next 'align'  boundary */
1001         p_pt_attrs->l1_base_pa = ((pg_tbl_pa) + (align_size-1)) &
1002                             (~(align_size-1));
1003         p_pt_attrs->l1_base_va = pg_tbl_va + (p_pt_attrs->l1_base_pa -
1004                                 pg_tbl_pa);
1005     } else {
1006         /* We got aligned PA, cool */
1007         p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
1008         p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
1009         p_pt_attrs->l1_base_pa = pg_tbl_pa;
1010         p_pt_attrs->l1_base_va = pg_tbl_va;
1011     }
1013     if (p_pt_attrs->l1_base_va)
1014         memset((UInt8*)p_pt_attrs->l1_base_va, 0x00, p_pt_attrs->l1_size);
1015     p_pt_attrs->l2_num_pages = ls_num_of_pages;
1016     p_pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * p_pt_attrs->l2_num_pages;
1017     align_size = 4; /* Make it UInt32 aligned  */
1018     /* we like to get aligned on L1 table size */
1019     pg_tbl_va = p_pt_attrs->l1_base_va + 0x80000;
1020     pg_tbl_pa = p_pt_attrs->l1_base_pa + 0x80000;
1021     p_pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
1022     p_pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
1023     p_pt_attrs->ls_tbl_alloc_sz = p_pt_attrs->l2_size;
1024     p_pt_attrs->l2_base_pa = pg_tbl_pa;
1025     p_pt_attrs->l2_base_va = pg_tbl_va;
1026     if (p_pt_attrs->l2_base_va)
1027         memset((UInt8*)p_pt_attrs->l2_base_va, 0x00, p_pt_attrs->l2_size);
1029     p_pt_attrs->pg_info = Memory_alloc(NULL, sizeof(struct page_info) * p_pt_attrs->l2_num_pages, 0, NULL);
1030     if (p_pt_attrs->pg_info)
1031         Memory_set (p_pt_attrs->pg_info, 0, sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
1032     else {
1033         status = -ENOMEM;
1034         GT_setFailureReason (curTrace,
1035                              GT_4CLASS,
1036                              "init_mmu_page_attribs",
1037                              status,
1038                              "Memory_alloc failed");
1039         goto error_exit;
1040     }
1041     return p_pt_attrs;
1043 error_exit:
1044     if (p_pt_attrs) {
1045         if (p_pt_attrs->pg_info)
1046             Memory_free (NULL, p_pt_attrs->pg_info, sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
1047         if (p_pt_attrs->l1_tbl_alloc_va) {
1048             munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
1049                     p_pt_attrs->l1_tbl_alloc_sz);
1050         }
1051         Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
1052         p_pt_attrs = NULL;
1053     }
1055     return NULL;
1059 /*========================================
1060  * This destroys the IPU processor MMU Page tables
1061  *
1062  */
1063 static Void deinit_mmu_page_attribs (struct pg_table_attrs * p_pt_attrs)
1065     if (p_pt_attrs) {
1066         if (p_pt_attrs->pg_info)
1067             Memory_free (NULL, p_pt_attrs->pg_info, sizeof(struct page_info) * p_pt_attrs->l2_num_pages);
1068         if (p_pt_attrs->l1_tbl_alloc_va) {
1069             munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
1070                     p_pt_attrs->l1_tbl_alloc_sz);
1071         }
1072         Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
1073         p_pt_attrs = NULL;
1074     }
1078 /*============================================
1079  * This function calculates PTE address (MPU virtual) to be updated
1080  *  It also manages the L2 page tables
1081  */
1082 static Int pte_set (UInt32 pa, UInt32 va, UInt32 size,
1083                     struct hw_mmu_map_attrs_t *attrs, struct pg_table_attrs *pt_Table)
1085     UInt32 i;
1086     UInt32 pte_val;
1087     UInt32 pte_addr_l1;
1088     UInt32 pte_size;
1089     UInt32 pg_tbl_va; /* Base address of the PT that will be updated */
1090     UInt32 l1_base_va;
1091     /* The compiler warns that the next three variables might be used
1092      * uninitialized in this function. That does not appear to be the case,
1093      * but they are initialized anyway to silence the warning. */
1094     UInt32 l2_base_va = 0;
1095     UInt32 l2_base_pa = 0;
1096     UInt32 l2_page_num = 0;
1097     struct pg_table_attrs *pt = pt_Table;
1098     int status = 0;
1099     VAYUIPU_HalMmuEntryInfo setPteInfo;
1101     l1_base_va = pt->l1_base_va;
1102     pg_tbl_va = l1_base_va;
1104     if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) {
1105         /* Find whether the L1 PTE points to a valid L2 PT */
1106         pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1107         if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1108             pte_val = *(UInt32 *)pte_addr_l1;
1109             pte_size = hw_mmu_pte_sizel1(pte_val);
1110         } else {
1111             return -EINVAL;
1112         }
1113         /* FIX ME */
1114     /* TODO: add synchronization element */
1115         /*        sync_enter_cs(pt->hcs_object);*/
1116         if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1117             /* Get the L2 PA from the L1 PTE, and find
1118              * corresponding L2 VA */
1119             l2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1120             l2_base_va = l2_base_pa - pt->l2_base_pa +
1121             pt->l2_base_va;
1122             l2_page_num = (l2_base_pa - pt->l2_base_pa) /
1123                     HW_MMU_COARSE_PAGE_SIZE;
1124         } else if (pte_size == 0) {
1125             /* L1 PTE is invalid. Allocate a L2 PT and
1126              * point the L1 PTE to it */
1127             /* Find a free L2 PT. */
1128             for (i = 0; (i < pt->l2_num_pages) &&
1129                 (pt->pg_info[i].num_entries != 0); i++)
1130                 ;
1131             if (i < pt->l2_num_pages) {
1132                 l2_page_num = i;
1133                 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1134                        HW_MMU_COARSE_PAGE_SIZE);
1135                 l2_base_va = pt->l2_base_va + (l2_page_num *
1136                        HW_MMU_COARSE_PAGE_SIZE);
1137                 /* Endianness attributes are ignored for
1138                  * HW_MMU_COARSE_PAGE_SIZE */
1139                 status = hw_mmu_pte_set(pg_tbl_va, l2_base_pa, va,
1140                                         HW_MMU_COARSE_PAGE_SIZE, attrs);
1141             } else {
1142                 status = -ENOMEM;
1143             }
1144         } else {
1145             /* Found valid L1 PTE of another size.
1146              * Should not overwrite it. */
1147             status = -EINVAL;
1148         }
1149         if (status == 0) {
1150             pg_tbl_va = l2_base_va;
1151             if (size == HW_PAGE_SIZE_64KB)
1152                 pt->pg_info[l2_page_num].num_entries += 16;
1153             else
1154                 pt->pg_info[l2_page_num].num_entries++;
1155         }
1156     }
1157     if (status == 0) {
1158         setPteInfo.elementSize = attrs->element_size;
1159         setPteInfo.endianism = attrs->endianism;
1160         setPteInfo.masterPhyAddr = pa;
1161         setPteInfo.mixedSize = attrs->mixedSize;
1162         setPteInfo.size = size;
1163         setPteInfo.slaveVirtAddr = va;
1164         status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1165         if (status == RET_OK)
1166             status = 0;
1167     }
1168     return status;
1172 /*=============================================
1173  * This function calculates the optimum page-aligned addresses and sizes
1174  * Caller must pass page-aligned values
1175  */
1176 static Int pte_update (UInt32 pa, UInt32 va, UInt32 size,
1177                        struct hw_mmu_map_attrs_t *map_attrs, struct pg_table_attrs *pt_Table)
1179     UInt32 i;
1180     UInt32 all_bits;
1181     UInt32 pa_curr = pa;
1182     UInt32 va_curr = va;
1183     UInt32 num_bytes = size;
1184     Int status = 0;
1185     UInt32 pg_size[] = {HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB,
1186                HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB};
1187     while (num_bytes && (status == 0)) {
1188         /* To find the max. page size with which both PA & VA are
1189          * aligned */
1190         all_bits = pa_curr | va_curr;
1191         for (i = 0; i < 4; i++) {
1192             if ((num_bytes >= pg_size[i]) && ((all_bits &
1193                (pg_size[i] - 1)) == 0)) {
1194                 status = pte_set(pa_curr,
1195                     va_curr, pg_size[i], map_attrs, pt_Table);
1196                 pa_curr += pg_size[i];
1197                 va_curr += pg_size[i];
1198                 num_bytes -= pg_size[i];
1199                  /* Don't try smaller sizes. Hopefully we have
1200                  * reached an address aligned to a bigger page
1201                  * size */
1202                 break;
1203             }
1204         }
1205     }
1206     return status;
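/*
 * Example: for a request of 0x101000 bytes with both PA and VA aligned to
 * 1MB, pte_update () programs one 1MB section followed by one 4KB small
 * page; the 64KB size is skipped because only 0x1000 bytes remain after the
 * 1MB entry.
 */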
1210 /*============================================
1211  * This function maps an MPU buffer into the slave (IPU) address space. It
1212  * performs linear-to-physical address translation if required. It translates
1213  * each page since linear addresses can be physically non-contiguous.
1214  * All address & size arguments are assumed to be page aligned (in proc.c).
1215  *
1216  */
1217 static Int rproc_mem_map (VAYUIPU_HalObject * halObject,
1218                           UInt32 mpu_addr, UInt32 ul_virt_addr,
1219                           UInt32 num_bytes, UInt32 map_attr)
1221     UInt32 attrs;
1222     Int status = 0;
1223     struct hw_mmu_map_attrs_t hw_attrs;
1224     Int pg_i = 0;
1226     if (halObject == NULL) {
1227         status = -ENOMEM;
1228         GT_setFailureReason (curTrace,
1229                              GT_4CLASS,
1230                              "rproc_mem_map",
1231                              status,
1232                              "halObject is NULL");
1233     }
1234     else if (halObject->mmuBase == 0) {
1235         status = -ENOMEM;
1236         GT_setFailureReason (curTrace,
1237                              GT_4CLASS,
1238                              "rproc_mem_map",
1239                              status,
1240                              "halObject->mmuBase is 0");
1241     }
1242     else if (num_bytes == 0) {
1243         status = -EINVAL;
1244         GT_setFailureReason (curTrace,
1245                              GT_4CLASS,
1246                              "rproc_mem_map",
1247                              status,
1248                              "num_bytes is 0");
1249     }
1250     else {
1251         if (map_attr != 0) {
1252             attrs = map_attr;
1253             attrs |= DSP_MAPELEMSIZE32;
1254         } else {
1255             /* Assign default attributes */
1256             attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE32;
1257         }
1258         /* Take mapping properties */
1259         if (attrs & DSP_MAPBIGENDIAN)
1260             hw_attrs.endianism = HW_BIG_ENDIAN;
1261         else
1262             hw_attrs.endianism = HW_LITTLE_ENDIAN;
1264         hw_attrs.mixedSize = (enum hw_mmu_mixed_size_t)
1265                      ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1266         /* Ignore element_size if mixedSize is enabled */
1267         if (hw_attrs.mixedSize == 0) {
1268             if (attrs & DSP_MAPELEMSIZE8) {
1269                 /* Size is 8 bit */
1270                 hw_attrs.element_size = HW_ELEM_SIZE_8BIT;
1271             } else if (attrs & DSP_MAPELEMSIZE16) {
1272                 /* Size is 16 bit */
1273                 hw_attrs.element_size = HW_ELEM_SIZE_16BIT;
1274             } else if (attrs & DSP_MAPELEMSIZE32) {
1275                 /* Size is 32 bit */
1276                 hw_attrs.element_size = HW_ELEM_SIZE_32BIT;
1277             } else if (attrs & DSP_MAPELEMSIZE64) {
1278                 /* Size is 64 bit */
1279                 hw_attrs.element_size = HW_ELEM_SIZE_64BIT;
1280             } else {
1281                 /* Mixedsize isn't enabled, so size can't be
1282                  * zero here */
1283                 status = -EINVAL;
1284                 GT_setFailureReason (curTrace,
1285                                      GT_4CLASS,
1286                                      "rproc_mem_map",
1287                                      status,
1288                                      "MMU element size is zero");
1289             }
1290         }
1292         if (status >= 0) {
1293             /*
1294              * Do OS-specific user-va to pa translation.
1295              * Combine physically contiguous regions to reduce TLBs.
1296              * Pass the translated pa to PteUpdate.
1297              */
1298             if ((attrs & DSP_MAPPHYSICALADDR)) {
1299                 status = pte_update(mpu_addr, ul_virt_addr, num_bytes,
1300                            &hw_attrs,
1301                            (struct pg_table_attrs *)halObject->mmuObj.pPtAttrs);
1302             }
1304             /* Don't propagate Linux or HW status to upper layers */
1305             if (status < 0) {
1306                 /*
1307                  * Roll back the pages mapped so far in case the mapping
1308                  * failed part-way through
1309                  */
1310                 if (pg_i)
1311                     rproc_mem_unmap(halObject, ul_virt_addr,
1312                                     (pg_i * PAGE_SIZE));
1313             }
1315             /* In any case, flush the TLB
1316              * This is called from here instead of from pte_update to avoid
1317              * unnecessary repetition while mapping non-contiguous physical
1318              * regions of a virtual region */
1319             hw_mmu_tlb_flushAll(halObject->mmuBase);
1320         }
1321     }
1322     return status;
1325 /*
1326  *  ======== rproc_mem_lookup ========
1327  *  Look up the physical address of a virtual address based on PTEs
1328  */
1329 Int rproc_mem_lookup(VAYUIPU_HalObject * halObject,
1330     UInt32 da, UInt32 * pAddr)
1332     UInt32 L1_base_va = 0;
1333     UInt32 L2_base_va = 0;
1334     UInt32 L2_base_pa;
1335     UInt32 pte_val;
1336     UInt32 pte_size;
1337     UInt32 pte_addr_l1;
1338     UInt32 pte_addr_l2 = 0;
1339     UInt32 vaCurr;
1340     Int status = 0;
1341     VAYUIpu_MMURegs * mmuRegs;
1342     UInt32 tableBaseAddr = 0;
1344     if (halObject == NULL) {
1345         status = -ENOMEM;
1346         GT_setFailureReason (curTrace,
1347                              GT_4CLASS,
1348                              "rproc_mem_lookup",
1349                              status,
1350                              "halObject is NULL");
1351     }
1352     else if (halObject->mmuBase == 0) {
1353         status = -ENOMEM;
1354         GT_setFailureReason (curTrace,
1355                              GT_4CLASS,
1356                              "rproc_mem_lookup",
1357                              status,
1358                              "halObject->mmuBase is 0");
1359     }
1360     else {
1361         /* Retrieve the L1 page table's physical address from TTB */
1362         mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
1363         tableBaseAddr = INREG32(&mmuRegs->TTB);
1364         vaCurr = da;
1366         /* Temporarily map to virtual address space */
1367         L1_base_va = (UInt32) mmap(NULL,
1368                     TRANSLATION_TABLE_SIZE,
1369                     PROT_NOCACHE | PROT_READ | PROT_WRITE,
1370                     MAP_PHYS | MAP_PRIVATE,
1371                     NOFD,
1372                     (off_t)tableBaseAddr);
1373         if (L1_base_va == (UInt32)MAP_FAILED) {
1374             status = -ENOMEM;
1375             GT_setFailureReason (curTrace,
1376                 GT_4CLASS,
1377                 "rproc_mem_lookup",
1378                 status,
1379                 "Memory map failed.");
1380                 goto EXIT_LOOP;
1381         }
1383         /* Lookup entry in L1 page table */
1384         pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1385         pte_val = *(UInt32 *)pte_addr_l1;
1386         pte_size = hw_mmu_pte_sizel1(pte_val);
1388         if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1389             /*
1390              * Get the L2 PA from the L1 PTE, and find
1391              * corresponding L2 VA
1392              */
1393             L2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1395             /* Temporarily map to virtual address space */
1396             L2_base_va = (UInt32)mmap(NULL, HW_MMU_COARSE_PAGE_SIZE,
1397                 PROT_NOCACHE | PROT_READ | PROT_WRITE,
1398                 MAP_PHYS | MAP_PRIVATE,
1399                 NOFD,
1400                 (off_t)L2_base_pa);
1401             if (L2_base_va == (UInt32)MAP_FAILED) {
1402                 status = -ENOMEM;
1403                 GT_setFailureReason (curTrace,
1404                          GT_4CLASS,
1405                          "rproc_mem_lookup",
1406                          status,
1407                          "Memory map failed.");
1408                 goto EXIT_LOOP;
1409             }
1411             /*
1412              * Find the address of the L2 PTE that covers this
1413              * virtual address within the coarse (L2) page table
1414              */
1417             pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
1418             /*
1419              * Read the L2 PTE; a non-zero page size means the entry is
1420              * valid, and the physical address is the page base from the
1421              * PTE plus the offset of vaCurr within the page
1422              */
1426             pte_val = *(UInt32 *)pte_addr_l2;
1427             pte_size = hw_mmu_pte_sizel2(pte_val);
1428             /* vaCurr aligned to pte_size? */
1429             if (pte_size != 0) {
1430                 /* Obtain Physical address from VA */
1431                 *pAddr = (pte_val & ~(pte_size - 1));
1432                 *pAddr += (vaCurr & (pte_size - 1));
1433             }
1434             else {
1435                 /* Error. Not found */
1436                 *pAddr = 0;
1437                 status = -EFAULT;
1438             }
1439         }
1440         else if (pte_size != 0) {
1441             /* pte_size = 1 MB or 16 MB */
1442             /* entry is in L1 page table */
1443             *pAddr = (pte_val & ~(pte_size - 1));
1444             *pAddr += (vaCurr & (pte_size - 1));
1445         }
1446         else {
1447             /* Not found */
1448             *pAddr = 0;
1449             status = -EFAULT;
1450         }
1451     }
1453 EXIT_LOOP:
1455     if ((L2_base_va != 0) && (L2_base_va != (UInt32)MAP_FAILED)) {
1456         munmap((void *)L2_base_va, HW_MMU_COARSE_PAGE_SIZE);
1457     }
1459     if ((L1_base_va != 0) && (L1_base_va != (UInt32)MAP_FAILED)) {
1460         munmap((void *)L1_base_va, TRANSLATION_TABLE_SIZE);
1461     }
1463     return status;
1467 /*
1468  *  ======== rproc_mem_unmap ========
1469  *      Invalidate the PTEs for the DSP VA block to be unmapped.
1470  *
1471  *      PTEs of a mapped memory block are contiguous in any page table
1472  *      So, instead of looking up the PTE address for every 4K block,
1473  *      we clear consecutive PTEs until we unmap all the bytes
1474  */
1475 static Int rproc_mem_unmap (VAYUIPU_HalObject * halObject,
1476                             UInt32 da, UInt32 num_bytes)
1478     UInt32 L1_base_va;
1479     UInt32 L2_base_va;
1480     UInt32 L2_base_pa;
1481     UInt32 L2_page_num;
1482     UInt32 pte_val;
1483     UInt32 pte_size;
1484     UInt32 pte_count;
1485     UInt32 pte_addr_l1;
1486     UInt32 pte_addr_l2 = 0;
1487     UInt32 rem_bytes;
1488     UInt32 rem_bytes_l2;
1489     UInt32 vaCurr;
1490     Int status = 0;
1491     UInt32 temp;
1492     UInt32 pAddr;
1493     UInt32 numof4Kpages = 0;
1494     struct pg_table_attrs * p_pt_attrs = NULL;
1496     if (halObject == NULL) {
1497         status = -ENOMEM;
1498         GT_setFailureReason (curTrace,
1499                              GT_4CLASS,
1500                              "rproc_mem_unmap",
1501                              status,
1502                              "halObject is NULL");
1503     }
1504     else if (halObject->mmuBase == 0) {
1505         status = -ENOMEM;
1506         GT_setFailureReason (curTrace,
1507                              GT_4CLASS,
1508                              "rproc_mem_unmap",
1509                              status,
1510                              "halObject->mmuBase is 0");
1511     }
1512     else if (halObject->mmuObj.pPtAttrs == NULL) {
1513         status = -ENOMEM;
1514         GT_setFailureReason (curTrace,
1515                              GT_4CLASS,
1516                              "rproc_mem_unmap",
1517                              status,
1518                              "halObject->mmuObj.pPtAttrs is 0");
1519     }
1520     else {
1521         p_pt_attrs = (struct pg_table_attrs *)halObject->mmuObj.pPtAttrs;
1522         vaCurr = da;
1523         rem_bytes = num_bytes;
1524         rem_bytes_l2 = 0;
1525         L1_base_va = p_pt_attrs->l1_base_va;
1526         pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1527         while (rem_bytes) {
1528             UInt32 vaCurrOrig = vaCurr;
1529             /* Find whether the L1 PTE points to a valid L2 PT */
1530             pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1531             pte_val = *(UInt32 *)pte_addr_l1;
1532             pte_size = hw_mmu_pte_sizel1(pte_val);
1533             if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1534                 /*
1535                  * Get the L2 PA from the L1 PTE, and find
1536                  * corresponding L2 VA
1537                  */
1538                 L2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1539                 L2_base_va = L2_base_pa - p_pt_attrs->l2_base_pa
1540                             + p_pt_attrs->l2_base_va;
1541                 L2_page_num = (L2_base_pa - p_pt_attrs->l2_base_pa) /
1542                         HW_MMU_COARSE_PAGE_SIZE;
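                /*
                 * Example with hypothetical addresses: if the pool of coarse
                 * tables starts at l2_base_pa 0x9F000000 and is mapped at
                 * l2_base_va 0x40000000, an L1 PTE yielding L2_base_pa
                 * 0x9F002000 gives L2_base_va 0x40002000 (same 0x2000 offset)
                 * and L2_page_num = 0x2000 / HW_MMU_COARSE_PAGE_SIZE.
                 */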
1543                 /*
1544                  * Find the L2 PTE address from which we will start
1545                  * clearing, the number of PTEs to be cleared on this
1546                  * page, and the size of VA space that needs to be
1547                  * cleared on this L2 page
1548                  */
1549                 pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
1550                 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1551                 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) /
1552                         sizeof(UInt32);
1553                 if (rem_bytes < (pte_count * PAGE_SIZE))
1554                     pte_count = rem_bytes / PAGE_SIZE;
1556                 rem_bytes_l2 = pte_count * PAGE_SIZE;
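                /*
                 * Worked example (hypothetical values): if 32 PTE slots remain
                 * between pte_addr_l2 and the end of this coarse table but
                 * only rem_bytes = 0x8000 (32 KB) are left to unmap, pte_count
                 * is capped at 0x8000 / PAGE_SIZE = 8 and rem_bytes_l2 =
                 * 8 * PAGE_SIZE = 0x8000.
                 */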
1557                 /*
1558                  * Unmap the VA space on this L2 PT. A quicker way would
1559                  * be to clear pte_count entries starting from pte_addr_l2.
1560                  * However, the code below checks that we neither clear
1561                  * invalid entries nor clear less than 64 KB for a 64 KB
1562                  * entry. Similar checks are done for the L1 PTEs further
1563                  * below.
1564                  */
1565                 while (rem_bytes_l2) {
1566                     pte_val = *(UInt32 *)pte_addr_l2;
1567                     pte_size = hw_mmu_pte_sizel2(pte_val);
1568                     /* vaCurr aligned to pte_size? */
1569                     if ((pte_size != 0) && (rem_bytes_l2
1570                         >= pte_size) &&
1571                         !(vaCurr & (pte_size - 1))) {
1572                         /* Collect Physical addresses from VA */
1573                         pAddr = (pte_val & ~(pte_size - 1));
1574                         if (pte_size == HW_PAGE_SIZE_64KB)
1575                             numof4Kpages = 16;
1576                         else
1577                             numof4Kpages = 1;
1578                         temp = 0;
1580                         if (hw_mmu_pte_clear(pte_addr_l2,
1581                             vaCurr, pte_size) == RET_OK) {
1582                             rem_bytes_l2 -= pte_size;
1583                             vaCurr += pte_size;
1584                             pte_addr_l2 += (pte_size >> 12)
1585                                 * sizeof(UInt32);
1586                         } else {
1587                             status = -EFAULT;
1588                             goto EXIT_LOOP;
1589                         }
1590                     } else {
1591                         status = -EFAULT;
                        /*
                         * Invalid or misaligned L2 entry: abort instead of
                         * retrying, since rem_bytes_l2 cannot shrink and the
                         * loop would otherwise never terminate.
                         */
                        goto EXIT_LOOP;
                    }
1592                 }
1593                 if (rem_bytes_l2 != 0) {
1594                     status = -EFAULT;
1595                     goto EXIT_LOOP;
1596                 }
1597                 p_pt_attrs->pg_info[L2_page_num].num_entries -=
1598                             pte_count;
1599                 if (p_pt_attrs->pg_info[L2_page_num].num_entries
1600                                     == 0) {
1601                     /*
1602                      * Clear the L1 PTE pointing to the
1603                      * L2 PT
1604                      */
1605                     if (RET_OK != hw_mmu_pte_clear(L1_base_va,
1606                         vaCurrOrig, HW_MMU_COARSE_PAGE_SIZE)) {
1607                         status = -EFAULT;
1608                         goto EXIT_LOOP;
1609                     }
1610                 }
1611                 rem_bytes -= pte_count * PAGE_SIZE;
1612             } else
1613                 /* vaCurr aligned to pte_size? */
1614                 /* pte_size = 1 MB or 16 MB */
1615                 if ((pte_size != 0) && (rem_bytes >= pte_size) &&
1616                    !(vaCurr & (pte_size - 1))) {
1617                     if (pte_size == HW_PAGE_SIZE_1MB)
1618                         numof4Kpages = 256;
1619                     else
1620                         numof4Kpages = 4096;
1621                     temp = 0;
1622                     /* Collect Physical addresses from VA */
1623                     pAddr = (pte_val & ~(pte_size - 1));
1624                     if (hw_mmu_pte_clear(L1_base_va, vaCurr,
1625                             pte_size) == RET_OK) {
1626                         rem_bytes -= pte_size;
1627                         vaCurr += pte_size;
1628                     } else {
1629                         status = -EFAULT;
1630                         goto EXIT_LOOP;
1631                     }
1632                 } else {
1633                     status = -EFAULT;
                    /*
                     * Invalid or misaligned L1 entry: abort rather than spin
                     * on a section that cannot be cleared.
                     */
                    goto EXIT_LOOP;
1634                 }
1635         }
1636     }
1637     /*
1638      * Flush the TLB here so that any stale entries for the range just
1639      * unmapped are discarded
1640      */
1641 EXIT_LOOP:
1642     hw_mmu_tlb_flushAll(halObject->mmuBase);
1643     return status;
1644 }
1647 /*========================================
1648  * This sets up the IPU processor's MMU and page tables
1649  *
1650  */
1651 Int rproc_ipu_setup (VAYUIPU_HalObject * halObject,
1652                      ProcMgr_AddrInfo * memEntries,
1653                      UInt32 numMemEntries)
1654 {
1655     Int ret_val = 0;
1656     struct pg_table_attrs * p_pt_attrs = NULL;
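    /*
     * The literal arguments below are assumed to be, in order: the size
     * reserved for the L1 translation table (0x10000), its required
     * alignment expressed as a power of two (2^14 = 16 KB), and the number
     * of L2 (coarse) page tables to pre-allocate (128).  See
     * init_mmu_page_attribs() earlier in this file for the authoritative
     * meaning of each parameter.
     */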
1658     p_pt_attrs = init_mmu_page_attribs(0x10000, 14, 128);
1659     if (!p_pt_attrs) {
1660         GT_setFailureReason (curTrace,
1661                              GT_4CLASS,
1662                              "rproc_ipu_setup",
1663                              ret_val,
1664                              "init_mmu_page_attribs failed");
        ret_val = -ENOMEM; /* report the failure instead of returning 0
                              (success) with no page tables allocated */
1665     }
1666     else {
1667         halObject->mmuObj.pPtAttrs = p_pt_attrs;
1668         /* Disable the hardware table walking logic (TWL) */
1669         ret_val = rproc_set_twl(halObject, FALSE);
1670         if (ret_val < 0) {
1671             GT_setFailureReason (curTrace,
1672                                  GT_4CLASS,
1673                                  "ipu_setup",
1674                                  ret_val,
1675                                  "rproc_set_twl to FALSE failed");
1676         }
1677         else {
1678             ret_val = rproc_mmu_init (halObject, memEntries,
1679                                       numMemEntries);
1680             if (ret_val < 0) {
1681                 GT_setFailureReason (curTrace,
1682                                      GT_4CLASS,
1683                                      "ipu_setup",
1684                                      ret_val,
1685                                      "rproc_mmu_init failed");
1686             }
1687             else {
1688     #if 0
1689                 ret_val = rproc_set_twl(halObject, TRUE);
1690                 if (ret_val < 0) {
1691                     GT_setFailureReason (curTrace,
1692                                          GT_4CLASS,
1693                                          "ipu_setup",
1694                                          ret_val,
1695                                          "rproc_set_twl to TRUE failed");
1696                 }
1697     #endif
1698             }
1699         }
1700     }
1702     if ((ret_val < 0) && (p_pt_attrs != NULL)) {
1703         deinit_mmu_page_attribs(p_pt_attrs);
1704         halObject->mmuObj.pPtAttrs = NULL;
1705     }
1707     return ret_val;
1708 }
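/*
 * A minimal usage sketch (editor's illustration, not part of the original
 * file): the attach path is expected to describe the IPU carveouts in an
 * array of ProcMgr_AddrInfo entries and call rproc_ipu_setup() once the HAL
 * object has its MMU registers mapped.  Variable names here are hypothetical.
 */
#if 0
    ProcMgr_AddrInfo aInfo[2];          /* filled in by the caller */
    VAYUIPU_HalObject *hal;             /* obtained from the Processor layer */
    Int status;

    status = rproc_ipu_setup(hal, aInfo, 2);
    if (status < 0) {
        /* page table attributes were already released on failure */
    }
    /* ... run the IPU ... */
    rproc_ipu_destroy(hal);             /* release the page tables on detach */
#endif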
1712 Void rproc_ipu_destroy(VAYUIPU_HalObject * halObject)
1713 {
1714     shm_phys_addr = 0;
1716     if (halObject->mmuObj.pPtAttrs) {
1717         deinit_mmu_page_attribs(halObject->mmuObj.pPtAttrs);
1718         halObject->mmuObj.pPtAttrs = NULL;
1719     }
1720 }
1723 static Void iotlb_load_cr (VAYUIPU_HalObject * halObject,
1724                            struct cr_regs *cr)
1725 {
1726     ULONG reg;
1727     VAYUIpu_MMURegs * mmuRegs = (VAYUIpu_MMURegs *)halObject->mmuBase;
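    /*
     * Programming sequence: write the CAM register (VA tag plus the valid
     * bit) and the RAM register (PA plus attributes), flush any existing
     * TLB entry that matches the CAM contents, then issue LD_TLB to load
     * the new entry into the slot selected by the current victim pointer.
     */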
1729     reg = cr->cam | MMU_CAM_V;
1730     OUTREG32(&mmuRegs->CAM, reg);
1732     reg = cr->ram;
1733     OUTREG32(&mmuRegs->RAM, reg);
1735     reg = 1;
1736     OUTREG32(&mmuRegs->FLUSH_ENTRY, reg);
1738     reg = 1;
1739     OUTREG32(&mmuRegs->LD_TLB, reg);
1740 }
1743 /**
1744  * iotlb_dump_cr - Dump an iommu tlb entry into buf
1746  * @cr:        contents of cam and ram register
1747  * @buf:    output buffer
1748  **/
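/*
 * The resulting line is "<cam> <ram> <preserved-flag>"; a hypothetical entry
 * might print as "80000005 9c00004c 1".
 */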
1749 static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf)
1750 {
1751     Char *p = buf;
1753     if(!cr || !buf)
1754         return 0;
1756     /* FIXME: Needs a more detailed breakdown of the cam/ram fields */
1757     p += sprintf(p, "%08x %08x %01x\n", (unsigned int)cr->cam,
1758                     (unsigned int)cr->ram,
1759                     (cr->cam & MMU_CAM_P) ? 1 : 0);
1760     return (p - buf);
1761 }
1765 static Int iotlb_cr_valid (struct cr_regs *cr)
1766 {
1767     if (!cr)
1768         return -EINVAL;
1770     return (cr->cam & MMU_CAM_V);
1771 }
1775 static struct cr_regs *omap5_alloc_cr (struct iotlb_entry *e)
1776 {
1777     struct cr_regs *cr;
1779     if (e->da & ~(get_cam_va_mask(e->pgsz))) {
1780         GT_setFailureReason (curTrace,
1781                              GT_4CLASS,
1782                              "omap5_alloc_cr",
1783                              -EINVAL,
1784                              "failed mask check");
1785         return NULL;
1786     }
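    /*
     * QNX note: MAP_ANON | MAP_PHYS with NOFD allocates physically
     * contiguous anonymous memory and PROT_NOCACHE keeps this scratch
     * cr_regs buffer uncached; load_iotlb_entry() munmap()s it again once
     * the entry has been written to the MMU registers.
     */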
1788     cr = mmap(NULL,
1789               sizeof(struct cr_regs),
1790               PROT_NOCACHE | PROT_READ | PROT_WRITE,
1791               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
1792               NOFD,
1793               0);
1795     if (MAP_FAILED == cr)
1796     {
1797         GT_setFailureReason (curTrace,
1798                              GT_4CLASS,
1799                              "omap5_alloc_cr",
1800                              -EINVAL,
1801                              "mmap failed");
1802         return NULL;
1803     }
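    /*
     * CAM holds the virtual-address tag plus the preserved, page-size and
     * valid bits taken from the entry; RAM holds the physical address
     * together with the endianness, element-size and mixed-page attributes.
     */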
1805     cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
1806     cr->ram = e->pa | e->endian | e->elsz | e->mixed;
1807     return cr;
1808 }
1812 static struct cr_regs *iotlb_alloc_cr (struct iotlb_entry *e)
1813 {
1814     if (!e) {
1815         GT_setFailureReason (curTrace,
1816                              GT_4CLASS,
1817                              "iotlb_alloc_cr",
1818                              -EINVAL,
1819                              "e is NULL");
1820         return NULL;
1821     }
1823     return omap5_alloc_cr(e);
1824 }
1828 /**
1829  * load_iotlb_entry - Set an iommu tlb entry
1830  * @halObject:    HAL handle for the target IPU MMU
1831  * @e:        an iommu tlb entry info
1832  **/
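/*
 * Hedged example of pinning one region (editor's illustration; the page-size
 * constant is assumed to exist in the MMU register definitions used above):
 *
 *     struct iotlb_entry e = { 0 };
 *     e.da    = 0x95000000;         // IPU virtual address
 *     e.pa    = 0xA0000000;         // backing physical address
 *     e.pgsz  = MMU_CAM_PGSZ_1M;    // assumed 1 MB page-size constant
 *     e.valid = MMU_CAM_V;
 *     e.prsvd = MMU_CAM_P;          // lock it as a preserved entry
 *     load_iotlb_entry(halObject, &e);
 */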
1833 static Int load_iotlb_entry (VAYUIPU_HalObject * halObject,
1834                              struct iotlb_entry *e)
1835 {
1836     Int err = 0;
1837     struct iotlb_lock l;
1838     struct cr_regs *cr;
1840     if (halObject == NULL) {
1841         err = -EINVAL;
1842         GT_setFailureReason (curTrace,
1843                              GT_4CLASS,
1844                              "load_iotlb_entry",
1845                              err,
1846                              "halObject is NULL");
1847         goto out;
1848     }
1850     if (halObject->mmuBase == NULL) {
1851         err = -EINVAL;
1852         GT_setFailureReason (curTrace,
1853                              GT_4CLASS,
1854                              "load_iotlb_entry",
1855                              err,
1856                              "halObject->mmuBase is NULL");
1857         goto out;
1858     }
1860     if (!e) {
1861         err = -EINVAL;
1862         GT_setFailureReason (curTrace,
1863                              GT_4CLASS,
1864                              "load_iotlb_entry",
1865                              err,
1866                              "e is NULL");
1867         goto out;
1868     }
1870     iotlb_getLock(halObject, &l);
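    /*
     * The IPU MMU TLB has 32 entries; l.base counts the entries that have
     * been locked down as preserved.  If all 32 are preserved there is no
     * slot left for a new entry.
     */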
1872     if (l.base == 32) {
1873         err = -EBUSY;
1874         GT_setFailureReason (curTrace,
1875                              GT_4CLASS,
1876                              "load_iotlb_entry",
1877                              err,
1878                              "l.base is full");
1879         goto out;
1880     }
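    /*
     * For a non-preserved entry the scan below looks for an invalid TLB
     * slot to reuse and the lock is then re-read; a preserved entry is
     * loaded at l.base so that the locked-down region stays contiguous,
     * and the base/victim pointers are advanced after the load.
     */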
1881     if (!e->prsvd) {
1882         int i;
1883         struct cr_regs tmp;
1885         for_each_iotlb_cr(32, i, tmp)
1886             if (!iotlb_cr_valid(&tmp))
1887                 break;
1889         if (i == 32) {
1890             err = -EBUSY;
1891             GT_setFailureReason (curTrace,
1892                                  GT_4CLASS,
1893                                  "load_iotlb_entry",
1894                                  err,
1895                                  "i == 32");
1896             goto out;
1897         }
1899         iotlb_getLock(halObject, &l);
1900     } else {
1901         l.vict = l.base;
1902         iotlb_setLock(halObject, &l);
1903     }
1905     cr = iotlb_alloc_cr(e);
1906     if (!cr){
1907         err = -ENOMEM;
1908         GT_setFailureReason (curTrace,
1909                              GT_4CLASS,
1910                              "load_iotlb_entry",
1911                              err,
1912                              "iotlb_alloc_cr failed");
1913         goto out;
1914     }
1916     iotlb_load_cr(halObject, cr);
1917     munmap(cr, sizeof(struct cr_regs));
1919     if (e->prsvd)
1920         l.base++;
1921     /* increment victim for next tlb load */
1922     if (++l.vict == 32)
1923         l.vict = l.base;
1924     iotlb_setLock(halObject, &l);
1926 out:
1927     return err;