1 /*
 2  *  @file  VAYUDspEnabler.c
3  *
4  *  @brief  MMU programming module
5  *
6  *
7  *  ============================================================================
8  *
9  *  Copyright (c) 2013, Texas Instruments Incorporated
10  *
11  *  Redistribution and use in source and binary forms, with or without
12  *  modification, are permitted provided that the following conditions
13  *  are met:
14  *
15  *  *  Redistributions of source code must retain the above copyright
16  *     notice, this list of conditions and the following disclaimer.
17  *
18  *  *  Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *
22  *  *  Neither the name of Texas Instruments Incorporated nor the names of
23  *     its contributors may be used to endorse or promote products derived
24  *     from this software without specific prior written permission.
25  *
26  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
27  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  *  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  *  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  *  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  *  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  *  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
33  *  OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
34  *  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
35  *  OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
36  *  EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  *  Contact information for paper mail:
38  *  Texas Instruments
39  *  Post Office Box 655303
40  *  Dallas, Texas 75265
41  *  Contact information:
42  *  http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
43  *  DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
44  *  ============================================================================
45  *
46  */
48 #include <errno.h>
49 #include <unistd.h>
50 #include <ti/syslink/Std.h>
52 /* OSAL and utils headers */
53 #include <ti/syslink/utils/List.h>
54 #include <ti/syslink/utils/Trace.h>
55 #include <ti/syslink/utils/OsalPrint.h>
57 /* Module level headers */
58 #include <OsalDrv.h>
59 #include <_ProcDefs.h>
60 #include <Processor.h>
61 #include <hw/inout.h>
62 #include <sys/mman.h>
64 #include <hw_defs.h>
65 #include <hw_mmu.h>
66 #include <VAYUDspHal.h>
67 #include <VAYUDspHalMmu.h>
68 #include <VAYUDspPhyShmem.h>
69 #include <VAYUDspEnabler.h>
70 #include <stdbool.h>
71 #include <stdint.h>
74 #define PAGE_SIZE 0x1000
76 /* Attributes of L2 page tables for DSP MMU.*/
77 struct page_info {
78     /* Number of valid PTEs in the L2 PT*/
79     UInt32 num_entries;
80 };
83 /* Attributes used to manage the DSP MMU page tables */
84 struct pg_table_attrs {
85     struct sync_cs_object *hcs_object;/* Critical section object handle */
86     UInt32 l1_base_pa; /* Physical address of the L1 PT */
87     UInt32 l1_base_va; /* Virtual  address of the L1 PT */
88     UInt32 l1_size; /* Size of the L1 PT */
89     UInt32 l1_tbl_alloc_pa;
90     /* Physical address of Allocated mem for L1 table. May not be aligned */
91     UInt32 l1_tbl_alloc_va;
92     /* Virtual address of Allocated mem for L1 table. May not be aligned */
93     UInt32 l1_tbl_alloc_sz;
94     /* Size of consistent memory allocated for L1 table.
95      * May not be aligned */
96     UInt32 l2_base_pa;        /* Physical address of the L2 PT */
97     UInt32 l2_base_va;        /* Virtual  address of the L2 PT */
98     UInt32 l2_size;        /* Size of the L2 PT */
99     UInt32 l2_tbl_alloc_pa;
100     /* Physical address of Allocated mem for L2 table. May not be aligned */
101     UInt32 l2_tbl_alloc_va;
102     /* Virtual address of Allocated mem for L2 table. May not be aligned */
103     UInt32 ls_tbl_alloc_sz;
104     /* Size of consistent memory allocated for L2 table.
105      * May not be aligned */
106     UInt32 l2_num_pages;    /* Number of allocated L2 PT */
107     struct page_info *pg_info;
108 };
111 enum pagetype {
112     SECTION = 0,
113     LARGE_PAGE = 1,
114     SMALL_PAGE = 2,
115     SUPER_SECTION  = 3
116 };
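/*
 * As used throughout this file (see get_mmu_entry_size() and add_entry_ext()),
 * these correspond to the four DSP MMU mapping sizes:
 *     SUPER_SECTION -> 16MB, SECTION -> 1MB,
 *     LARGE_PAGE    -> 64KB, SMALL_PAGE -> 4KB
 */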
118 static UInt32 shm_phys_addr;
119 static UInt32 shm_phys_addr_dsp;
121 #define INREG32(x) in32((uintptr_t)x)
122 #define OUTREG32(x, y) out32((uintptr_t)x, y)
123 #define SIZE 0x4
125 static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf);
126 static Int load_iotlb_entry (VAYUDSP_HalObject * halObject,
127                              struct iotlb_entry *e);
128 static Int iotlb_cr_valid (struct cr_regs *cr);
130 static Int rproc_mem_map (VAYUDSP_HalObject * halObject,
131                           UInt32 mpu_addr, UInt32 ul_virt_addr,
132                           UInt32 num_bytes, UInt32 map_attr);
133 static Int rproc_mem_unmap (VAYUDSP_HalObject * halObject, UInt32 da,
134                             UInt32 num_bytes);
137 static Void iotlb_cr_to_e (struct cr_regs *cr, struct iotlb_entry *e)
139     e->da       = cr->cam & MMU_CAM_VATAG_MASK;
140     e->pa       = cr->ram & MMU_RAM_PADDR_MASK;
141     e->valid    = cr->cam & MMU_CAM_V;
142     e->prsvd    = cr->cam & MMU_CAM_P;
143     e->pgsz     = cr->cam & MMU_CAM_PGSZ_MASK;
144     e->endian   = cr->ram & MMU_RAM_ENDIAN_MASK;
145     e->elsz     = cr->ram & MMU_RAM_ELSZ_MASK;
146     e->mixed    = cr->ram & MMU_RAM_MIXED;
149 static Void iotlb_getLock (VAYUDSP_HalObject * halObject,
150                            struct iotlb_lock *l)
152     ULONG reg;
153     VAYUDsp_MMURegs * mmuRegs =
154                                   (VAYUDsp_MMURegs *)halObject->mmuBase;
156     reg = INREG32(&mmuRegs->LOCK);
157     l->base = MMU_LOCK_BASE(reg);
158     l->vict = MMU_LOCK_VICT(reg);
161 static Void iotlb_setLock (VAYUDSP_HalObject * halObject,
162                            struct iotlb_lock *l)
164     ULONG reg;
165     VAYUDsp_MMURegs * mmuRegs =
166                                   (VAYUDsp_MMURegs *)halObject->mmuBase;
168     reg = (l->base << MMU_LOCK_BASE_SHIFT);
169     reg |= (l->vict << MMU_LOCK_VICT_SHIFT);
170     OUTREG32(&mmuRegs->LOCK, reg);
173 static void omap4_tlb_read_cr (VAYUDSP_HalObject * halObject,
174                                struct cr_regs *cr)
176     VAYUDsp_MMURegs * mmuRegs =
177                                   (VAYUDsp_MMURegs *)halObject->mmuBase;
179     cr->cam = INREG32(&mmuRegs->READ_CAM);
180     cr->ram = INREG32(&mmuRegs->READ_RAM);
183 /* only used for iotlb iteration in for-loop */
184 static struct cr_regs __iotlb_read_cr (VAYUDSP_HalObject * halObject,
185                                        int n)
187      struct cr_regs cr;
188      struct iotlb_lock l;
189      iotlb_getLock(halObject, &l);
190      l.vict = n;
191      iotlb_setLock(halObject, &l);
192      omap4_tlb_read_cr(halObject, &cr);
193      return cr;
196 #define for_each_iotlb_cr(n, __i, cr)                \
197     for (__i = 0;                            \
198          (__i < (n)) && (cr = __iotlb_read_cr(halObject, __i), TRUE);    \
199          __i++)
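/*
 * Usage pattern (as in save_tlbs() and __dump_tlb_entries() below): iterate
 * over the first n TLB entries, reading each CAM/RAM pair into cr:
 *
 *     struct cr_regs cr;
 *     Int i;
 *     for_each_iotlb_cr(n, i, cr) {
 *         // use cr.cam / cr.ram
 *     }
 *
 * Note that the macro expands a reference to a local variable named
 * halObject, which must therefore be in scope at the point of use.
 */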
201 static Int save_tlbs (VAYUDSP_HalObject * halObject, UINT32 procId)
203     Int i =0;
204     struct cr_regs cr_tmp;
205     struct iotlb_lock l;
207     iotlb_getLock(halObject, &l);
209     halObject->mmuObj.nrTlbs = l.base;
210     for_each_iotlb_cr(halObject->mmuObj.nrTlbs, i, cr_tmp) {
211         iotlb_cr_to_e(&cr_tmp, &halObject->mmuObj.tlbs[i]);
212     }
214     return 0;
218 static Int restore_tlbs (VAYUDSP_HalObject * halObject, UInt32 procId)
220     Int i = 0;
221     Int status = -1;
222     struct iotlb_lock save;
224     /* Reset the base and victim values */
225     save.base = 0;
226     save.vict = 0;
227     iotlb_setLock(halObject, &save);
229     for (i = 0; i < halObject->mmuObj.nrTlbs; i++) {
230         status = load_iotlb_entry(halObject, &halObject->mmuObj.tlbs[i]);
231         if (status < 0) {
232             GT_setFailureReason (curTrace,
233                                  GT_4CLASS,
234                                  "restore_tlbs",
235                                  status,
236                                  "Error restoring the tlbs");
237             goto err;
238         }
239     }
241     return 0;
243 err:
244     return status;
247 static Int save_mmu_regs (VAYUDSP_HalObject * halObject, UInt32 procId)
249     UInt32 i = 0;
251     if (halObject == NULL) {
252         GT_setFailureReason (curTrace,
253                              GT_4CLASS,
254                              "save_mmu_regs",
255                              -ENOMEM,
256                              "halObject is NULL");
257         return -ENOMEM;
258     }
260     if (halObject->mmuBase == 0) {
261         GT_setFailureReason (curTrace,
262                              GT_4CLASS,
263                              "save_mmu_regs",
264                              -ENOMEM,
265                              "halObject->mmuBase is 0");
266         return -ENOMEM;
267     }
269     for (i = 0; i < MMU_REGS_SIZE; i++) {
270         halObject->mmuObj.mmuRegs[i] = INREG32(halObject->mmuBase + (i * 4));
271     }
273     return 0;
276 static Int restore_mmu_regs (VAYUDSP_HalObject * halObject,
277                              UInt32 procId)
279     UInt32 i = 0;
281     if (halObject == NULL) {
282         GT_setFailureReason (curTrace,
283                              GT_4CLASS,
284                              "restore_mmu_regs",
285                              -ENOMEM,
286                              "halObject is NULL");
287         return -ENOMEM;
288     }
290     if (halObject->mmuBase == 0) {
291         GT_setFailureReason (curTrace,
292                              GT_4CLASS,
293                              "restore_mmu_regs",
294                              -ENOMEM,
295                              "halObject->mmuBase is 0");
296         return -ENOMEM;
297     }
299     for (i = 0; i < MMU_REGS_SIZE; i++) {
300         OUTREG32(halObject->mmuBase + (i * 4), halObject->mmuObj.mmuRegs[i]);
301     }
303     return 0;
306 Int save_dsp_mmu_ctxt (VAYUDSP_HalObject * halObject, UInt32 procId)
308     Int status = -1;
310     status = save_mmu_regs(halObject, procId);
311     if (status < 0) {
312         GT_setFailureReason (curTrace,
313                              GT_4CLASS,
314                              "save_mmu_ctxt",
315                              status,
316                              "Unable to save MMU Regs");
317         return status;
318     }
320     status = save_tlbs(halObject, procId);
321     if (status < 0) {
322         GT_setFailureReason (curTrace,
323                              GT_4CLASS,
324                              "save_mmu_ctxt",
325                              status,
326                              "Unable to save TLBs");
327         return status;
328     }
329     return status;
333 Int restore_dsp_mmu_ctxt (VAYUDSP_HalObject * halObject, UInt32 procId)
335     Int status = -1;
337     status = restore_mmu_regs(halObject, procId);
338     if (status < 0) {
339         GT_setFailureReason (curTrace,
340                              GT_4CLASS,
341                              "restore_mmu_ctxt",
342                              status,
343                              "Unable to restore MMU Regs");
344         return status;
345     }
347     status = restore_tlbs(halObject, procId);
348     if (status < 0) {
349         GT_setFailureReason (curTrace,
350                              GT_4CLASS,
351                              "restore_mmu_ctxt",
352                              status,
353                              "Unable to restore TLBS");
354         return status;
355     }
357     return status;
361  /*=========================================
362  * Decides a TLB entry size
363  *
364  */
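/*
 * Illustrative example (addresses chosen for illustration only): for
 * pa = 0x95100000 and size = 0x150000, pa is 1MB-aligned but not
 * 16MB-aligned and size is between 1MB and 16MB, so the function returns
 * *size_tlb = SECTION and *entry_size = PAGE_SIZE_1MB. The caller maps 1MB,
 * advances pa and size, and calls again; the remaining 0x50000 bytes at the
 * (64KB-aligned) address 0x95200000 then come back as LARGE_PAGE / 64KB.
 */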
365 static Int get_mmu_entry_size (UInt32 pa, UInt32 size, enum pagetype *size_tlb,
366                                UInt32 *entry_size)
368     Int     status = 0;
369     Bool    page_align_4kb  = false;
370     Bool    page_align_64kb = false;
371     Bool    page_align_1mb = false;
372     Bool    page_align_16mb = false;
373     UInt32  phys_addr = pa;
376     /*  First check the page alignment*/
377     if ((phys_addr % PAGE_SIZE_4KB)  == 0)
378         page_align_4kb  = true;
379     if ((phys_addr % PAGE_SIZE_64KB) == 0)
380         page_align_64kb = true;
381     if ((phys_addr % PAGE_SIZE_1MB)  == 0)
382         page_align_1mb  = true;
383     if ((phys_addr % PAGE_SIZE_16MB)  == 0)
384         page_align_16mb  = true;
386     if ((!page_align_64kb) && (!page_align_1mb)  && (!page_align_4kb)) {
387         status = -EINVAL;
388         GT_setFailureReason (curTrace,
389                              GT_4CLASS,
390                              "get_mmu_entry_size",
391                              status,
392                              "phys_addr is not properly aligned");
393         goto error_exit;
394     }
396     /*  Now decide the entry size */
397     if (size >= PAGE_SIZE_16MB) {
398         if (page_align_16mb) {
399             *size_tlb   = SUPER_SECTION;
400             *entry_size = PAGE_SIZE_16MB;
401         } else if (page_align_1mb) {
402             *size_tlb   = SECTION;
403             *entry_size = PAGE_SIZE_1MB;
404         } else if (page_align_64kb) {
405             *size_tlb   = LARGE_PAGE;
406             *entry_size = PAGE_SIZE_64KB;
407         } else if (page_align_4kb) {
408             *size_tlb   = SMALL_PAGE;
409             *entry_size = PAGE_SIZE_4KB;
410         } else {
411             status = -EINVAL;
412             GT_setFailureReason (curTrace,
413                                  GT_4CLASS,
414                                  "get_mmu_entry_size",
415                                  status,
416                                  "size and alignment are invalid");
417             goto error_exit;
418         }
419     } else if (size >= PAGE_SIZE_1MB && size < PAGE_SIZE_16MB) {
420         if (page_align_1mb) {
421             *size_tlb   = SECTION;
422             *entry_size = PAGE_SIZE_1MB;
423         } else if (page_align_64kb) {
424             *size_tlb   = LARGE_PAGE;
425             *entry_size = PAGE_SIZE_64KB;
426         } else if (page_align_4kb) {
427             *size_tlb   = SMALL_PAGE;
428             *entry_size = PAGE_SIZE_4KB;
429         } else {
430             status = -EINVAL;
431             GT_setFailureReason (curTrace,
432                                  GT_4CLASS,
433                                  "get_mmu_entry_size",
434                                  status,
435                                  "size and alignment are invalid");
436             goto error_exit;
437         }
438     } else if (size > PAGE_SIZE_4KB && size < PAGE_SIZE_1MB) {
439         if (page_align_64kb) {
440             *size_tlb   = LARGE_PAGE;
441             *entry_size = PAGE_SIZE_64KB;
442         } else if (page_align_4kb) {
443             *size_tlb   = SMALL_PAGE;
444             *entry_size = PAGE_SIZE_4KB;
445         } else {
446             status = -EINVAL;
447             GT_setFailureReason (curTrace,
448                                  GT_4CLASS,
449                                  "get_mmu_entry_size",
450                                  status,
451                                  "size and alignment are invalid");
452             goto error_exit;
453         }
454     } else if (size == PAGE_SIZE_4KB) {
455         if (page_align_4kb) {
456             *size_tlb   = SMALL_PAGE;
457             *entry_size = PAGE_SIZE_4KB;
458         } else {
459             status = -EINVAL;
460             GT_setFailureReason (curTrace,
461                                  GT_4CLASS,
462                                  "get_mmu_entry_size",
463                                  status,
464                                  "size and alignment are invalid");
465             goto error_exit;
466         }
467     } else {
468         status = -EINVAL;
469         GT_setFailureReason (curTrace,
470                              GT_4CLASS,
471                              "get_mmu_entry_size",
472                              status,
473                              "size is invalid");
474         goto error_exit;
475     }
476     return 0;
478 error_exit:
479     return status;
482 /*
 483  * Note: add_dsp_mmu_entry is left here, but compiled out, so that it is
 484  * available in the future if static TLB entries need to be added outside
 485  * of the translation table for faster access.
486  */
487 #if 0
488 /*=========================================
489  * Add DSP MMU entries corresponding to given MPU-Physical address
490  * and DSP-virtual address
491  */
492 static Int add_dsp_mmu_entry (VAYUDSP_HalObject * halObject,
493                               UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
495     UInt32 mapped_size = 0;
496     enum pagetype size_tlb = SECTION;
497     UInt32 entry_size = 0;
498     int status = 0;
499     struct iotlb_entry tlb_entry;
500     int retval = 0;
502     while ((mapped_size < size) && (status == 0)) {
503         status = get_mmu_entry_size(*phys_addr, (size - mapped_size),
504                                     &size_tlb, &entry_size);
505         if (status < 0) {
506             GT_setFailureReason (curTrace,
507                                  GT_4CLASS,
508                                  "add_dsp_mmu_entry",
509                                  status,
510                                  "get_mmu_entry_size failed");
511             goto error_exit;
512         }
514         if (size_tlb == SUPER_SECTION)
515             tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
517         else if (size_tlb == SECTION)
518             tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
520         else if (size_tlb == LARGE_PAGE)
521             tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
523         else if (size_tlb == SMALL_PAGE)
524             tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
526         tlb_entry.elsz = MMU_RAM_ELSZ_16;
527         tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
528         tlb_entry.mixed = MMU_RAM_MIXED;
529         tlb_entry.prsvd = MMU_CAM_P;
530         tlb_entry.valid = MMU_CAM_V;
532         tlb_entry.da = *dsp_addr;
533         tlb_entry.pa = *phys_addr;
534         retval = load_iotlb_entry(halObject, &tlb_entry);
535         if (retval < 0) {
536             GT_setFailureReason (curTrace,
537                                  GT_4CLASS,
538                                  "add_dsp_mmu_entry",
539                                  retval,
540                                  "load_iotlb_entry failed");
541             goto error_exit;
542         }
543         mapped_size  += entry_size;
544         *phys_addr   += entry_size;
545         *dsp_addr   += entry_size;
546     }
548     return 0;
550 error_exit:
551     printf("pte set failure retval = 0x%x, status = 0x%x \n",
552                             retval, status);
554     return retval;
556 #endif
558 static Int add_entry_ext (VAYUDSP_HalObject * halObject,
559                           UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
561     UInt32 mapped_size = 0;
562     enum pagetype     size_tlb = SECTION;
563     UInt32 entry_size = 0;
564     Int status = 0;
565     UInt32 page_size = HW_PAGE_SIZE_1MB;
566     UInt32 flags = 0;
568     flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
569                     DSP_MAPPHYSICALADDR);
570     while ((mapped_size < size) && (status == 0)) {
 572         /* get_mmu_entry_size() fills size_tlb and entry_size based on
 573            the alignment and the size of the memory that remains to be
 574            mapped to the DSP */
575         status = get_mmu_entry_size (*phys_addr,
576                                      (size - mapped_size),
577                                      &size_tlb,
578                                      &entry_size);
579         if (status < 0) {
580             GT_setFailureReason (curTrace,
581                                  GT_4CLASS,
582                                  "add_entry_ext",
583                                  status,
584                                  "get_mmu_entry_size failed");
585             break;
586         }
587         else {
588             if (size_tlb == SUPER_SECTION)
589                 page_size = HW_PAGE_SIZE_16MB;
590             else if (size_tlb == SECTION)
591                 page_size = HW_PAGE_SIZE_1MB;
592             else if (size_tlb == LARGE_PAGE)
593                 page_size = HW_PAGE_SIZE_64KB;
594             else if (size_tlb == SMALL_PAGE)
595                 page_size = HW_PAGE_SIZE_4KB;
597             if (status == 0) {
598                 status = rproc_mem_map (halObject,
599                                         *phys_addr,
600                                         *dsp_addr,
601                                         page_size,
602                                         flags);
603                 if (status < 0) {
604                     GT_setFailureReason (curTrace,
605                                          GT_4CLASS,
606                                          "add_entry_ext",
607                                          status,
 608                                          "rproc_mem_map failed");
609                     break;
610                 }
611                 mapped_size  += entry_size;
612                 *phys_addr   += entry_size;
613                 *dsp_addr   += entry_size;
614             }
615         }
616     }
617     return status;
620 static Int __dump_tlb_entries (VAYUDSP_HalObject * halObject,
621                                struct cr_regs *crs, int num)
623     int i;
624     struct iotlb_lock saved;
625     struct cr_regs tmp;
626     struct cr_regs *p = crs;
628     iotlb_getLock(halObject, &saved);
629     for_each_iotlb_cr(num, i, tmp) {
630         if (!iotlb_cr_valid(&tmp))
631             continue;
632         *p++ = tmp;
633     }
634     iotlb_setLock(halObject, &saved);
635     return  p - crs;
638 UInt32 get_DspVirtAdd(VAYUDSP_HalObject * halObject, UInt32 physAdd)
640     int i, num;
641     struct cr_regs *cr;
642     struct cr_regs *p = NULL;
643     //DWORD dwPhys;
644     UInt32 lRetVal = 0;
645     num = 32;
646     if(shm_phys_addr_dsp == 0)
647         return 0;
648     cr = mmap(NULL,
649               sizeof(struct cr_regs) * num,
650               PROT_NOCACHE | PROT_READ | PROT_WRITE,
651               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
652               NOFD,
653               0);
654     if (cr == MAP_FAILED)
655     {
 656         return 0;
657     }
659     memset(cr, 0, sizeof(struct cr_regs) * num);
661     num = __dump_tlb_entries(halObject, cr, num);
662     for (i = 0; i < num; i++)
663     {
664         p = cr + i;
665         if(physAdd >= (p->ram & 0xFFFFF000) &&  physAdd < ((p + 1)->ram & 0xFFFFF000))
666         {
667             lRetVal = ((p->cam & 0xFFFFF000) + (physAdd - (p->ram & 0xFFFFF000)));
668         }
669     }
670     munmap(cr, sizeof(struct cr_regs) * num);
672     return lRetVal;
676 /**
677  * dump_tlb_entries - dump cr arrays to given buffer
678  * @obj:    target iommu
679  * @buf:    output buffer
680  **/
681 static UInt32 dump_tlb_entries (VAYUDSP_HalObject * halObject,
682                                 char *buf, UInt32 bytes)
684     Int i, num;
685     struct cr_regs *cr;
686     Char *p = buf;
688     num = bytes / sizeof(*cr);
689     num = min(32, num);
690     cr = mmap(NULL,
691             sizeof(struct cr_regs) * num,
692               PROT_NOCACHE | PROT_READ | PROT_WRITE,
693               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
694               NOFD,
695               0);
 696     if (cr == MAP_FAILED)
 697     {
 698         return 0;
 700     }
701     memset(cr, 0, sizeof(struct cr_regs) * num);
703     num = __dump_tlb_entries(halObject, cr, num);
704     for (i = 0; i < num; i++)
705         p += iotlb_dump_cr(cr + i, p);
706     munmap(cr, sizeof(struct cr_regs) * num);
707     return p - buf;
711 static Void rproc_tlb_dump (VAYUDSP_HalObject * halObject)
713     Char *p;
715     p = mmap(NULL,
716              1000,
717              PROT_NOCACHE | PROT_READ | PROT_WRITE,
718              MAP_ANON | MAP_PHYS | MAP_PRIVATE,
719              NOFD,
720              0);
721     if (MAP_FAILED != p)
722     {
723         dump_tlb_entries(halObject, p, 1000);
724         munmap(p, 1000);
725     }
727     return;
731 /*================================
732  * Initialize the Dsp MMU.
733  *===============================*/
735 static Int rproc_mmu_init (VAYUDSP_HalObject * halObject,
736                            ProcMgr_AddrInfo * memEntries,
737                            UInt32 numMemEntries)
739     Int ret_val = 0;
740     UInt32 phys_addr = 0;
741     UInt32 i = 0;
742     UInt32 virt_addr = 0;
743     UInt32 reg;
744     VAYUDsp_MMURegs * mmuRegs = NULL;
746     if (halObject == NULL) {
747         ret_val = -ENOMEM;
748         GT_setFailureReason (curTrace,
749                              GT_4CLASS,
750                              "rproc_mmu_init",
751                              ret_val,
752                              "halObject is NULL");
753         goto error_exit;
754     }
756     if (halObject->mmuBase == 0) {
757         ret_val = -ENOMEM;
758         GT_setFailureReason (curTrace,
759                              GT_4CLASS,
760                              "rproc_mmu_init",
761                              ret_val,
762                              "halObject->mmuBase is 0");
763         goto error_exit;
764     }
765     mmuRegs = (VAYUDsp_MMURegs *)halObject->mmuBase;
767     /*  Disable the MMU & TWL */
768     hw_mmu_disable(halObject->mmuBase);
769     hw_mmu_twl_disable(halObject->mmuBase);
771     printf("  Programming Dsp memory regions\n");
772     printf("=========================================\n");
774     for (i = 0; i < numMemEntries; i++) {
775         phys_addr = memEntries[i].addr[ProcMgr_AddrType_MasterPhys];
 776         if (phys_addr == (UInt32)(-1) || phys_addr == 0) {
            ret_val = -EINVAL;
 777             GT_setFailureReason (curTrace,
 778                                  GT_4CLASS,
 779                                  "rproc_mmu_init",
780                                  ret_val,
781                                  "phys_addr is invalid");
782             goto error_exit;
783         }
784         printf( "VA = [0x%x] of size [0x%x] at PA = [0x%x]\n",
785                 memEntries[i].addr[ProcMgr_AddrType_SlaveVirt],
786                 memEntries[i].size,
787                 (unsigned int)phys_addr);
789         /* VAYU SDC code */
790         /* Adjust below logic if using cacheable shared memory */
791         shm_phys_addr = 1;
792         virt_addr = memEntries[i].addr[ProcMgr_AddrType_SlaveVirt];
794         ret_val = add_entry_ext(halObject, &phys_addr, &virt_addr,
795                                     (memEntries[i].size));
796         if (ret_val < 0) {
797             GT_setFailureReason (curTrace,
798                                  GT_4CLASS,
 799                                  "rproc_mmu_init",
 800                                  ret_val,
 801                                  "add_entry_ext failed");
802             goto error_exit;
803         }
804     }
806     /* Set the TTB to point to the L1 page table's physical address */
807     OUTREG32(&mmuRegs->TTB,
808            ((struct pg_table_attrs *)(halObject->mmuObj.pPtAttrs))->l1_base_pa);
810     /* Enable the TWL */
811     hw_mmu_twl_enable(halObject->mmuBase);
813     hw_mmu_enable(halObject->mmuBase);
815     rproc_tlb_dump(halObject);
817     //Set the SYSCONFIG
818     reg = INREG32(halObject->mmuBase + 0x10);
819     reg&=0xFFFFFFEF;
820     reg|=0x11;
821     OUTREG32(halObject->mmuBase+0x10, reg);
823     return 0;
824 error_exit:
825     return ret_val;
829 /****************************************************
831 *  Function which sets the TWL of the remote core
834 *****************************************************/
836 static Int rproc_set_twl (VAYUDSP_HalObject * halObject, Bool on)
838     Int status = 0;
839     VAYUDsp_MMURegs * mmuRegs = NULL;
840     ULONG reg;
842     if (halObject == NULL) {
843         status = -ENOMEM;
844         GT_setFailureReason (curTrace,
845                              GT_4CLASS,
 846                              "rproc_set_twl",
847                              status,
848                              "halObject is NULL");
849     }
850     else if (halObject->mmuBase == 0) {
851         status = -ENOMEM;
852         GT_setFailureReason (curTrace,
853                              GT_4CLASS,
 854                              "rproc_set_twl",
855                              status,
856                              "halObject->mmuBase is NULL");
857     }
858     else {
859         mmuRegs = (VAYUDsp_MMURegs *)halObject->mmuBase;
861         /* Setting MMU to Smart Idle Mode */
862         reg = INREG32(&mmuRegs->SYSCONFIG);
863         reg &= ~MMU_SYS_IDLE_MASK;
864         reg |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
865         OUTREG32(&mmuRegs->SYSCONFIG, reg);
867         /* Enabling MMU */
868         reg =  INREG32(&mmuRegs->CNTL);
870         if (on)
871             OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TWL_MASK);
872         else
873             OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TLB_MISS_MASK);
875         reg &= ~MMU_CNTL_MASK;
876         if (on)
877             reg |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
878         else
879             reg |= (MMU_CNTL_MMU_EN);
881         OUTREG32(&mmuRegs->CNTL, reg);
882     }
884     return status;
888 /*========================================
889  * This sets up the Dsp processor MMU Page tables
890  *
891  */
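/*
 * Alignment note (illustrative numbers, not actual board addresses): the L1
 * table must start on an l1_size boundary. If an allocation comes back
 * unaligned, the code below doubles the allocation size, remaps, and rounds
 * the table base up inside the larger window:
 *     l1_base_pa = (pg_tbl_pa + (align_size - 1)) & ~(align_size - 1)
 * e.g. pg_tbl_pa = 0x9FFF4000 with align_size = 0x10000 rounds up to
 * 0xA0000000, and l1_base_va is advanced by the same 0xC000 offset.
 */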
892 static struct pg_table_attrs * init_mmu_page_attribs (UInt32 l1_size,
893                                                       UInt32 l1_allign,
894                                                       UInt32 ls_num_of_pages)
896     struct pg_table_attrs * p_pt_attrs = NULL;
897     UInt32 pg_tbl_pa = 0;
898     off64_t offset = 0;
899     UInt32 pg_tbl_va = 0;
900     UInt32 align_size = 0;
901     UInt32 len = 0;
902     int status = 0;
904     p_pt_attrs = Memory_alloc (NULL, sizeof(struct pg_table_attrs), 0, NULL);
905     if (p_pt_attrs)
906         Memory_set (p_pt_attrs, 0, sizeof(struct pg_table_attrs));
907     else {
908         status = -ENOMEM;
909         GT_setFailureReason (curTrace,
910                              GT_4CLASS,
911                              "init_mmu_page_attribs",
912                              status,
913                              "Memory_alloc failed");
914         goto error_exit;
915     }
917     p_pt_attrs->l1_size = l1_size;
918     align_size = p_pt_attrs->l1_size;
919     p_pt_attrs->l1_tbl_alloc_sz = 0x100000;
 920     /* Alignment sizes are expected to be powers of 2 */
 921     /* The allocation should come back aligned to the L1 table size */
922     pg_tbl_va = (UInt32) mmap64 (NULL,
923                                  p_pt_attrs->l1_tbl_alloc_sz,
924                                  PROT_NOCACHE | PROT_READ | PROT_WRITE,
925                                  MAP_ANON | MAP_PHYS | MAP_PRIVATE,
926                                  NOFD,
927                                  0x0);
928     if (pg_tbl_va == (UInt32)MAP_FAILED) {
929         pg_tbl_va = 0;
930         status = -ENOMEM;
931         GT_setFailureReason (curTrace,
932                              GT_4CLASS,
933                              "init_mmu_page_attribs",
934                              status,
935                              "mmap64 failed");
936         goto error_exit;
937     }
938     else {
939         /* Make sure the memory is contiguous */
940         status = mem_offset64 ((void *)pg_tbl_va, NOFD,
941                                p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
942         pg_tbl_pa = (UInt32)offset;
943         if (len != p_pt_attrs->l1_tbl_alloc_sz) {
944             status = -ENOMEM;
945             GT_setFailureReason (curTrace,
946                                  GT_4CLASS,
947                                  "init_mmu_page_attribs",
948                                  status,
949                                  "phys mem is not contiguous");
950         }
951         if (status != 0) {
952             GT_setFailureReason (curTrace,
953                                  GT_4CLASS,
954                                  "init_mmu_page_attribs",
955                                  status,
956                                  "mem_offset64 failed");
957             goto error_exit;
958         }
959     }
960     /* Check if the PA is aligned for us */
961     if ((pg_tbl_pa) & (align_size-1)) {
 962         /* PA is not aligned to the page table size; */
 963         /* retry with a larger allocation and align manually */
964         munmap((void *)pg_tbl_va, p_pt_attrs->l1_tbl_alloc_sz);
965         p_pt_attrs->l1_tbl_alloc_sz = p_pt_attrs->l1_tbl_alloc_sz*2;
 966         /* the allocation should come back aligned to the L1 table size */
967         pg_tbl_va = (UInt32) mmap64 (NULL,
968                                      p_pt_attrs->l1_tbl_alloc_sz,
969                                      PROT_NOCACHE | PROT_READ | PROT_WRITE,
970                                      MAP_ANON | MAP_PHYS | MAP_PRIVATE,
971                                      NOFD,
972                                      0);
973         if (pg_tbl_va == (UInt32)MAP_FAILED) {
974             pg_tbl_va = 0;
975             status = -ENOMEM;
976             GT_setFailureReason (curTrace,
977                                  GT_4CLASS,
978                                  "init_mmu_page_attribs",
979                                  status,
980                                  "mmap64 failed");
981             goto error_exit;
982         }
983         else {
984             /* Make sure the memory is contiguous */
985             status = mem_offset64 ((void *)pg_tbl_va, NOFD,
986                                    p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
987             pg_tbl_pa = (UInt32)offset;
988             if (len != p_pt_attrs->l1_tbl_alloc_sz) {
989                 status = -ENOMEM;
990                 GT_setFailureReason (curTrace,
991                                      GT_4CLASS,
992                                      "init_mmu_page_attribs",
993                                      status,
994                                      "phys mem is not contiguous");
995             }
996             if (status != 0) {
997                 GT_setFailureReason (curTrace,
998                                      GT_4CLASS,
999                                      "init_mmu_page_attribs",
1000                                      status,
1001                                      "mem_offset64 failed");
1002                 goto error_exit;
1003             }
1004         }
1005         /* We should be able to get aligned table now */
1006         p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
1007         p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
1008         /* Align the PA to the next 'align'  boundary */
1009         p_pt_attrs->l1_base_pa = ((pg_tbl_pa) + (align_size-1)) &
1010                             (~(align_size-1));
1011         p_pt_attrs->l1_base_va = pg_tbl_va + (p_pt_attrs->l1_base_pa -
1012                                 pg_tbl_pa);
1013     } else {
1014         /* We got aligned PA, cool */
1015         p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
1016         p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
1017         p_pt_attrs->l1_base_pa = pg_tbl_pa;
1018         p_pt_attrs->l1_base_va = pg_tbl_va;
1019     }
1021     if (p_pt_attrs->l1_base_va)
1022         memset((UInt8*)p_pt_attrs->l1_base_va, 0x00, p_pt_attrs->l1_size);
1023     p_pt_attrs->l2_num_pages = ls_num_of_pages;
1024     p_pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * p_pt_attrs->l2_num_pages;
1025     align_size = 4; /* Make it UInt32 aligned  */
 1026     /* the L2 tables are carved out of the same allocation, past the L1 table */
1027     pg_tbl_va = p_pt_attrs->l1_base_va + 0x80000;
1028     pg_tbl_pa = p_pt_attrs->l1_base_pa + 0x80000;
1029     p_pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
1030     p_pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
1031     p_pt_attrs->ls_tbl_alloc_sz = p_pt_attrs->l2_size;
1032     p_pt_attrs->l2_base_pa = pg_tbl_pa;
1033     p_pt_attrs->l2_base_va = pg_tbl_va;
1034     if (p_pt_attrs->l2_base_va)
1035         memset((UInt8*)p_pt_attrs->l2_base_va, 0x00, p_pt_attrs->l2_size);
1037     p_pt_attrs->pg_info = Memory_alloc(NULL, sizeof(struct page_info), 0, NULL);
1038     if (p_pt_attrs->pg_info)
1039         Memory_set (p_pt_attrs->pg_info, 0, sizeof(struct page_info));
1040     else {
1041         status = -ENOMEM;
1042         GT_setFailureReason (curTrace,
1043                              GT_4CLASS,
1044                              "init_mmu_page_attribs",
1045                              status,
1046                              "Memory_alloc failed");
1047         goto error_exit;
1048     }
1049     return p_pt_attrs;
1051 error_exit:
1052     if (p_pt_attrs) {
1053         if (p_pt_attrs->pg_info)
1054             Memory_free (NULL, p_pt_attrs->pg_info, sizeof(struct page_info));
1055         if (p_pt_attrs->l1_tbl_alloc_va) {
1056             munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
1057                     p_pt_attrs->l1_tbl_alloc_sz);
1058         }
1059         Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
1060         p_pt_attrs = NULL;
1061     }
1063     return NULL;
1067 /*========================================
1068  * This destroys the Dsp processor MMU Page tables
1069  *
1070  */
1071 static Void deinit_mmu_page_attribs (struct pg_table_attrs * p_pt_attrs)
1073     if (p_pt_attrs) {
1074         if (p_pt_attrs->pg_info)
1075             Memory_free (NULL, p_pt_attrs->pg_info, sizeof(struct page_info));
1076         if (p_pt_attrs->l1_tbl_alloc_va) {
1077             munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
1078                     p_pt_attrs->l1_tbl_alloc_sz);
1079         }
1080         Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
1081         p_pt_attrs = NULL;
1082     }
1086 /*============================================
1087  * This function calculates PTE address (MPU virtual) to be updated
1088  *  It also manages the L2 page tables
1089  */
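/*
 * Illustrative behaviour for a small mapping: a 4KB or 64KB request first
 * looks up the L1 entry for va. If that entry is empty, a free L2 (coarse)
 * table is picked from the pool managed below and the L1 entry is pointed
 * at it; the actual PTE is then written into that L2 table. A 64KB mapping
 * occupies 16 consecutive L2 entries, which is why num_entries is bumped by
 * 16 for HW_PAGE_SIZE_64KB and by 1 for HW_PAGE_SIZE_4KB.
 */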
1090 static Int pte_set (UInt32 pa, UInt32 va, UInt32 size,
1091                     struct hw_mmu_map_attrs_t *attrs, struct pg_table_attrs *pt_Table)
1093     UInt32 i;
1094     UInt32 pte_val;
1095     UInt32 pte_addr_l1;
1096     UInt32 pte_size;
1097     UInt32 pg_tbl_va; /* Base address of the PT that will be updated */
1098     UInt32 l1_base_va;
 1099      /* The compiler warns that the next three variables might be used
 1100      * uninitialized in this function. That does not appear to be the case;
 1101      * they are zero-initialized anyway to silence the warning. */
1102     UInt32 l2_base_va = 0;
1103     UInt32 l2_base_pa = 0;
1104     UInt32 l2_page_num = 0;
1105     struct pg_table_attrs *pt = pt_Table;
1106     struct iotlb_entry    *mapAttrs;
1107     int status = 0;
1108     VAYUDSP_HalMmuEntryInfo setPteInfo;
1109     mapAttrs = Memory_alloc(0, sizeof(struct iotlb_entry), 0, NULL);
1111     l1_base_va = pt->l1_base_va;
1112     pg_tbl_va = l1_base_va;
1113     if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) {
1114         /* Find whether the L1 PTE points to a valid L2 PT */
1115         pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1116         if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1117             pte_val = *(UInt32 *)pte_addr_l1;
1118             pte_size = hw_mmu_pte_sizel1(pte_val);
1119         } else {
1120             return -EINVAL;
1121         }
1122         /* FIX ME */
 1123         /* TODO: add synchronization element */
1124         /*        sync_enter_cs(pt->hcs_object);*/
1125         if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1126             /* Get the L2 PA from the L1 PTE, and find
1127              * corresponding L2 VA */
1128             l2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1129             l2_base_va = l2_base_pa - pt->l2_base_pa +
1130             pt->l2_base_va;
1131             l2_page_num = (l2_base_pa - pt->l2_base_pa) /
1132                     HW_MMU_COARSE_PAGE_SIZE;
1133         } else if (pte_size == 0) {
1134             /* L1 PTE is invalid. Allocate a L2 PT and
1135              * point the L1 PTE to it */
1136             /* Find a free L2 PT. */
1137             for (i = 0; (i < pt->l2_num_pages) &&
1138                 (pt->pg_info[i].num_entries != 0); i++)
1139                 ;;
1140             if (i < pt->l2_num_pages) {
1141                 l2_page_num = i;
1142                 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1143                        HW_MMU_COARSE_PAGE_SIZE);
1144                 l2_base_va = pt->l2_base_va + (l2_page_num *
1145                        HW_MMU_COARSE_PAGE_SIZE);
1146                 /* Endianness attributes are ignored for
1147                  * HW_MMU_COARSE_PAGE_SIZE */
1148                 mapAttrs->endian = attrs->endianism;
1149                 mapAttrs->mixed = attrs->mixedSize;
1150                 mapAttrs->elsz= attrs->element_size;
1151                 mapAttrs->da = va;
1152                 mapAttrs->pa = pa;
1153                 status = hw_mmu_pte_set(pg_tbl_va, l2_base_pa, va,
1154                                         HW_MMU_COARSE_PAGE_SIZE, attrs);
1155             } else {
1156                 status = -ENOMEM;
1157             }
1158         } else {
1159             /* Found valid L1 PTE of another size.
1160              * Should not overwrite it. */
1161             status = -EINVAL;
1162         }
1163         if (status == 0) {
1164             pg_tbl_va = l2_base_va;
1165             if (size == HW_PAGE_SIZE_64KB)
1166                 pt->pg_info[l2_page_num].num_entries += 16;
1167             else
1168                 pt->pg_info[l2_page_num].num_entries++;
1169         }
1170     }
1171     if (status == 0) {
1172         mapAttrs->endian = attrs->endianism;
1173         mapAttrs->mixed = attrs->mixedSize;
1174         mapAttrs->elsz= attrs->element_size;
1175         mapAttrs->da = va;
1176         mapAttrs->pa = pa;
1177         mapAttrs->pgsz = MMU_CAM_PGSZ_16M;
1178         setPteInfo.elementSize = attrs->element_size;
1179         setPteInfo.endianism = attrs->endianism;
1180         setPteInfo.masterPhyAddr = pa;
1181         setPteInfo.mixedSize = attrs->mixedSize;
1182         setPteInfo.size = size;
1183         setPteInfo.slaveVirtAddr = va;
1185         status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1186         if (status == RET_OK)
1187             status = 0;
1188     }
1189     Memory_free(0, mapAttrs, sizeof(struct iotlb_entry));
1190     return status;
1194 /*=============================================
1195  * This function calculates the optimum page-aligned addresses and sizes
1196  * Caller must pass page-aligned values
1197  */
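/*
 * Worked example (illustrative addresses): with pa = va = 0xA0000000 and
 * size = 0x150000, both addresses are 1MB-aligned and the remaining size is
 * below 16MB, so the first pass picks HW_PAGE_SIZE_1MB. The next passes see
 * 0x50000 bytes left at 0xA0100000, which is less than 1MB, so five
 * HW_PAGE_SIZE_64KB entries follow: six pte_set() calls in total.
 */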
1198 static Int pte_update (UInt32 pa, UInt32 va, UInt32 size,
1199                        struct hw_mmu_map_attrs_t *map_attrs, struct pg_table_attrs *pt_Table)
1201     UInt32 i;
1202     UInt32 all_bits;
1203     UInt32 pa_curr = pa;
1204     UInt32 va_curr = va;
1205     UInt32 num_bytes = size;
1206     Int status = 0;
1207     UInt32 pg_size[] = {HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB,
1208                HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB};
1209     while (num_bytes && (status == 0)) {
1210         /* To find the max. page size with which both PA & VA are
1211          * aligned */
1212         all_bits = pa_curr | va_curr;
1213         for (i = 0; i < 4; i++) {
1214             if ((num_bytes >= pg_size[i]) && ((all_bits &
1215                (pg_size[i] - 1)) == 0)) {
1216                 status = pte_set(pa_curr,
1217                     va_curr, pg_size[i], map_attrs, pt_Table);
1218                 pa_curr += pg_size[i];
1219                 va_curr += pg_size[i];
1220                 num_bytes -= pg_size[i];
1221                  /* Don't try smaller sizes. Hopefully we have
1222                  * reached an address aligned to a bigger page
1223                  * size */
1224                 break;
1225             }
1226         }
1227     }
1228     return status;
1232 /*============================================
1233  * This function maps MPU buffer to the DSP address space. It performs
 1234  * linear to physical address translation if required. It translates each
 1235  * page, since linear addresses can be physically non-contiguous.
 1236  * All address & size arguments are assumed to be page aligned (in proc.c).
1237  *
1238  */
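/*
 * Attribute example: add_entry_ext() above calls this with map_attr =
 * DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR, which
 * translates to hw_attrs.endianism = HW_LITTLE_ENDIAN and
 * hw_attrs.element_size = HW_ELEM_SIZE_32BIT (assuming DSP_MAPMIXEDELEMSIZE
 * is not set, mixedSize stays 0). Because DSP_MAPPHYSICALADDR is set,
 * mpu_addr is treated as a physical address and handed straight to
 * pte_update().
 */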
1239 static Int rproc_mem_map (VAYUDSP_HalObject * halObject,
1240                           UInt32 mpu_addr, UInt32 ul_virt_addr,
1241                           UInt32 num_bytes, UInt32 map_attr)
1243     UInt32 attrs;
1244     Int status = 0;
1245     struct hw_mmu_map_attrs_t hw_attrs;
1246     Int pg_i = 0;
1248     if (halObject == NULL) {
1249         status = -ENOMEM;
1250         GT_setFailureReason (curTrace,
1251                              GT_4CLASS,
 1252                              "rproc_mem_map",
1253                              status,
1254                              "halObject is NULL");
1255     }
1256     else if (halObject->mmuBase == 0) {
1257         status = -ENOMEM;
1258         GT_setFailureReason (curTrace,
1259                              GT_4CLASS,
 1260                              "rproc_mem_map",
1261                              status,
1262                              "halObject->mmuBase is 0");
1263     }
1264     else if (num_bytes == 0) {
1265         status = -EINVAL;
1266         GT_setFailureReason (curTrace,
1267                              GT_4CLASS,
 1268                              "rproc_mem_map",
1269                              status,
1270                              "num_bytes is 0");
1271     }
1272     else {
1273         if (map_attr != 0) {
1274             attrs = map_attr;
1275             attrs |= DSP_MAPELEMSIZE32;
1276         } else {
1277             /* Assign default attributes */
1278             attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE32;
1279         }
1280         /* Take mapping properties */
1281         if (attrs & DSP_MAPBIGENDIAN)
1282             hw_attrs.endianism = HW_BIG_ENDIAN;
1283         else
1284             hw_attrs.endianism = HW_LITTLE_ENDIAN;
1286         hw_attrs.mixedSize = (enum hw_mmu_mixed_size_t)
1287                      ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1288         /* Ignore element_size if mixedSize is enabled */
1289         if (hw_attrs.mixedSize == 0) {
1290             if (attrs & DSP_MAPELEMSIZE8) {
1291                 /* Size is 8 bit */
1292                 hw_attrs.element_size = HW_ELEM_SIZE_8BIT;
1293             } else if (attrs & DSP_MAPELEMSIZE16) {
1294                 /* Size is 16 bit */
1295                 hw_attrs.element_size = HW_ELEM_SIZE_16BIT;
1296             } else if (attrs & DSP_MAPELEMSIZE32) {
1297                 /* Size is 32 bit */
1298                 hw_attrs.element_size = HW_ELEM_SIZE_32BIT;
1299             } else if (attrs & DSP_MAPELEMSIZE64) {
1300                 /* Size is 64 bit */
1301                 hw_attrs.element_size = HW_ELEM_SIZE_64BIT;
1302             } else {
1303                 /* Mixedsize isn't enabled, so size can't be
1304                  * zero here */
1305                 status = -EINVAL;
1306                 GT_setFailureReason (curTrace,
1307                                      GT_4CLASS,
 1308                                      "rproc_mem_map",
1309                                      status,
1310                                      "MMU element size is zero");
1311             }
1312         }
1314         if (status >= 0) {
1315             /*
1316              * Do OS-specific user-va to pa translation.
1317              * Combine physically contiguous regions to reduce TLBs.
1318              * Pass the translated pa to PteUpdate.
1319              */
1320             if ((attrs & DSP_MAPPHYSICALADDR)) {
1321                 status = pte_update(mpu_addr, ul_virt_addr, num_bytes,
1322                            &hw_attrs,
1323                            (struct pg_table_attrs *)halObject->mmuObj.pPtAttrs);
1324             }
 1326             /* Don't propagate Linux or HW status to upper layers */
1327             if (status < 0) {
1328                 /*
 1329                  * Roll back the pages already mapped, in case mapping
 1330                  * failed midway through
1331                  */
1332                 if (pg_i)
1333                     rproc_mem_unmap(halObject, ul_virt_addr,
1334                                     (pg_i * PAGE_SIZE));
1335             }
1337             /* In any case, flush the TLB
1338              * This is called from here instead from pte_update to avoid
1339              * unnecessary repetition while mapping non-contiguous physical
1340              * regions of a virtual region */
1341             hw_mmu_tlb_flushAll(halObject->mmuBase);
1342         }
1343     }
1344     return status;
1349 /*
1350  *  ======== benelli_mem_unmap ========
1351  *      Invalidate the PTEs for the DSP VA block to be unmapped.
1352  *
1353  *      PTEs of a mapped memory block are contiguous in any page table
1354  *      So, instead of looking up the PTE address for every 4K block,
1355  *      we clear consecutive PTEs until we unmap all the bytes
1356  */
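/*
 * Example of the per-L2-table bookkeeping below (assuming the usual 1KB
 * coarse table of 256 four-byte PTEs covering 1MB): if the first PTE to
 * clear sits at byte offset 0x3C0 inside its L2 table, then
 *     pte_count = (HW_MMU_COARSE_PAGE_SIZE - 0x3C0) / sizeof(UInt32) = 16
 * i.e. 16 remaining PTEs (64KB of VA) can be cleared on this table; if
 * fewer bytes remain to unmap, pte_count is reduced to rem_bytes / PAGE_SIZE
 * before the inner loop starts clearing entries.
 */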
1357 static Int rproc_mem_unmap (VAYUDSP_HalObject * halObject,
1358                             UInt32 da, UInt32 num_bytes)
1360     UInt32 L1_base_va;
1361     UInt32 L2_base_va;
1362     UInt32 L2_base_pa;
1363     UInt32 L2_page_num;
1364     UInt32 pte_val;
1365     UInt32 pte_size;
1366     UInt32 pte_count;
1367     UInt32 pte_addr_l1;
1368     UInt32 pte_addr_l2 = 0;
1369     UInt32 rem_bytes;
1370     UInt32 rem_bytes_l2;
1371     UInt32 vaCurr;
1372     Int status = 0;
1373     UInt32 temp;
1374     UInt32 pAddr;
1375     UInt32 numof4Kpages = 0;
1376     struct pg_table_attrs * p_pt_attrs = NULL;
1378     if (halObject == NULL) {
1379         status = -ENOMEM;
1380         GT_setFailureReason (curTrace,
1381                              GT_4CLASS,
1382                              "rproc_mem_unmap",
1383                              status,
1384                              "halObject is NULL");
1385     }
1386     else if (halObject->mmuBase == 0) {
1387         status = -ENOMEM;
1388         GT_setFailureReason (curTrace,
1389                              GT_4CLASS,
1390                              "rproc_mem_unmap",
1391                              status,
1392                              "halObject->mmuBase is 0");
1393     }
1394     else if (halObject->mmuObj.pPtAttrs == NULL) {
1395         status = -ENOMEM;
1396         GT_setFailureReason (curTrace,
1397                              GT_4CLASS,
1398                              "rproc_mem_unmap",
1399                              status,
1400                              "halObject->mmuObj.pPtAttrs is 0");
1401     }
1402     else {
1403         p_pt_attrs = (struct pg_table_attrs *)halObject->mmuObj.pPtAttrs;
1404         vaCurr = da;
1405         rem_bytes = num_bytes;
1406         rem_bytes_l2 = 0;
1407         L1_base_va = p_pt_attrs->l1_base_va;
1408         pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1409         while (rem_bytes) {
1410             UInt32 vaCurrOrig = vaCurr;
1411             /* Find whether the L1 PTE points to a valid L2 PT */
1412             pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1413             pte_val = *(UInt32 *)pte_addr_l1;
1414             pte_size = hw_mmu_pte_sizel1(pte_val);
1415             if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1416                 /*
1417                  * Get the L2 PA from the L1 PTE, and find
1418                  * corresponding L2 VA
1419                  */
1420                 L2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1421                 L2_base_va = L2_base_pa - p_pt_attrs->l2_base_pa
1422                             + p_pt_attrs->l2_base_va;
1423                 L2_page_num = (L2_base_pa - p_pt_attrs->l2_base_pa) /
1424                         HW_MMU_COARSE_PAGE_SIZE;
1425                 /*
1426                  * Find the L2 PTE address from which we will start
1427                  * clearing, the number of PTEs to be cleared on this
1428                  * page, and the size of VA space that needs to be
1429                  * cleared on this L2 page
1430                  */
1431                 pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
1432                 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1433                 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) /
1434                         sizeof(UInt32);
1435                 if (rem_bytes < (pte_count * PAGE_SIZE))
1436                     pte_count = rem_bytes / PAGE_SIZE;
1438                 rem_bytes_l2 = pte_count * PAGE_SIZE;
1439                 /*
1440                  * Unmap the VA space on this L2 PT. A quicker way
1441                  * would be to clear pte_count entries starting from
1442                  * pte_addr_l2. However, below code checks that we don't
1443                  * clear invalid entries or less than 64KB for a 64KB
1444                  * entry. Similar checking is done for L1 PTEs too
1445                  * below
1446                  */
1447                 while (rem_bytes_l2) {
1448                     pte_val = *(UInt32 *)pte_addr_l2;
1449                     pte_size = hw_mmu_pte_sizel2(pte_val);
1450                     /* vaCurr aligned to pte_size? */
1451                     if ((pte_size != 0) && (rem_bytes_l2
1452                         >= pte_size) &&
1453                         !(vaCurr & (pte_size - 1))) {
1454                         /* Collect Physical addresses from VA */
1455                         pAddr = (pte_val & ~(pte_size - 1));
1456                         if (pte_size == HW_PAGE_SIZE_64KB)
1457                             numof4Kpages = 16;
1458                         else
1459                             numof4Kpages = 1;
1460                         temp = 0;
1462                         if (hw_mmu_pte_clear(pte_addr_l2,
1463                             vaCurr, pte_size) == RET_OK) {
1464                             rem_bytes_l2 -= pte_size;
1465                             vaCurr += pte_size;
1466                             pte_addr_l2 += (pte_size >> 12)
1467                                 * sizeof(UInt32);
1468                         } else {
1469                             status = -EFAULT;
1470                             goto EXIT_LOOP;
1471                         }
 1472                     } else {
 1473                         status = -EFAULT;
                        goto EXIT_LOOP;
                    }
1474                 }
1475                 if (rem_bytes_l2 != 0) {
1476                     status = -EFAULT;
1477                     goto EXIT_LOOP;
1478                 }
1479                 p_pt_attrs->pg_info[L2_page_num].num_entries -=
1480                             pte_count;
1481                 if (p_pt_attrs->pg_info[L2_page_num].num_entries
1482                                     == 0) {
1483                     /*
1484                      * Clear the L1 PTE pointing to the
1485                      * L2 PT
1486                      */
1487                     if (RET_OK != hw_mmu_pte_clear(L1_base_va,
1488                         vaCurrOrig, HW_MMU_COARSE_PAGE_SIZE)) {
1489                         status = -EFAULT;
1490                         goto EXIT_LOOP;
1491                     }
1492                 }
1493                 rem_bytes -= pte_count * PAGE_SIZE;
1494             } else
1495                 /* vaCurr aligned to pte_size? */
1496                 /* pte_size = 1 MB or 16 MB */
1497                 if ((pte_size != 0) && (rem_bytes >= pte_size) &&
1498                    !(vaCurr & (pte_size - 1))) {
1499                     if (pte_size == HW_PAGE_SIZE_1MB)
1500                         numof4Kpages = 256;
1501                     else
1502                         numof4Kpages = 4096;
1503                     temp = 0;
1504                     /* Collect Physical addresses from VA */
1505                     pAddr = (pte_val & ~(pte_size - 1));
1506                     if (hw_mmu_pte_clear(L1_base_va, vaCurr,
1507                             pte_size) == RET_OK) {
1508                         rem_bytes -= pte_size;
1509                         vaCurr += pte_size;
1510                     } else {
1511                         status = -EFAULT;
1512                         goto EXIT_LOOP;
1513                     }
1514             } else {
1515                 status = -EFAULT;
                     /* invalid PTE size or misaligned VA: abort the unmap */
                     goto EXIT_LOOP;
1516             }
1517         }
1518     }
1519     /*
1520      * Flush the TLB here so that any stale entries for the range just
1521      * unmapped are discarded
1522      */
1523 EXIT_LOOP:
1524     hw_mmu_tlb_flushAll(halObject->mmuBase);
1525     return status;
1526 }
1529 /*========================================
1530  * This sets up the DSP processor: it allocates the MMU page-table
1531  * attributes, programs the MMU entries and re-enables the TWL.
1532  */
1533 Int rproc_dsp_setup (VAYUDSP_HalObject * halObject,
1534                      ProcMgr_AddrInfo * memEntries,
1535                      UInt32 numMemEntries)
1536 {
1537     Int ret_val = 0;
1538     struct pg_table_attrs * p_pt_attrs = NULL;
1540     p_pt_attrs = init_mmu_page_attribs(0x10000, 14, 128);
1541     if (!p_pt_attrs) {
             ret_val = -ENOMEM;
1542         GT_setFailureReason (curTrace,
1543                              GT_4CLASS,
1544                              "rproc_dsp_setup",
1545                              ret_val,
1546                              "init_mmu_page_attribs failed");
1547     }
1548     else {
1549         halObject->mmuObj.pPtAttrs = p_pt_attrs;
1550         /* Disable TWL  */
1551         ret_val = rproc_set_twl(halObject, FALSE);
1552         if (ret_val < 0) {
1553             GT_setFailureReason (curTrace,
1554                                  GT_4CLASS,
1555                                  "rproc_dsp_setup",
1556                                  ret_val,
1557                                  "rproc_set_twl to FALSE failed");
1558         }
1559         else {
1560             ret_val = rproc_mmu_init (halObject, memEntries,
1561                                       numMemEntries);
1562             if (ret_val < 0) {
1563                 GT_setFailureReason (curTrace,
1564                                      GT_4CLASS,
1565                                      "rproc_dsp_setup",
1566                                      ret_val,
1567                                      "rproc_mmu_init failed");
1568             }
1569             else {
1570                 ret_val = rproc_set_twl(halObject, TRUE);
1571                 if (ret_val < 0) {
1572                     GT_setFailureReason (curTrace,
1573                                          GT_4CLASS,
1574                                          "rproc_dsp_setup",
1575                                          ret_val,
1576                                          "rproc_set_twl to TRUE failed");
1577                 }
1578             }
1579         }
1580     }
1582     if ((ret_val < 0) && (p_pt_attrs != NULL)) {
1583         deinit_mmu_page_attribs(p_pt_attrs);
1584         halObject->mmuObj.pPtAttrs = NULL;
1585     }
1587     return ret_val;
1588 }
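/*========================================
 * Tears down what rproc_dsp_setup() created: clears the cached shared
 * memory physical address and releases the MMU page-table attributes.
 */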
1592 Void rproc_dsp_destroy(VAYUDSP_HalObject * halObject)
1593 {
1594     shm_phys_addr_dsp = 0;
1596     if (halObject->mmuObj.pPtAttrs) {
1597         deinit_mmu_page_attribs(halObject->mmuObj.pPtAttrs);
1598         halObject->mmuObj.pPtAttrs = NULL;
1599     }
1600 }
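/**
 * iotlb_load_cr - Load a CAM/RAM register pair into a DSP MMU TLB entry
 * @halObject:  DSP HAL object holding the MMU register base
 * @cr:         contents of the cam and ram registers to program
 **/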
1603 static Void iotlb_load_cr (VAYUDSP_HalObject * halObject,
1604                            struct cr_regs *cr)
1605 {
1606     ULONG reg;
1607     VAYUDsp_MMURegs * mmuRegs =
1608                                   (VAYUDsp_MMURegs *)halObject->mmuBase;
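         /*
          * Program the victim slot: write the CAM word (with the valid bit
          * set) and the RAM word, flush any entry currently matching that
          * VA, then trigger LD_TLB to latch the new entry.
          */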
1610     reg = cr->cam | MMU_CAM_V;
1611     OUTREG32(&mmuRegs->CAM, reg);
1613     reg = cr->ram;
1614     OUTREG32(&mmuRegs->RAM, reg);
1616     reg = 1;
1617     OUTREG32(&mmuRegs->FLUSH_ENTRY, reg);
1619     reg = 1;
1620     OUTREG32(&mmuRegs->LD_TLB, reg);
1621 }
1624 /**
1625  * iotlb_dump_cr - Dump an iommu tlb entry into buf
1627  * @cr:        contents of cam and ram register
1628  * @buf:    output buffer
1629  **/
1630 static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf)
1631 {
1632     Char *p = buf;
1634     if (!cr || !buf)
1635         return 0;
1637     /* FIXME: Need more detailed analysis of cam/ram */
1638     p += sprintf(p, "%08x %08x %01x\n", (unsigned int)cr->cam,
1639                     (unsigned int)cr->ram,
1640                     (cr->cam & MMU_CAM_P) ? 1 : 0);
1641     return (p - buf);
1642 }
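/**
 * iotlb_cr_valid - Check whether a CAM/RAM pair describes a valid TLB entry
 * @cr:     contents of cam and ram register
 **/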
1646 static Int iotlb_cr_valid (struct cr_regs *cr)
1647 {
1648     if (!cr)
1649         return -EINVAL;
1651     return (cr->cam & MMU_CAM_V);
1652 }
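/**
 * omap4_alloc_cr - Build a CAM/RAM register pair from an iotlb_entry
 * @e:      an iommu tlb entry info
 *
 * Returns a freshly mmap()ed cr_regs that the caller must munmap(), or
 * NULL if the device address is not aligned to the requested page size.
 **/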
1656 static struct cr_regs *omap4_alloc_cr (struct iotlb_entry *e)
1657 {
1658     struct cr_regs *cr;
1660     if (e->da & ~(get_cam_va_mask(e->pgsz))) {
1661         GT_setFailureReason (curTrace,
1662                              GT_4CLASS,
1663                              "omap4_alloc_cr",
1664                              -EINVAL,
1665                              "failed mask check");
1666         return NULL;
1667     }
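         /*
          * Allocate the scratch cr_regs with an uncached, physically
          * contiguous QNX mapping (MAP_ANON | MAP_PHYS); load_iotlb_entry()
          * unmaps it again once the entry has been written to the TLB.
          */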
1669     cr = mmap(NULL,
1670               sizeof(struct cr_regs),
1671               PROT_NOCACHE | PROT_READ | PROT_WRITE,
1672               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
1673               NOFD,
1674               0);
1676     if (MAP_FAILED == cr)
1677     {
1678         GT_setFailureReason (curTrace,
1679                              GT_4CLASS,
1680                              "omap4_alloc_cr",
1681                              -EINVAL,
1682                              "mmap failed");
1683         return NULL;
1684     }
1686     cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
1687     cr->ram = e->pa | e->endian | e->elsz | e->mixed;
1688     return cr;
1689 }
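/**
 * iotlb_alloc_cr - Validate the entry and allocate its CAM/RAM pair
 * @e:      an iommu tlb entry info
 **/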
1693 static struct cr_regs *iotlb_alloc_cr (struct iotlb_entry *e)
1694 {
1695     if (!e) {
1696         GT_setFailureReason (curTrace,
1697                              GT_4CLASS,
1698                              "iotlb_alloc_cr",
1699                              -EINVAL,
1700                              "e is NULL");
1701         return NULL;
1702     }
1704     return omap4_alloc_cr(e);
1705 }
1709 /**
1710  * load_iotlb_entry - Set an iommu tlb entry
1711  * @halObject:    DSP HAL object for the target iommu
1712  * @e:        an iommu tlb entry info
1713  **/
1714 static Int load_iotlb_entry (VAYUDSP_HalObject * halObject,
1715                              struct iotlb_entry *e)
1716 {
1717     Int err = 0;
1718     struct iotlb_lock l;
1719     struct cr_regs *cr;
1721     if (halObject == NULL) {
1722         err = -EINVAL;
1723         GT_setFailureReason (curTrace,
1724                              GT_4CLASS,
1725                              "load_iotlb_entry",
1726                              err,
1727                              "halObject is NULL");
1728         goto out;
1729     }
1731     if (halObject->mmuBase == NULL) {
1732         err = -EINVAL;
1733         GT_setFailureReason (curTrace,
1734                              GT_4CLASS,
1735                              "load_iotlb_entry",
1736                              err,
1737                              "halObject->mmuBase is NULL");
1738         goto out;
1739     }
1741     if (!e) {
1742         err = -EINVAL;
1743         GT_setFailureReason (curTrace,
1744                              GT_4CLASS,
1745                              "load_iotlb_entry",
1746                              err,
1747                              "e is NULL");
1748         goto out;
1749     }
1751     iotlb_getLock(halObject, &l);
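         /*
          * l.base is the lock base: TLB slots below it hold preserved
          * (locked) entries. If it has reached the TLB size (32), every
          * slot is locked and no further entry can be loaded.
          */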
1753     if (l.base == 32) {
1754         err = -EBUSY;
1755         GT_setFailureReason (curTrace,
1756                              GT_4CLASS,
1757                              "load_iotlb_entry",
1758                              err,
1759                              "l.base is full");
1760         goto out;
1761     }
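         /*
          * Non-preserved entries may reuse any invalid slot, so scan the
          * 32 TLB entries for one; preserved entries are always loaded at
          * the current lock base.
          */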
1762     if (!e->prsvd) {
1763         int i;
1764         struct cr_regs tmp;
1766         for_each_iotlb_cr(32, i, tmp)
1767             if (!iotlb_cr_valid(&tmp))
1768                 break;
1770         if (i == 32) {
1771             err = -EBUSY;
1772             GT_setFailureReason (curTrace,
1773                                  GT_4CLASS,
1774                                  "load_iotlb_entry",
1775                                  err,
1776                                  "i == 32");
1777             goto out;
1778         }
1780         iotlb_getLock(halObject, &l);
1781     } else {
1782         l.vict = l.base;
1783         iotlb_setLock(halObject, &l);
1784     }
1786     cr = iotlb_alloc_cr(e);
1787     if (!cr) {
1788         err = -ENOMEM;
1789         GT_setFailureReason (curTrace,
1790                              GT_4CLASS,
1791                              "load_iotlb_entry",
1792                              err,
1793                              "iotlb_alloc_cr failed");
1794         goto out;
1795     }
1797     iotlb_load_cr(halObject, cr);
1798     munmap(cr, sizeof(struct cr_regs));
1800     if (e->prsvd)
1801         l.base++;
1802     /* increment victim for next tlb load */
1803     if (++l.vict == 32)
1804         l.vict = l.base;
1805     iotlb_setLock(halObject, &l);
1807 out:
1808     return err;
1809 }
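/*
 * Illustrative usage sketch (not part of the original source): how a caller
 * might pin a single 4KB page with load_iotlb_entry(). The iotlb_entry field
 * names match their use in omap4_alloc_cr() above; the page-size constant
 * name (MMU_CAM_PGSZ_4KB) and the example addresses are assumptions and may
 * differ in hw_defs.h / hw_mmu.h.
 *
 *     struct iotlb_entry e = {0};
 *
 *     e.da    = 0x60000000;       // DSP-side virtual address (example)
 *     e.pa    = 0xA0000000;       // backing physical address (example)
 *     e.pgsz  = MMU_CAM_PGSZ_4KB; // page size (assumed constant name)
 *     e.valid = MMU_CAM_V;        // mark the entry valid
 *     e.prsvd = MMU_CAM_P;        // lock the entry so it is never evicted
 *     if (load_iotlb_entry(halObject, &e) < 0) {
 *         // handle -EINVAL / -EBUSY / -ENOMEM from the TLB load
 *     }
 */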